2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/errno.h>
39 #include <linux/pci.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/slab.h>
42 #include <linux/io-mapping.h>
43 #include <linux/delay.h>
44 #include <linux/kmod.h>
45 #include <linux/etherdevice.h>
46 #include <net/devlink.h>
48 #include <linux/mlx4/device.h>
49 #include <linux/mlx4/doorbell.h>
55 MODULE_AUTHOR("Roland Dreier");
56 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
57 MODULE_LICENSE("Dual BSD/GPL");
58 MODULE_VERSION(DRV_VERSION);
60 struct workqueue_struct *mlx4_wq;
62 #ifdef CONFIG_MLX4_DEBUG
64 int mlx4_debug_level = 0;
65 module_param_named(debug_level, mlx4_debug_level, int, 0644);
66 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
68 #endif /* CONFIG_MLX4_DEBUG */
73 module_param(msi_x, int, 0444);
74 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
76 #else /* CONFIG_PCI_MSI */
80 #endif /* CONFIG_PCI_MSI */
82 static uint8_t num_vfs[3] = {0, 0, 0};
83 static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
85 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
86 "num_vfs=port1,port2,port1+2");
88 static uint8_t probe_vf[3] = {0, 0, 0};
89 static int probe_vfs_argc;
90 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of VFs to probe by the PF driver (num_vfs > 0)\n"
92 "probe_vf=port1,port2,port1+2");
94 int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
95 module_param_named(log_num_mgm_entry_size,
96 mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the number"
					 " of QPs per MCG; for example,"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device-managed"
					 " flow steering when available, set to -1");
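/* A sketch of where "10 gives 248" comes from, assuming the usual MGM entry
 * layout: an entry of 2^n bytes is split into 16-byte blocks, each holding
 * four 32-bit QPNs, with the first two blocks forming the entry header, so
 * qp_per_mcg = 4 * (2^n / 16 - 2); n = 10 gives 4 * (64 - 2) = 248.
 */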
104 static bool enable_64b_cqe_eqe = true;
105 module_param(enable_64b_cqe_eqe, bool, 0444);
106 MODULE_PARM_DESC(enable_64b_cqe_eqe,
107 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
109 static bool enable_4k_uar;
110 module_param(enable_4k_uar, bool, 0444);
111 MODULE_PARM_DESC(enable_4k_uar,
	"Enable using 4K UAR. Should not be enabled if there are VFs which do not support 4K UARs (default: false)");
114 #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \
115 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
116 MLX4_FUNC_CAP_DMFS_A0_STATIC)
118 #define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV)
120 static char mlx4_version[] =
121 DRV_NAME ": Mellanox ConnectX core driver v"
122 DRV_VERSION " (" DRV_RELDATE ")\n";
124 static struct mlx4_profile default_profile = {
127 .rdmarc_per_qp = 1 << 4,
	.num_mtt	= 1 << 20, /* It is really the number of MTT segments */
134 static struct mlx4_profile low_mem_profile = {
137 .rdmarc_per_qp = 1 << 4,
144 static int log_num_mac = 7;
145 module_param_named(log_num_mac, log_num_mac, int, 0444);
146 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
148 static int log_num_vlan;
149 module_param_named(log_num_vlan, log_num_vlan, int, 0444);
150 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
151 /* Log2 max number of VLANs per ETH port (0-7) */
152 #define MLX4_LOG_NUM_VLANS 7
153 #define MLX4_MIN_LOG_NUM_VLANS 0
154 #define MLX4_MIN_LOG_NUM_MAC 1
156 static bool use_prio;
157 module_param_named(use_prio, use_prio, bool, 0444);
158 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
160 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
161 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
162 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
164 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
165 static int arr_argc = 2;
166 module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
168 "1 for IB, 2 for Ethernet");
170 struct mlx4_port_config {
171 struct list_head list;
172 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
173 struct pci_dev *pdev;
176 static atomic_t pf_loading = ATOMIC_INIT(0);
178 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
179 struct mlx4_dev_cap *dev_cap)
	/* reserved_uars is calculated in units of system pages. Therefore an
	 * adjustment is applied when the UAR page size is smaller than the
	 * system page size.
	 */
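	/* For example (illustrative numbers): with 4 KB UAR pages on a
	 * 64 KB-page system (PAGE_SHIFT 16, uar_page_shift 12), 16 UARs share
	 * one system page, so the firmware's reserved_uars count is divided
	 * by 1 << (16 - 12) = 16 before being compared with the driver's own
	 * minimum.
	 */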
185 dev->caps.reserved_uars =
187 mlx4_get_num_reserved_uar(dev),
188 dev_cap->reserved_uars /
189 (1 << (PAGE_SHIFT - dev->uar_page_shift)));
192 int mlx4_check_port_params(struct mlx4_dev *dev,
193 enum mlx4_port_type *port_type)
197 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
198 for (i = 0; i < dev->caps.num_ports - 1; i++) {
199 if (port_type[i] != port_type[i + 1]) {
200 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
206 for (i = 0; i < dev->caps.num_ports; i++) {
207 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
208 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
216 static void mlx4_set_port_mask(struct mlx4_dev *dev)
220 for (i = 1; i <= dev->caps.num_ports; ++i)
221 dev->caps.port_mask[i] = dev->caps.port_type[i];
225 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
228 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
231 struct mlx4_func func;
233 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
234 err = mlx4_QUERY_FUNC(dev, &func, 0);
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
239 dev_cap->max_eqs = func.max_eq;
240 dev_cap->reserved_eqs = func.rsvd_eqs;
241 dev_cap->reserved_uars = func.rsvd_uars;
242 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
247 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
249 struct mlx4_caps *dev_cap = &dev->caps;
	/* Not supported by the FW or cancelled by the user */
252 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
253 !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
	/* 64B CQEs/EQEs must be enabled by the FW to use the bigger stride.
	 * When the FW has NCSI it may decide not to report 64B CQE/EQE support.
	 */
259 if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
260 !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
261 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
262 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
266 if (cache_line_size() == 128 || cache_line_size() == 256) {
267 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
268 /* Changing the real data inside CQE size to 32B */
269 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
270 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
272 if (mlx4_is_master(dev))
273 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
282 static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
283 struct mlx4_port_cap *port_cap)
285 dev->caps.vl_cap[port] = port_cap->max_vl;
286 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
287 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
288 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
289 /* set gid and pkey table operating lengths by default
290 * to non-sriov values
292 dev->caps.gid_table_len[port] = port_cap->max_gids;
293 dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
294 dev->caps.port_width_cap[port] = port_cap->max_port_width;
295 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
296 dev->caps.max_tc_eth = port_cap->max_tc_eth;
297 dev->caps.def_mac[port] = port_cap->def_mac;
298 dev->caps.supported_type[port] = port_cap->supported_port_types;
299 dev->caps.suggested_type[port] = port_cap->suggested_type;
300 dev->caps.default_sense[port] = port_cap->default_sense;
301 dev->caps.trans_type[port] = port_cap->trans_type;
302 dev->caps.vendor_oui[port] = port_cap->vendor_oui;
303 dev->caps.wavelength[port] = port_cap->wavelength;
304 dev->caps.trans_code[port] = port_cap->trans_code;
309 static int mlx4_dev_port(struct mlx4_dev *dev, int port,
310 struct mlx4_port_cap *port_cap)
314 err = mlx4_QUERY_PORT(dev, port, port_cap);
317 mlx4_err(dev, "QUERY_PORT command failed.\n");
322 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
324 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
327 if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS\n");
329 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
333 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS\n");
336 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
341 #define MLX4_A0_STEERING_TABLE_SIZE 256
342 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
347 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
349 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
352 mlx4_dev_cap_dump(dev, dev_cap);
354 if (dev_cap->min_page_sz > PAGE_SIZE) {
355 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
356 dev_cap->min_page_sz, PAGE_SIZE);
359 if (dev_cap->num_ports > MLX4_MAX_PORTS) {
360 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
361 dev_cap->num_ports, MLX4_MAX_PORTS);
365 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
366 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
369 pci_resource_len(dev->persist->pdev, 2));
373 dev->caps.num_ports = dev_cap->num_ports;
374 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
375 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
376 dev->caps.num_sys_eqs :
378 for (i = 1; i <= dev->caps.num_ports; ++i) {
379 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
381 mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
386 dev->caps.uar_page_size = PAGE_SIZE;
387 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
388 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
389 dev->caps.bf_reg_size = dev_cap->bf_reg_size;
390 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
391 dev->caps.max_sq_sg = dev_cap->max_sq_sg;
392 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
393 dev->caps.max_wqes = dev_cap->max_qp_sz;
394 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
395 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
396 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
397 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
398 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
399 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
401 * Subtract 1 from the limit because we need to allocate a
402 * spare CQE so the HCA HW can tell the difference between an
403 * empty CQ and a full CQ.
405 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
406 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
407 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
408 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
409 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
411 dev->caps.reserved_pds = dev_cap->reserved_pds;
412 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
413 dev_cap->reserved_xrcds : 0;
414 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
415 dev_cap->max_xrcds : 0;
416 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
418 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
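	/* ~(min_page_sz - 1) builds a page-size capability mask: e.g. a 4 KB
	 * minimum yields 0xfffff000, one bit set for every supported
	 * (power-of-two) page size >= the minimum. The slave path later
	 * recovers the minimum via two's complement: ~page_size_cap + 1.
	 */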
419 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
420 dev->caps.flags = dev_cap->flags;
421 dev->caps.flags2 = dev_cap->flags2;
422 dev->caps.bmme_flags = dev_cap->bmme_flags;
423 dev->caps.reserved_lkey = dev_cap->reserved_lkey;
424 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
425 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
426 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
428 /* Save uar page shift */
429 if (!mlx4_is_slave(dev)) {
430 /* Virtual PCI function needs to determine UAR page size from
431 * firmware. Only master PCI function can set the uar page size
		if (enable_4k_uar)
			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
		else
			dev->uar_page_shift = PAGE_SHIFT;
438 mlx4_set_num_reserved_uars(dev, dev_cap);
441 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
442 struct mlx4_init_hca_param hca_param;
444 memset(&hca_param, 0, sizeof(hca_param));
445 err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off the PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and
		 * verifies that the phv bit was reported correctly in the
		 * WQE. To allow QinQ, the PHV_EN flag should be set and
		 * phv_check_en must be cleared, otherwise QinQ packets will
		 * be dropped by the HW.
		 */
452 if (err || hca_param.phv_check_en)
453 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
456 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
457 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
458 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
459 /* Don't do sense port on multifunction devices (for now at least) */
460 if (mlx4_is_mfunc(dev))
461 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
463 if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs	= MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans	= MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs	= log_num_mac;
		dev->caps.log_num_vlans	= MLX4_LOG_NUM_VLANS;
	}
471 for (i = 1; i <= dev->caps.num_ports; ++i) {
472 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
473 if (dev->caps.supported_type[i]) {
474 /* if only ETH is supported - assign ETH */
475 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
476 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
477 /* if only IB is supported, assign IB */
478 else if (dev->caps.supported_type[i] ==
480 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
482 /* if IB and ETH are supported, we set the port
483 * type according to user selection of port type;
484 * if user selected none, take the FW hint */
485 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
486 dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
487 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
			else
				dev->caps.port_type[i] = port_type_array[i - 1];
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. The device supports different port types (DPDP).
		 * 3. The FW declared that it supports link sensing.
		 */
498 mlx4_priv(dev)->sense.sense_allowed[i] =
499 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
500 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
501 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
		/*
		 * If the "default_sense" bit is set, move the port to "AUTO" mode
		 * and issue a SENSE_PORT FW command to try to set the correct
		 * port type from the beginning.
		 */
508 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
509 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
510 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
511 mlx4_SENSE_PORT(dev, i, &sensed_port);
512 if (sensed_port != MLX4_PORT_TYPE_NONE)
513 dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}
518 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
519 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too large for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
523 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
524 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too large for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
530 if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
531 (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
532 (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
		mlx4_warn(dev,
			  "Granular QoS per VF not supported with IB/Eth configuration\n");
535 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
538 dev->caps.max_counters = dev_cap->max_counters;
540 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
541 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
542 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
543 (1 << dev->caps.log_num_macs) *
544 (1 << dev->caps.log_num_vlans) *
546 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
548 if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
549 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
550 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
553 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
555 if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
556 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
557 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
558 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
559 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}
567 dev->caps.rl_caps = dev_cap->rl_caps;
569 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
570 dev->caps.dmfs_high_rate_qpn_range;
572 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
573 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
574 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
575 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
577 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
579 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
582 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
583 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
584 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
587 if (dev_cap->flags2 &
588 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
589 MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
590 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
591 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
592 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
596 if ((dev->caps.flags &
597 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
599 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
601 if (!mlx4_is_slave(dev)) {
602 mlx4_enable_cqe_eqe_stride(dev);
603 dev->caps.alloc_res_qp_mask =
604 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
607 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
608 dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
609 mlx4_warn(dev, "Old device ETS support detected\n");
610 mlx4_warn(dev, "Consider upgrading device FW.\n");
611 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
615 dev->caps.alloc_res_qp_mask = 0;
618 mlx4_enable_ignore_fcs(dev);
623 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
624 enum pci_bus_speed *speed,
625 enum pcie_link_width *width)
627 u32 lnkcap1, lnkcap2;
630 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
632 *speed = PCI_SPEED_UNKNOWN;
633 *width = PCIE_LNK_WIDTH_UNKNOWN;
635 err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
637 err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
639 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
640 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
641 *speed = PCIE_SPEED_8_0GT;
642 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
643 *speed = PCIE_SPEED_5_0GT;
644 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
645 *speed = PCIE_SPEED_2_5GT;
648 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
649 if (!lnkcap2) { /* pre-r3.0 */
650 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
651 *speed = PCIE_SPEED_5_0GT;
652 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
653 *speed = PCIE_SPEED_2_5GT;
657 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
659 err2 ? err2 : -EINVAL;
664 static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
666 enum pcie_link_width width, width_cap;
667 enum pci_bus_speed speed, speed_cap;
670 #define PCIE_SPEED_STR(speed) \
671 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
672 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
673 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
676 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
683 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
684 if (err || speed == PCI_SPEED_UNKNOWN ||
685 width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
691 if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different from the device's capability\n");
695 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
696 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
697 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
/* The function checks if there are live VFs and returns their number */
703 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
705 struct mlx4_priv *priv = mlx4_priv(dev);
706 struct mlx4_slave_state *s_state;
	for (i = 1 /* the PPF is 0 */; i < dev->num_slaves; ++i) {
711 s_state = &priv->mfunc.master.slave_state[i];
712 if (s_state->active && s_state->last_cmd !=
713 MLX4_COMM_CMD_RESET) {
714 mlx4_warn(dev, "%s: slave: %d is still active\n",
722 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
724 u32 qk = MLX4_RESERVED_QKEY_BASE;
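	/* Proxy SQPs occupy [base_proxy_sqpn, base_tunnel_sqpn) and tunnel
	 * SQPs the 8 * MLX4_MFUNC_MAX QPs starting at base_tunnel_sqpn; each
	 * QP's qkey is MLX4_RESERVED_QKEY_BASE plus its offset within its
	 * own range.
	 */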
726 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
727 qpn < dev->phys_caps.base_proxy_sqpn)
730 if (qpn >= dev->phys_caps.base_tunnel_sqpn)
732 qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
738 EXPORT_SYMBOL(mlx4_get_parav_qkey);
740 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
742 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
744 if (!mlx4_is_master(dev))
747 priv->virt2phys_pkey[slave][port - 1][i] = val;
749 EXPORT_SYMBOL(mlx4_sync_pkey_table);
751 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
753 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
755 if (!mlx4_is_master(dev))
758 priv->slave_node_guids[slave] = guid;
760 EXPORT_SYMBOL(mlx4_put_slave_node_guid);
762 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
764 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
766 if (!mlx4_is_master(dev))
769 return priv->slave_node_guids[slave];
771 EXPORT_SYMBOL(mlx4_get_slave_node_guid);
773 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
775 struct mlx4_priv *priv = mlx4_priv(dev);
776 struct mlx4_slave_state *s_slave;
778 if (!mlx4_is_master(dev))
781 s_slave = &priv->mfunc.master.slave_state[slave];
782 return !!s_slave->active;
784 EXPORT_SYMBOL(mlx4_is_slave_active);
786 void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
787 struct _rule_hw *eth_header)
789 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
790 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
791 struct mlx4_net_trans_rule_hw_eth *eth =
792 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
793 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
794 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
795 next_rule->rsvd == 0;
798 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
801 EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
803 static void slave_adjust_steering_mode(struct mlx4_dev *dev,
804 struct mlx4_dev_cap *dev_cap,
805 struct mlx4_init_hca_param *hca_param)
807 dev->caps.steering_mode = hca_param->steering_mode;
808 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
809 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
810 dev->caps.fs_log_max_ucast_qp_range_size =
811 dev_cap->fs_log_max_ucast_qp_range_size;
813 dev->caps.num_qp_per_mgm =
814 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
816 mlx4_dbg(dev, "Steering mode is: %s\n",
817 mlx4_steering_mode_str(dev->caps.steering_mode));
820 static int mlx4_slave_cap(struct mlx4_dev *dev)
824 struct mlx4_dev_cap dev_cap;
825 struct mlx4_func_cap func_cap;
826 struct mlx4_init_hca_param hca_param;
829 memset(&hca_param, 0, sizeof(hca_param));
830 err = mlx4_QUERY_HCA(dev, &hca_param);
832 mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
	/* Fail if the HCA has an unknown global capability;
	 * at this time, global_caps should always be zeroed.
	 */
839 if (hca_param.global_caps) {
840 mlx4_err(dev, "Unknown hca global capabilities\n");
844 dev->caps.hca_core_clock = hca_param.hca_core_clock;
846 memset(&dev_cap, 0, sizeof(dev_cap));
847 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
848 err = mlx4_dev_cap(dev, &dev_cap);
850 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
854 err = mlx4_QUERY_FW(dev);
856 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
858 page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size: %d\n", page_size);
860 if (page_size > PAGE_SIZE) {
861 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
862 page_size, PAGE_SIZE);
866 /* Set uar_page_shift for VF */
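	/* The FW encodes the UAR page size as log2(bytes) - 12, so a stored
	 * value of 0 corresponds to 4 KB UAR pages.
	 */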
867 dev->uar_page_shift = hca_param.uar_page_sz + 12;
869 /* Make sure the master uar page size is valid */
870 if (dev->uar_page_shift > PAGE_SHIFT) {
		mlx4_err(dev,
			 "Invalid configuration: uar page size is larger than system page size\n");
876 /* Set reserved_uars based on the uar_page_shift */
877 mlx4_set_num_reserved_uars(dev, &dev_cap);
	/* Although the UAR page size in FW differs from the system page size,
	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
	 * still work under the assumption that uar page size == system page size.
	 */
883 dev->caps.uar_page_size = PAGE_SIZE;
885 memset(&func_cap, 0, sizeof(func_cap));
886 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
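	/* (x | MASK) != MASK exactly when x has a bit outside MASK, so the
	 * check below rejects any PF context behaviour bit this driver does
	 * not understand rather than silently ignoring it.
	 */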
893 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
894 PF_CONTEXT_BEHAVIOUR_MASK) {
895 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
896 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
900 dev->caps.num_ports = func_cap.num_ports;
901 dev->quotas.qp = func_cap.qp_quota;
902 dev->quotas.srq = func_cap.srq_quota;
903 dev->quotas.cq = func_cap.cq_quota;
904 dev->quotas.mpt = func_cap.mpt_quota;
905 dev->quotas.mtt = func_cap.mtt_quota;
906 dev->caps.num_qps = 1 << hca_param.log_num_qps;
907 dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
908 dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
909 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
910 dev->caps.num_eqs = func_cap.max_eq;
911 dev->caps.reserved_eqs = func_cap.reserved_eq;
912 dev->caps.reserved_lkey = func_cap.reserved_lkey;
913 dev->caps.num_pds = MLX4_NUM_PDS;
914 dev->caps.num_mgms = 0;
915 dev->caps.num_amgms = 0;
917 if (dev->caps.num_ports > MLX4_MAX_PORTS) {
918 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
919 dev->caps.num_ports, MLX4_MAX_PORTS);
923 mlx4_replace_zero_macs(dev);
925 dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
931 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
932 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
933 !dev->caps.qp0_qkey) {
938 for (i = 1; i <= dev->caps.num_ports; ++i) {
939 err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
941 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
945 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
946 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
947 dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
948 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
949 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
950 dev->caps.port_mask[i] = dev->caps.port_type[i];
951 dev->caps.phys_port_id[i] = func_cap.phys_port_id;
952 err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
953 &dev->caps.gid_table_len[i],
954 &dev->caps.pkey_table_len[i]);
959 if (dev->caps.uar_page_size * (dev->caps.num_uars -
960 dev->caps.reserved_uars) >
961 pci_resource_len(dev->persist->pdev,
963 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
964 dev->caps.uar_page_size * dev->caps.num_uars,
966 pci_resource_len(dev->persist->pdev, 2));
971 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
972 dev->caps.eqe_size = 64;
973 dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}
979 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
980 dev->caps.cqe_size = 64;
981 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size = 32;
	}
986 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
987 dev->caps.eqe_size = hca_param.eqe_size;
988 dev->caps.eqe_factor = 0;
991 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
992 dev->caps.cqe_size = hca_param.cqe_size;
		/* User still needs to know when CQE > 32B */
994 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
997 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
998 mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
1000 slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
1001 mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
1002 hca_param.rss_ip_frags ? "on" : "off");
1004 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
1005 dev->caps.bf_reg_size)
1006 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;
1008 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
1009 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;
1014 kfree(dev->caps.qp0_qkey);
1015 kfree(dev->caps.qp0_tunnel);
1016 kfree(dev->caps.qp0_proxy);
1017 kfree(dev->caps.qp1_tunnel);
1018 kfree(dev->caps.qp1_proxy);
1019 dev->caps.qp0_qkey = NULL;
1020 dev->caps.qp0_tunnel = NULL;
1021 dev->caps.qp0_proxy = NULL;
1022 dev->caps.qp1_tunnel = NULL;
1023 dev->caps.qp1_proxy = NULL;
1028 static void mlx4_request_modules(struct mlx4_dev *dev)
1031 int has_ib_port = false;
1032 int has_eth_port = false;
1033 #define EN_DRV_NAME "mlx4_en"
1034 #define IB_DRV_NAME "mlx4_ib"
1036 for (port = 1; port <= dev->caps.num_ports; port++) {
1037 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
1039 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
1040 has_eth_port = true;
1044 request_module_nowait(EN_DRV_NAME);
1045 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
1046 request_module_nowait(IB_DRV_NAME);
1050 * Change the port configuration of the device.
1051 * Every user of this function must hold the port mutex.
1053 int mlx4_change_port_types(struct mlx4_dev *dev,
1054 enum mlx4_port_type *port_types)
1060 for (port = 0; port < dev->caps.num_ports; port++) {
1061 /* Change the port type only if the new type is different
1062 * from the current, and not set to Auto */
1063 if (port_types[port] != dev->caps.port_type[port + 1])
1067 mlx4_unregister_device(dev);
1068 for (port = 1; port <= dev->caps.num_ports; port++) {
1069 mlx4_CLOSE_PORT(dev, port);
1070 dev->caps.port_type[port] = port_types[port - 1];
1071 err = mlx4_SET_PORT(dev, port, -1);
1073 mlx4_err(dev, "Failed to set port %d, aborting\n",
1078 mlx4_set_port_mask(dev);
1079 err = mlx4_register_device(dev);
1081 mlx4_err(dev, "Failed to register device\n");
1084 mlx4_request_modules(dev);
1091 static ssize_t show_port_type(struct device *dev,
1092 struct device_attribute *attr,
1095 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1097 struct mlx4_dev *mdev = info->dev;
1101 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
1103 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
1104 sprintf(buf, "auto (%s)\n", type);
1106 sprintf(buf, "%s\n", type);
1111 static int __set_port_type(struct mlx4_port_info *info,
1112 enum mlx4_port_type port_type)
1114 struct mlx4_dev *mdev = info->dev;
1115 struct mlx4_priv *priv = mlx4_priv(mdev);
1116 enum mlx4_port_type types[MLX4_MAX_PORTS];
1117 enum mlx4_port_type new_types[MLX4_MAX_PORTS];
1121 if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
1123 "Requested port type for port %d is not supported on this HCA\n",
1129 mlx4_stop_sense(mdev);
1130 mutex_lock(&priv->port_mutex);
1131 info->tmp_type = port_type;
1133 /* Possible type is always the one that was delivered */
1134 mdev->caps.possible_type[info->port] = info->tmp_type;
1136 for (i = 0; i < mdev->caps.num_ports; i++) {
1137 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
1138 mdev->caps.possible_type[i+1];
1139 if (types[i] == MLX4_PORT_TYPE_AUTO)
1140 types[i] = mdev->caps.port_type[i+1];
1143 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
1144 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
1145 for (i = 1; i <= mdev->caps.num_ports; i++) {
1146 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
1147 mdev->caps.possible_type[i] = mdev->caps.port_type[i];
1153 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
1157 mlx4_do_sense_ports(mdev, new_types, types);
1159 err = mlx4_check_port_params(mdev, new_types);
1163 /* We are about to apply the changes after the configuration
1164 * was verified, no need to remember the temporary types
1166 for (i = 0; i < mdev->caps.num_ports; i++)
1167 priv->port[i + 1].tmp_type = 0;
1169 err = mlx4_change_port_types(mdev, new_types);
1172 mlx4_start_sense(mdev);
1173 mutex_unlock(&priv->port_mutex);
1178 static ssize_t set_port_type(struct device *dev,
1179 struct device_attribute *attr,
1180 const char *buf, size_t count)
1182 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1184 struct mlx4_dev *mdev = info->dev;
1185 enum mlx4_port_type port_type;
1186 static DEFINE_MUTEX(set_port_type_mutex);
1189 mutex_lock(&set_port_type_mutex);
1191 if (!strcmp(buf, "ib\n")) {
1192 port_type = MLX4_PORT_TYPE_IB;
1193 } else if (!strcmp(buf, "eth\n")) {
1194 port_type = MLX4_PORT_TYPE_ETH;
1195 } else if (!strcmp(buf, "auto\n")) {
1196 port_type = MLX4_PORT_TYPE_AUTO;
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
1203 err = __set_port_type(info, port_type);
1206 mutex_unlock(&set_port_type_mutex);
1208 return err ? err : count;
1219 static inline int int_to_ibta_mtu(int mtu)
1222 case 256: return IB_MTU_256;
1223 case 512: return IB_MTU_512;
1224 case 1024: return IB_MTU_1024;
1225 case 2048: return IB_MTU_2048;
1226 case 4096: return IB_MTU_4096;
1231 static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
1234 case IB_MTU_256: return 256;
1235 case IB_MTU_512: return 512;
1236 case IB_MTU_1024: return 1024;
1237 case IB_MTU_2048: return 2048;
1238 case IB_MTU_4096: return 4096;
1243 static ssize_t show_port_ib_mtu(struct device *dev,
1244 struct device_attribute *attr,
1247 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1249 struct mlx4_dev *mdev = info->dev;
1251 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
1252 mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1254 sprintf(buf, "%d\n",
1255 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
1259 static ssize_t set_port_ib_mtu(struct device *dev,
1260 struct device_attribute *attr,
1261 const char *buf, size_t count)
1263 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1265 struct mlx4_dev *mdev = info->dev;
1266 struct mlx4_priv *priv = mlx4_priv(mdev);
1267 int err, port, mtu, ibta_mtu = -1;
1269 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
1270 mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1274 err = kstrtoint(buf, 0, &mtu);
1276 ibta_mtu = int_to_ibta_mtu(mtu);
1278 if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA MTU\n", buf);
1283 mdev->caps.port_ib_mtu[info->port] = ibta_mtu;
1285 mlx4_stop_sense(mdev);
1286 mutex_lock(&priv->port_mutex);
1287 mlx4_unregister_device(mdev);
1288 for (port = 1; port <= mdev->caps.num_ports; port++) {
1289 mlx4_CLOSE_PORT(mdev, port);
1290 err = mlx4_SET_PORT(mdev, port, -1);
1292 mlx4_err(mdev, "Failed to set port %d, aborting\n",
1297 err = mlx4_register_device(mdev);
1299 mutex_unlock(&priv->port_mutex);
1300 mlx4_start_sense(mdev);
1301 return err ? err : count;
1304 /* bond for multi-function device */
1305 #define MAX_MF_BOND_ALLOWED_SLAVES 63
1306 static int mlx4_mf_bond(struct mlx4_dev *dev)
1310 struct mlx4_slaves_pport slaves_port1;
1311 struct mlx4_slaves_pport slaves_port2;
1312 DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
1314 slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
1315 slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
1316 bitmap_and(slaves_port_1_2,
1317 slaves_port1.slaves, slaves_port2.slaves,
1318 dev->persist->num_vfs + 1);
1320 /* only single port vfs are allowed */
1321 if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
		mlx4_warn(dev, "HA mode unsupported for dual-ported VFs\n");
	/* The number of virtual functions is the total number of functions
	 * minus one physical function per port.
	 */
1329 nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
1330 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
1332 /* limit on maximum allowed VFs */
1333 if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
1334 mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
1335 nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
1339 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_warn(dev, "HA mode unsupported for non-DMFS steering\n");
1344 err = mlx4_bond_mac_table(dev);
1347 err = mlx4_bond_vlan_table(dev);
1350 err = mlx4_bond_fs_rules(dev);
1356 (void)mlx4_unbond_vlan_table(dev);
1358 (void)mlx4_unbond_mac_table(dev);
1362 static int mlx4_mf_unbond(struct mlx4_dev *dev)
1366 ret = mlx4_unbond_fs_rules(dev);
		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
1369 ret1 = mlx4_unbond_mac_table(dev);
1371 mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
1374 ret1 = mlx4_unbond_vlan_table(dev);
1376 mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
1382 int mlx4_bond(struct mlx4_dev *dev)
1385 struct mlx4_priv *priv = mlx4_priv(dev);
1387 mutex_lock(&priv->bond_mutex);
1389 if (!mlx4_is_bonded(dev)) {
1390 ret = mlx4_do_bond(dev, true);
1392 mlx4_err(dev, "Failed to bond device: %d\n", ret);
1393 if (!ret && mlx4_is_master(dev)) {
1394 ret = mlx4_mf_bond(dev);
1396 mlx4_err(dev, "bond for multifunction failed\n");
1397 mlx4_do_bond(dev, false);
1402 mutex_unlock(&priv->bond_mutex);
1404 mlx4_dbg(dev, "Device is bonded\n");
1408 EXPORT_SYMBOL_GPL(mlx4_bond);
1410 int mlx4_unbond(struct mlx4_dev *dev)
1413 struct mlx4_priv *priv = mlx4_priv(dev);
1415 mutex_lock(&priv->bond_mutex);
1417 if (mlx4_is_bonded(dev)) {
1420 ret = mlx4_do_bond(dev, false);
1422 mlx4_err(dev, "Failed to unbond device: %d\n", ret);
1423 if (mlx4_is_master(dev))
1424 ret2 = mlx4_mf_unbond(dev);
1426 mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
1431 mutex_unlock(&priv->bond_mutex);
1433 mlx4_dbg(dev, "Device is unbonded\n");
1437 EXPORT_SYMBOL_GPL(mlx4_unbond);
1440 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
1442 u8 port1 = v2p->port1;
1443 u8 port2 = v2p->port2;
1444 struct mlx4_priv *priv = mlx4_priv(dev);
1447 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
1450 mutex_lock(&priv->bond_mutex);
1452 /* zero means keep current mapping for this port */
1454 port1 = priv->v2p.port1;
1456 port2 = priv->v2p.port2;
1458 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
1459 (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
1460 (port1 == 2 && port2 == 1)) {
		/* Besides the boundary checks, cross mapping makes
		 * no sense and is therefore not allowed.
		 */
1464 } else if ((port1 == priv->v2p.port1) &&
1465 (port2 == priv->v2p.port2)) {
1468 err = mlx4_virt2phy_port_map(dev, port1, port2);
1470 mlx4_dbg(dev, "port map changed: [%d][%d]\n",
1472 priv->v2p.port1 = port1;
1473 priv->v2p.port2 = port2;
		mlx4_err(dev, "Failed to change port map: %d\n", err);
1479 mutex_unlock(&priv->bond_mutex);
1482 EXPORT_SYMBOL_GPL(mlx4_port_map_set);
1484 static int mlx4_load_fw(struct mlx4_dev *dev)
1486 struct mlx4_priv *priv = mlx4_priv(dev);
1489 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
1490 GFP_HIGHUSER | __GFP_NOWARN, 0);
1491 if (!priv->fw.fw_icm) {
1492 mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
1496 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1498 mlx4_err(dev, "MAP_FA command failed, aborting\n");
1502 err = mlx4_RUN_FW(dev);
1504 mlx4_err(dev, "RUN_FW command failed, aborting\n");
1514 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
1518 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
1521 struct mlx4_priv *priv = mlx4_priv(dev);
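	/* Each cMPT type (QP, SRQ, CQ, EQ) gets its own region of the cMPT
	 * table: the base of type t is cmpt_base + ((t * cmpt_entry_sz) <<
	 * MLX4_CMPT_SHIFT), i.e. room for 2^MLX4_CMPT_SHIFT entries per type.
	 */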
1525 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
1527 ((u64) (MLX4_CMPT_TYPE_QP *
1528 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1529 cmpt_entry_sz, dev->caps.num_qps,
1530 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1535 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
1537 ((u64) (MLX4_CMPT_TYPE_SRQ *
1538 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1539 cmpt_entry_sz, dev->caps.num_srqs,
1540 dev->caps.reserved_srqs, 0, 0);
1544 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
1546 ((u64) (MLX4_CMPT_TYPE_CQ *
1547 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1548 cmpt_entry_sz, dev->caps.num_cqs,
1549 dev->caps.reserved_cqs, 0, 0);
1553 num_eqs = dev->phys_caps.num_phys_eqs;
1554 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
1556 ((u64) (MLX4_CMPT_TYPE_EQ *
1557 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1558 cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
1565 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1568 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1571 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1577 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1578 struct mlx4_init_hca_param *init_hca, u64 icm_size)
1580 struct mlx4_priv *priv = mlx4_priv(dev);
1585 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1587 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
1591 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
1592 (unsigned long long) icm_size >> 10,
1593 (unsigned long long) aux_pages << 2);
1595 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1596 GFP_HIGHUSER | __GFP_NOWARN, 0);
1597 if (!priv->fw.aux_icm) {
1598 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
1602 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1604 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
1608 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1610 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
1615 num_eqs = dev->phys_caps.num_phys_eqs;
1616 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
1617 init_hca->eqc_base, dev_cap->eqc_entry_sz,
1618 num_eqs, num_eqs, 0, 0);
1620 mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
1621 goto err_unmap_cmpt;
1625 * Reserved MTT entries must be aligned up to a cacheline
1626 * boundary, since the FW will write to them, while the driver
1627 * writes to all other MTT entries. (The variable
1628 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size).
	 */
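	/* e.g., assuming a 64-byte cache line and an 8-byte MTT segment size,
	 * reserved_mtts below is rounded up to the next multiple of 8 segments.
	 */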
1631 dev->caps.reserved_mtts =
1632 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
1633 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
1635 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1637 dev->caps.mtt_entry_sz,
1639 dev->caps.reserved_mtts, 1, 0);
1641 mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1645 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1646 init_hca->dmpt_base,
1647 dev_cap->dmpt_entry_sz,
1649 dev->caps.reserved_mrws, 1, 1);
1651 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1655 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1657 dev_cap->qpc_entry_sz,
1659 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1662 mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1663 goto err_unmap_dmpt;
1666 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1667 init_hca->auxc_base,
1668 dev_cap->aux_entry_sz,
1670 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1673 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1677 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1678 init_hca->altc_base,
1679 dev_cap->altc_entry_sz,
1681 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1684 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1685 goto err_unmap_auxc;
1688 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1689 init_hca->rdmarc_base,
1690 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
1692 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1695 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1696 goto err_unmap_altc;
1699 err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1701 dev_cap->cqc_entry_sz,
1703 dev->caps.reserved_cqs, 0, 0);
1705 mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1706 goto err_unmap_rdmarc;
1709 err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1710 init_hca->srqc_base,
1711 dev_cap->srq_entry_sz,
1713 dev->caps.reserved_srqs, 0, 0);
1715 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1720 * For flow steering device managed mode it is required to use
1721 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1722 * required, but for simplicity just map the whole multicast
1723 * group table now. The table isn't very big and it's a lot
1724 * easier than trying to track ref counts.
1726 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1728 mlx4_get_mgm_entry_size(dev),
1729 dev->caps.num_mgms + dev->caps.num_amgms,
1730 dev->caps.num_mgms + dev->caps.num_amgms,
1733 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1740 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1743 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1746 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1749 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1752 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1755 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1758 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1761 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1764 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1767 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1768 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1769 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1770 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1773 mlx4_UNMAP_ICM_AUX(dev);
1776 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1781 static void mlx4_free_icms(struct mlx4_dev *dev)
1783 struct mlx4_priv *priv = mlx4_priv(dev);
1785 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1786 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1787 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1788 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1789 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1790 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1791 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1792 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1793 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1794 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1795 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1796 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1797 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1798 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1800 mlx4_UNMAP_ICM_AUX(dev);
1801 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1804 static void mlx4_slave_exit(struct mlx4_dev *dev)
1806 struct mlx4_priv *priv = mlx4_priv(dev);
1808 mutex_lock(&priv->cmd.slave_cmd_mutex);
1809 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1811 mlx4_warn(dev, "Failed to close slave function\n");
1812 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1815 static int map_bf_area(struct mlx4_dev *dev)
1817 struct mlx4_priv *priv = mlx4_priv(dev);
1818 resource_size_t bf_start;
1819 resource_size_t bf_len;
1822 if (!dev->caps.bf_reg_size)
1825 bf_start = pci_resource_start(dev->persist->pdev, 2) +
1826 (dev->caps.num_uars << PAGE_SHIFT);
1827 bf_len = pci_resource_len(dev->persist->pdev, 2) -
1828 (dev->caps.num_uars << PAGE_SHIFT);
1829 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1830 if (!priv->bf_mapping)
1836 static void unmap_bf_area(struct mlx4_dev *dev)
1838 if (mlx4_priv(dev)->bf_mapping)
1839 io_mapping_free(mlx4_priv(dev)->bf_mapping);
1842 u64 mlx4_read_clock(struct mlx4_dev *dev)
1844 u32 clockhi, clocklo, clockhi1;
1847 struct mlx4_priv *priv = mlx4_priv(dev);
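	/* Read hi, lo, hi and retry (up to 10 times) until the two high-word
	 * reads match; this guards against the low word wrapping around
	 * between the two MMIO reads, seqlock-style.
	 */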
1849 for (i = 0; i < 10; i++) {
1850 clockhi = swab32(readl(priv->clock_mapping));
1851 clocklo = swab32(readl(priv->clock_mapping + 4));
1852 clockhi1 = swab32(readl(priv->clock_mapping));
1853 if (clockhi == clockhi1)
1857 cycles = (u64) clockhi << 32 | (u64) clocklo;
1861 EXPORT_SYMBOL_GPL(mlx4_read_clock);
1864 static int map_internal_clock(struct mlx4_dev *dev)
1866 struct mlx4_priv *priv = mlx4_priv(dev);
1868 priv->clock_mapping =
1869 ioremap(pci_resource_start(dev->persist->pdev,
1870 priv->fw.clock_bar) +
1871 priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1873 if (!priv->clock_mapping)
1879 int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1880 struct mlx4_clock_params *params)
1882 struct mlx4_priv *priv = mlx4_priv(dev);
1884 if (mlx4_is_slave(dev))
1890 params->bar = priv->fw.clock_bar;
1891 params->offset = priv->fw.clock_offset;
1892 params->size = MLX4_CLOCK_SIZE;
1896 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
1898 static void unmap_internal_clock(struct mlx4_dev *dev)
1900 struct mlx4_priv *priv = mlx4_priv(dev);
1902 if (priv->clock_mapping)
1903 iounmap(priv->clock_mapping);
1906 static void mlx4_close_hca(struct mlx4_dev *dev)
1908 unmap_internal_clock(dev);
1910 if (mlx4_is_slave(dev))
1911 mlx4_slave_exit(dev);
1913 mlx4_CLOSE_HCA(dev, 0);
1914 mlx4_free_icms(dev);
1918 static void mlx4_close_fw(struct mlx4_dev *dev)
1920 if (!mlx4_is_slave(dev)) {
1922 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
1926 static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1928 #define COMM_CHAN_OFFLINE_OFFSET 0x09
1933 struct mlx4_priv *priv = mlx4_priv(dev);
1935 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
1936 while (time_before(jiffies, end)) {
1937 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
1938 MLX4_COMM_CHAN_FLAGS));
1939 offline_bit = (comm_flags &
1940 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
		/* There are cases as part of AER/Reset flow that PF needs
		 * around 100 msec to load. We therefore sleep for 100 msec
		 * to allow other tasks to make use of that CPU during this
		 * time interval.
		 */
1950 mlx4_err(dev, "Communication channel is offline.\n");
1954 static void mlx4_reset_vf_support(struct mlx4_dev *dev)
1956 #define COMM_CHAN_RST_OFFSET 0x1e
1958 struct mlx4_priv *priv = mlx4_priv(dev);
1962 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
1963 MLX4_COMM_CHAN_CAPS));
1964 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
1967 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
1970 static int mlx4_init_slave(struct mlx4_dev *dev)
1972 struct mlx4_priv *priv = mlx4_priv(dev);
1973 u64 dma = (u64) priv->mfunc.vhcr_dma;
1974 int ret_from_reset = 0;
1976 u32 cmd_channel_ver;
1978 if (atomic_read(&pf_loading)) {
1979 mlx4_warn(dev, "PF is not ready - Deferring probe\n");
1980 return -EPROBE_DEFER;
1983 mutex_lock(&priv->cmd.slave_cmd_mutex);
1984 priv->cmd.max_cmds = 1;
1985 if (mlx4_comm_check_offline(dev)) {
1986 mlx4_err(dev, "PF is not responsive, skipping initialization\n");
1990 mlx4_reset_vf_support(dev);
1991 mlx4_warn(dev, "Sending reset\n");
1992 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
1993 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
	/* If we are in the middle of an FLR, the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.
	 */
1996 if (ret_from_reset) {
1997 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
1998 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
1999 mutex_unlock(&priv->cmd.slave_cmd_mutex);
2000 return -EPROBE_DEFER;
2005 /* check the driver version - the slave I/F revision
2006 * must match the master's */
2007 slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
2008 cmd_channel_ver = mlx4_comm_get_version();
2010 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
2011 MLX4_COMM_GET_IF_REV(slave_read)) {
2012 mlx4_err(dev, "slave driver version is not supported by the master\n");
2016 mlx4_warn(dev, "Sending vhcr0\n");
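	/* Hand the 64-bit VHCR DMA address to the PF 16 bits at a time over
	 * the comm channel: VHCR0 carries bits 63:48, VHCR1 bits 47:32,
	 * VHCR2 bits 31:16, and VHCR_EN the low 16 bits (which also enables
	 * the VHCR).
	 */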
2017 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
2018 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2020 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
2021 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2023 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
2024 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2026 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
2027 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2030 mutex_unlock(&priv->cmd.slave_cmd_mutex);
2034 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
2036 mutex_unlock(&priv->cmd.slave_cmd_mutex);
2040 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
2044 for (i = 1; i <= dev->caps.num_ports; i++) {
2045 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
2046 dev->caps.gid_table_len[i] =
2047 mlx4_get_slave_num_gids(dev, 0, i);
2049 dev->caps.gid_table_len[i] = 1;
2050 dev->caps.pkey_table_len[i] =
2051 dev->phys_caps.pkey_phys_table_len[i] - 1;
2055 static int choose_log_fs_mgm_entry_size(int qp_per_entry)
2057 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
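	/* Return the smallest i in [MLX4_MIN_MGM_LOG_ENTRY_SIZE,
	 * MLX4_MAX_MGM_LOG_ENTRY_SIZE] whose capacity, 4 * ((1 << i) / 16 - 2),
	 * covers qp_per_entry, or -1 if none does; e.g. qp_per_entry = 200
	 * yields i = 10 (capacity 248), since i = 9 holds only 120.
	 */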
2059 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
2061 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
2065 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
2068 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
2070 switch (dmfs_high_steer_mode) {
2071 case MLX4_STEERING_DMFS_A0_DEFAULT:
2072 return "default performance";
2074 case MLX4_STEERING_DMFS_A0_DYNAMIC:
2075 return "dynamic hybrid mode";
2077 case MLX4_STEERING_DMFS_A0_STATIC:
2078 return "performance optimized for limited rule configuration (static)";
2080 case MLX4_STEERING_DMFS_A0_DISABLE:
2081 return "disabled performance optimized steering";
2083 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
2084 return "performance optimized steering not supported";
2087 return "Unrecognized mode";
2091 #define MLX4_DMFS_A0_STEERING (1UL << 2)
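/* A non-positive log_num_mgm_entry_size selects device-managed steering, and
 * the magnitude of a negative value is read as a flag bitmask: e.g. a module
 * parameter of -4 sets bit 2 (MLX4_DMFS_A0_STEERING above) and requests the
 * DMFS A0 static optimization.
 */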
2093 static void choose_steering_mode(struct mlx4_dev *dev,
2094 struct mlx4_dev_cap *dev_cap)
2096 if (mlx4_log_num_mgm_entry_size <= 0) {
2097 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
2098 if (dev->caps.dmfs_high_steer_mode ==
2099 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2100 mlx4_err(dev, "DMFS high rate mode not supported\n");
2102 dev->caps.dmfs_high_steer_mode =
2103 MLX4_STEERING_DMFS_A0_STATIC;
2107 if (mlx4_log_num_mgm_entry_size <= 0 &&
2108 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
2109 (!mlx4_is_mfunc(dev) ||
2110 (dev_cap->fs_max_num_qp_per_entry >=
2111 (dev->persist->num_vfs + 1))) &&
2112 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
2113 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
2114 dev->oper_log_mgm_entry_size =
2115 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
2116 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
2117 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
2118 dev->caps.fs_log_max_ucast_qp_range_size =
2119 dev_cap->fs_log_max_ucast_qp_range_size;
2121 if (dev->caps.dmfs_high_steer_mode !=
2122 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2123 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
2124 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
2125 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2126 dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
2128 dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
2130 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
2131 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2132 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
2134 dev->oper_log_mgm_entry_size =
2135 mlx4_log_num_mgm_entry_size > 0 ?
2136 mlx4_log_num_mgm_entry_size :
2137 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
2138 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
2140 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
2141 mlx4_steering_mode_str(dev->caps.steering_mode),
2142 dev->oper_log_mgm_entry_size,
2143 mlx4_log_num_mgm_entry_size);
2146 static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
2147 struct mlx4_dev_cap *dev_cap)
2149 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2150 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
2151 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
2153 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
2155 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
2156 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
2159 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
2162 struct mlx4_port_cap port_cap;
2164 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2167 for (i = 1; i <= dev->caps.num_ports; i++) {
2168 if (mlx4_dev_port(dev, i, &port_cap)) {
2170 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
2171 } else if ((dev->caps.dmfs_high_steer_mode !=
2172 MLX4_STEERING_DMFS_A0_DEFAULT) &&
2173 (port_cap.dmfs_optimized_state ==
2174 !!(dev->caps.dmfs_high_steer_mode ==
2175 MLX4_STEERING_DMFS_A0_DISABLE))) {
2177 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
2178 dmfs_high_rate_steering_mode_str(
2179 dev->caps.dmfs_high_steer_mode),
2180 (port_cap.dmfs_optimized_state ?
2181 "enabled" : "disabled"));
2188 static int mlx4_init_fw(struct mlx4_dev *dev)
2190 struct mlx4_mod_stat_cfg mlx4_cfg;
2193 if (!mlx4_is_slave(dev)) {
2194 err = mlx4_QUERY_FW(dev);
2197 mlx4_info(dev, "non-primary physical function, skipping\n");
2199 mlx4_err(dev, "QUERY_FW command failed, aborting\n");
2203 err = mlx4_load_fw(dev);
2205 mlx4_err(dev, "Failed to start FW, aborting\n");
2209 mlx4_cfg.log_pg_sz_m = 1;
2210 mlx4_cfg.log_pg_sz = 0;
2211 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
2213 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
2219 static int mlx4_init_hca(struct mlx4_dev *dev)
2221 struct mlx4_priv *priv = mlx4_priv(dev);
2222 struct mlx4_adapter adapter;
2223 struct mlx4_dev_cap dev_cap;
2224 struct mlx4_profile profile;
2225 struct mlx4_init_hca_param init_hca;
2227 struct mlx4_config_dev_params params;
2230 if (!mlx4_is_slave(dev)) {
2231 err = mlx4_dev_cap(dev, &dev_cap);
2233 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
2237 choose_steering_mode(dev, &dev_cap);
2238 choose_tunnel_offload_mode(dev, &dev_cap);
2240 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
2241 mlx4_is_master(dev))
2242 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
2244 err = mlx4_get_phys_port_id(dev);
2246 mlx4_err(dev, "Fail to get physical port id\n");
2248 if (mlx4_is_master(dev))
2249 mlx4_parav_master_pf_caps(dev);
2251 if (mlx4_low_memory_profile()) {
2252 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
2253 profile = low_mem_profile;
2255 profile = default_profile;
2257 if (dev->caps.steering_mode ==
2258 MLX4_STEERING_MODE_DEVICE_MANAGED)
2259 profile.num_mcg = MLX4_FS_NUM_MCG;
2261 icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
2263 if ((long long) icm_size < 0) {
2268 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
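/* uar_page_sz is encoded as log2(page size) - 12, so 0 means 4 KB UAR
 * pages.  With enable_4k_uar the device keeps 4 KB UAR pages even when
 * PAGE_SIZE is larger, and log_uar_sz grows by
 * PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT so the total UAR space stays the same.
 */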
2270 if (enable_4k_uar) {
2271 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
2272 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
2273 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2275 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
2276 init_hca.uar_page_sz = PAGE_SHIFT - 12;
2279 init_hca.mw_enabled = 0;
2280 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2281 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2282 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
2284 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
2288 err = mlx4_INIT_HCA(dev, &init_hca);
2290 mlx4_err(dev, "INIT_HCA command failed, aborting\n");
2294 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
2295 err = mlx4_query_func(dev, &dev_cap);
2297 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
2299 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
2300 dev->caps.num_eqs = dev_cap.max_eqs;
2301 dev->caps.reserved_eqs = dev_cap.reserved_eqs;
2302 dev->caps.reserved_uars = dev_cap.reserved_uars;
2307 * If timestamping (TS) is supported by the FW,
2308 * read the HCA frequency via the QUERY_HCA command
2310 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
2311 memset(&init_hca, 0, sizeof(init_hca));
2312 err = mlx4_QUERY_HCA(dev, &init_hca);
2314 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
2315 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2317 dev->caps.hca_core_clock =
2318 init_hca.hca_core_clock;
2321 /* If we got HCA frequency 0, disable timestamping
2322 * to avoid dividing by zero
2324 if (!dev->caps.hca_core_clock) {
2325 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2327 "HCA frequency is 0 - timestamping is not supported\n");
2328 } else if (map_internal_clock(dev)) {
2330 * Map the internal clock;
2331 * on failure, disable timestamping
2333 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2334 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
2338 if (dev->caps.dmfs_high_steer_mode !=
2339 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
2340 if (mlx4_validate_optimized_steering(dev))
2341 mlx4_warn(dev, "Optimized steering validation failed\n");
2343 if (dev->caps.dmfs_high_steer_mode ==
2344 MLX4_STEERING_DMFS_A0_DISABLE) {
2345 dev->caps.dmfs_high_rate_qpn_base =
2346 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
2347 dev->caps.dmfs_high_rate_qpn_range =
2348 MLX4_A0_STEERING_TABLE_SIZE;
2351 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
2352 dmfs_high_rate_steering_mode_str(
2353 dev->caps.dmfs_high_steer_mode));
2356 err = mlx4_init_slave(dev);
2358 if (err != -EPROBE_DEFER)
2359 mlx4_err(dev, "Failed to initialize slave\n");
2363 err = mlx4_slave_cap(dev);
2365 mlx4_err(dev, "Failed to obtain slave caps\n");
2370 if (map_bf_area(dev))
2371 mlx4_dbg(dev, "Failed to map blue flame area\n");
2373 /* Only the master sets the ports; all other functions get them from it. */
2374 if (!mlx4_is_slave(dev))
2375 mlx4_set_port_mask(dev);
2377 err = mlx4_QUERY_ADAPTER(dev, &adapter);
2379 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
2383 /* Query CONFIG_DEV parameters */
2384 err = mlx4_config_dev_retrieval(dev, ¶ms);
2385 if (err && err != -EOPNOTSUPP) {
2386 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
2388 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
2389 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
2391 priv->eq_table.inta_pin = adapter.inta_pin;
2392 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
2397 unmap_internal_clock(dev);
2400 if (mlx4_is_slave(dev)) {
2401 kfree(dev->caps.qp0_qkey);
2402 kfree(dev->caps.qp0_tunnel);
2403 kfree(dev->caps.qp0_proxy);
2404 kfree(dev->caps.qp1_tunnel);
2405 kfree(dev->caps.qp1_proxy);
2409 if (mlx4_is_slave(dev))
2410 mlx4_slave_exit(dev);
2412 mlx4_CLOSE_HCA(dev, 0);
2415 if (!mlx4_is_slave(dev))
2416 mlx4_free_icms(dev);
2421 static int mlx4_init_counters_table(struct mlx4_dev *dev)
2423 struct mlx4_priv *priv = mlx4_priv(dev);
2426 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2429 if (!dev->caps.max_counters)
2432 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
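/* Rounding up to a power of two leaves nent_pow2 - max_counters indices
 * with no real counter behind them; reserving those plus one more from
 * the top leaves max_counters - 1 allocatable counters and keeps the
 * final index free for the sink.
 */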
2433 /* reserve last counter index for sink counter */
2434 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
2436 nent_pow2 - dev->caps.max_counters + 1);
2439 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
2441 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2444 if (!dev->caps.max_counters)
2447 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
2450 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
2452 struct mlx4_priv *priv = mlx4_priv(dev);
2455 for (port = 0; port < dev->caps.num_ports; port++)
2456 if (priv->def_counter[port] != -1)
2457 mlx4_counter_free(dev, priv->def_counter[port]);
2460 static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2462 struct mlx4_priv *priv = mlx4_priv(dev);
2466 for (port = 0; port < dev->caps.num_ports; port++)
2467 priv->def_counter[port] = -1;
2469 for (port = 0; port < dev->caps.num_ports; port++) {
2470 err = mlx4_counter_alloc(dev, &idx);
2472 if (!err || err == -ENOSPC) {
2473 priv->def_counter[port] = idx;
2474 } else if (err == -ENOENT) {
2477 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2478 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2479 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2480 MLX4_SINK_COUNTER_INDEX(dev));
2483 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2484 __func__, port + 1, err);
2485 mlx4_cleanup_default_counters(dev);
2489 mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
2490 __func__, priv->def_counter[port], port + 1);
2496 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2498 struct mlx4_priv *priv = mlx4_priv(dev);
2500 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2503 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
2505 *idx = MLX4_SINK_COUNTER_INDEX(dev);
2512 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2517 if (mlx4_is_mfunc(dev)) {
2518 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
2519 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
2520 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2522 *idx = get_param_l(&out_param);
2526 return __mlx4_counter_alloc(dev, idx);
2528 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
2530 static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
2533 struct mlx4_cmd_mailbox *if_stat_mailbox;
2535 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
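/* The input modifier carries the counter index in its low byte, and the
 * MLX4_QUERY_IF_STAT_RESET flag asks the FW to clear the counter as a
 * side effect of the query; the query result itself is discarded below,
 * since only the reset matters to this helper.
 */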
2537 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
2538 if (IS_ERR(if_stat_mailbox))
2539 return PTR_ERR(if_stat_mailbox);
2541 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
2542 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
2545 mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
2549 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2551 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2554 if (idx == MLX4_SINK_COUNTER_INDEX(dev))
2557 __mlx4_clear_if_stat(dev, idx);
2559 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
2563 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2567 if (mlx4_is_mfunc(dev)) {
2568 set_param_l(&in_param, idx);
2569 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
2570 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
2574 __mlx4_counter_free(dev, idx);
2576 EXPORT_SYMBOL_GPL(mlx4_counter_free);
2578 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
2580 struct mlx4_priv *priv = mlx4_priv(dev);
2582 return priv->def_counter[port - 1];
2584 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
2586 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
2588 struct mlx4_priv *priv = mlx4_priv(dev);
2590 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2592 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
2594 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
2596 struct mlx4_priv *priv = mlx4_priv(dev);
2598 return priv->mfunc.master.vf_admin[entry].vport[port].guid;
2600 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
2602 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
2604 struct mlx4_priv *priv = mlx4_priv(dev);
2611 get_random_bytes((char *)&guid, sizeof(guid));
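/* In the EUI-64 view of the big-endian GUID, bit 56 of the host-order
 * value lands in bit 0 of the first octet (the group/multicast bit) and
 * bit 57 in bit 1 (the local/universal bit): clear the former and set
 * the latter so the random GUID is a locally administered unicast
 * identifier.
 */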
2612 guid &= ~(cpu_to_be64(1ULL << 56));
2613 guid |= cpu_to_be64(1ULL << 57);
2614 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2617 static int mlx4_setup_hca(struct mlx4_dev *dev)
2619 struct mlx4_priv *priv = mlx4_priv(dev);
2622 __be32 ib_port_default_caps;
2624 err = mlx4_init_uar_table(dev);
2626 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
2630 err = mlx4_uar_alloc(dev, &priv->driver_uar);
2632 mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
2633 goto err_uar_table_free;
2636 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
2638 mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
2643 err = mlx4_init_pd_table(dev);
2645 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
2649 err = mlx4_init_xrcd_table(dev);
2651 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
2652 goto err_pd_table_free;
2655 err = mlx4_init_mr_table(dev);
2657 mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
2658 goto err_xrcd_table_free;
2661 if (!mlx4_is_slave(dev)) {
2662 err = mlx4_init_mcg_table(dev);
2664 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
2665 goto err_mr_table_free;
2667 err = mlx4_config_mad_demux(dev);
2669 mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
2670 goto err_mcg_table_free;
2674 err = mlx4_init_eq_table(dev);
2676 mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
2677 goto err_mcg_table_free;
2680 err = mlx4_cmd_use_events(dev);
2682 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
2683 goto err_eq_table_free;
2686 err = mlx4_NOP(dev);
2688 if (dev->flags & MLX4_FLAG_MSI_X) {
2689 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
2690 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2691 mlx4_warn(dev, "Trying again without MSI-X\n");
2693 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
2694 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2695 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
2701 mlx4_dbg(dev, "NOP command IRQ test passed\n");
2703 err = mlx4_init_cq_table(dev);
2705 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
2709 err = mlx4_init_srq_table(dev);
2711 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
2712 goto err_cq_table_free;
2715 err = mlx4_init_qp_table(dev);
2717 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
2718 goto err_srq_table_free;
2721 if (!mlx4_is_slave(dev)) {
2722 err = mlx4_init_counters_table(dev);
2723 if (err && err != -ENOENT) {
2724 mlx4_err(dev, "Failed to initialize counters table, aborting\n");
2725 goto err_qp_table_free;
2729 err = mlx4_allocate_default_counters(dev);
2731 mlx4_err(dev, "Failed to allocate default counters, aborting\n");
2732 goto err_counters_table_free;
2735 if (!mlx4_is_slave(dev)) {
2736 for (port = 1; port <= dev->caps.num_ports; port++) {
2737 ib_port_default_caps = 0;
2738 err = mlx4_get_port_ib_caps(dev, port,
2739 &ib_port_default_caps);
2741 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
2743 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
2745 /* initialize per-slave default ib port capabilities */
2746 if (mlx4_is_master(dev)) {
2748 for (i = 0; i < dev->num_slaves; i++) {
2749 if (i == mlx4_master_func_num(dev))
2751 priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
2752 ib_port_default_caps;
2756 if (mlx4_is_mfunc(dev))
2757 dev->caps.port_ib_mtu[port] = IB_MTU_2048;
2759 dev->caps.port_ib_mtu[port] = IB_MTU_4096;
2761 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
2762 dev->caps.pkey_table_len[port] : -1);
2764 mlx4_err(dev, "Failed to set port %d, aborting\n",
2766 goto err_default_counters_free;
2773 err_default_counters_free:
2774 mlx4_cleanup_default_counters(dev);
2776 err_counters_table_free:
2777 if (!mlx4_is_slave(dev))
2778 mlx4_cleanup_counters_table(dev);
2781 mlx4_cleanup_qp_table(dev);
2784 mlx4_cleanup_srq_table(dev);
2787 mlx4_cleanup_cq_table(dev);
2790 mlx4_cmd_use_polling(dev);
2793 mlx4_cleanup_eq_table(dev);
2796 if (!mlx4_is_slave(dev))
2797 mlx4_cleanup_mcg_table(dev);
2800 mlx4_cleanup_mr_table(dev);
2802 err_xrcd_table_free:
2803 mlx4_cleanup_xrcd_table(dev);
2806 mlx4_cleanup_pd_table(dev);
2812 mlx4_uar_free(dev, &priv->driver_uar);
2815 mlx4_cleanup_uar_table(dev);
2819 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
2821 int requested_cpu = 0;
2822 struct mlx4_priv *priv = mlx4_priv(dev);
2827 if (eqn > dev->caps.num_comp_vectors)
2830 for (i = 1; i < port; i++)
2831 off += mlx4_get_eqs_per_port(dev, i);
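/* eqn counts the asynchronous EQ as well; subtract the per-port offset
 * and, past MLX4_EQ_ASYNC, one extra slot, so completion vectors map
 * onto CPUs starting from 0 for each port.
 */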
2833 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
2835 /* Meaning EQs are shared, and this call comes from the second port */
2836 if (requested_cpu < 0)
2839 eq = &priv->eq_table.eq[eqn];
2841 if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
2844 cpumask_set_cpu(requested_cpu, eq->affinity_mask);
2849 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2851 struct mlx4_priv *priv = mlx4_priv(dev);
2852 struct msix_entry *entries;
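/* Request one completion vector per online CPU per port plus one for
 * the asynchronous EQ, then clamp to the EQs the device actually
 * exposes and to MAX_MSIX.
 */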
2857 int nreq = dev->caps.num_ports * num_online_cpus() + 1;
2859 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
2861 if (nreq > MAX_MSIX)
2864 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
2868 for (i = 0; i < nreq; ++i)
2869 entries[i].entry = i;
2871 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2874 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
2878 /* 1 is reserved for events (asynchronous EQ) */
2879 dev->caps.num_comp_vectors = nreq - 1;
2881 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
2882 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
2883 dev->caps.num_ports);
2885 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
2886 if (i == MLX4_EQ_ASYNC)
2889 priv->eq_table.eq[i].irq =
2890 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
2892 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
2893 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2894 dev->caps.num_ports);
2895 /* We don't set an affinity hint when there aren't enough EQs */
2900 priv->eq_table.eq[i].actv_ports.ports);
2901 if (mlx4_init_affinity_hint(dev, port + 1, i))
2902 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
2905 /* We divide the EQs evenly between the two ports.
2906 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
2907 * refers to the number of EQs per port
2908 * (i.e. eqs_per_port). Theoretically, we would like to
2909 * write something like (i + 1) % eqs_per_port == 0.
2910 * However, since there's an asynchronous EQ, we have
2911 * to skip over it by comparing this condition to
2912 * !!((i + 1) > MLX4_EQ_ASYNC).
2914 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
2916 (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
2917 !!((i + 1) > MLX4_EQ_ASYNC))
2918 /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
2919 * everything is shared anyway.
2924 dev->flags |= MLX4_FLAG_MSI_X;
2931 dev->caps.num_comp_vectors = 1;
2933 BUG_ON(MLX4_EQ_ASYNC >= 2);
2934 for (i = 0; i < 2; ++i) {
2935 priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
2936 if (i != MLX4_EQ_ASYNC) {
2937 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2938 dev->caps.num_ports);
2943 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2945 struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
2946 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
2949 err = devlink_port_register(devlink, &info->devlink_port, port);
2955 if (!mlx4_is_slave(dev)) {
2956 mlx4_init_mac_table(dev, &info->mac_table);
2957 mlx4_init_vlan_table(dev, &info->vlan_table);
2958 mlx4_init_roce_gid_table(dev, &info->gid_table);
2959 info->base_qpn = mlx4_get_base_qpn(dev, port);
2962 sprintf(info->dev_name, "mlx4_port%d", port);
2963 info->port_attr.attr.name = info->dev_name;
2964 if (mlx4_is_mfunc(dev))
2965 info->port_attr.attr.mode = S_IRUGO;
2967 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
2968 info->port_attr.store = set_port_type;
2970 info->port_attr.show = show_port_type;
2971 sysfs_attr_init(&info->port_attr.attr);
2973 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
2975 mlx4_err(dev, "Failed to create file for port %d\n", port);
2976 devlink_port_unregister(&info->devlink_port);
2980 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
2981 info->port_mtu_attr.attr.name = info->dev_mtu_name;
2982 if (mlx4_is_mfunc(dev))
2983 info->port_mtu_attr.attr.mode = S_IRUGO;
2985 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
2986 info->port_mtu_attr.store = set_port_ib_mtu;
2988 info->port_mtu_attr.show = show_port_ib_mtu;
2989 sysfs_attr_init(&info->port_mtu_attr.attr);
2991 err = device_create_file(&dev->persist->pdev->dev,
2992 &info->port_mtu_attr);
2994 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
2995 device_remove_file(&info->dev->persist->pdev->dev,
2997 devlink_port_unregister(&info->devlink_port);
3004 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
3009 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
3010 device_remove_file(&info->dev->persist->pdev->dev,
3011 &info->port_mtu_attr);
3012 devlink_port_unregister(&info->devlink_port);
3014 #ifdef CONFIG_RFS_ACCEL
3015 free_irq_cpu_rmap(info->rmap);
3020 static int mlx4_init_steering(struct mlx4_dev *dev)
3022 struct mlx4_priv *priv = mlx4_priv(dev);
3023 int num_entries = dev->caps.num_ports;
3026 priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer), GFP_KERNEL);
3030 for (i = 0; i < num_entries; i++)
3031 for (j = 0; j < MLX4_NUM_STEERS; j++) {
3032 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
3033 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
3038 static void mlx4_clear_steering(struct mlx4_dev *dev)
3040 struct mlx4_priv *priv = mlx4_priv(dev);
3041 struct mlx4_steer_index *entry, *tmp_entry;
3042 struct mlx4_promisc_qp *pqp, *tmp_pqp;
3043 int num_entries = dev->caps.num_ports;
3046 for (i = 0; i < num_entries; i++) {
3047 for (j = 0; j < MLX4_NUM_STEERS; j++) {
3048 list_for_each_entry_safe(pqp, tmp_pqp,
3049 &priv->steer[i].promisc_qps[j],
3051 list_del(&pqp->list);
3054 list_for_each_entry_safe(entry, tmp_entry,
3055 &priv->steer[i].steer_entries[j],
3057 list_del(&entry->list);
3058 list_for_each_entry_safe(pqp, tmp_pqp,
3061 list_del(&pqp->list);
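/* Flatten devfn into one ordinal (8 functions per device number).  When
 * many VFs are enabled they occupy consecutive function numbers that can
 * spill across device numbers, so this gives a VF's position in that
 * sequence; __mlx4_init_one() uses it to decide which VFs to skip.
 */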
3071 static int extended_func_num(struct pci_dev *pdev)
3073 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
3076 #define MLX4_OWNER_BASE 0x8069c
3077 #define MLX4_OWNER_SIZE 4
3079 static int mlx4_get_ownership(struct mlx4_dev *dev)
3081 void __iomem *owner;
3084 if (pci_channel_offline(dev->persist->pdev))
3087 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3091 mlx4_err(dev, "Failed to obtain ownership bit\n");
3100 static void mlx4_free_ownership(struct mlx4_dev *dev)
3102 void __iomem *owner;
3104 if (pci_channel_offline(dev->persist->pdev))
3107 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3111 mlx4_err(dev, "Failed to obtain ownership bit\n");
3119 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
3120 !!((flags) & MLX4_FLAG_MASTER))
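/* The SRIOV and MASTER flags must agree: a function that enabled SR-IOV
 * must be the master, and a slave must not carry the SR-IOV flag.
 * mlx4_enable_sriov() can strip MLX4_FLAG_MASTER on failure, so callers
 * re-check the combination with this macro.
 */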
3122 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
3123 u8 total_vfs, int existing_vfs, int reset_flow)
3125 u64 dev_flags = dev->flags;
3127 int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
3131 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
3138 atomic_inc(&pf_loading);
3139 if (dev->flags & MLX4_FLAG_SRIOV) {
3140 if (existing_vfs != total_vfs) {
3141 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
3142 existing_vfs, total_vfs);
3143 total_vfs = existing_vfs;
3147 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL);
3148 if (!dev->dev_vfs) {
3149 mlx4_err(dev, "Failed to allocate memory for VFs\n");
3153 if (!(dev->flags & MLX4_FLAG_SRIOV)) {
3154 if (total_vfs > fw_enabled_sriov_vfs) {
3155 mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
3156 total_vfs, fw_enabled_sriov_vfs);
3160 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
3161 err = pci_enable_sriov(pdev, total_vfs);
3164 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
3168 mlx4_warn(dev, "Running in master mode\n");
3169 dev_flags |= MLX4_FLAG_SRIOV |
3171 dev_flags &= ~MLX4_FLAG_SLAVE;
3172 dev->persist->num_vfs = total_vfs;
3177 atomic_dec(&pf_loading);
3179 dev->persist->num_vfs = 0;
3180 kfree(dev->dev_vfs);
3181 dev->dev_vfs = NULL;
3182 return dev_flags & ~MLX4_FLAG_MASTER;
3186 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
3189 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
3192 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
3193 /* Checking for 64 VFs as a limitation of CX2 */
3194 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
3195 requested_vfs >= 64) {
3196 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
3198 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
3203 static int mlx4_pci_enable_device(struct mlx4_dev *dev)
3205 struct pci_dev *pdev = dev->persist->pdev;
3208 mutex_lock(&dev->persist->pci_status_mutex);
3209 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
3210 err = pci_enable_device(pdev);
3212 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
3214 mutex_unlock(&dev->persist->pci_status_mutex);
3219 static void mlx4_pci_disable_device(struct mlx4_dev *dev)
3221 struct pci_dev *pdev = dev->persist->pdev;
3223 mutex_lock(&dev->persist->pci_status_mutex);
3224 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
3225 pci_disable_device(pdev);
3226 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
3228 mutex_unlock(&dev->persist->pci_status_mutex);
3231 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3232 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3235 struct mlx4_dev *dev;
3240 struct mlx4_dev_cap *dev_cap = NULL;
3241 int existing_vfs = 0;
3245 INIT_LIST_HEAD(&priv->ctx_list);
3246 spin_lock_init(&priv->ctx_lock);
3248 mutex_init(&priv->port_mutex);
3249 mutex_init(&priv->bond_mutex);
3251 INIT_LIST_HEAD(&priv->pgdir_list);
3252 mutex_init(&priv->pgdir_mutex);
3253 spin_lock_init(&priv->cmd.context_lock);
3255 INIT_LIST_HEAD(&priv->bf_list);
3256 mutex_init(&priv->bf_mutex);
3258 dev->rev_id = pdev->revision;
3259 dev->numa_node = dev_to_node(&pdev->dev);
3261 /* Detect if this device is a virtual function */
3262 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3263 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
3264 dev->flags |= MLX4_FLAG_SLAVE;
3266 /* We reset the device and enable SRIOV only for physical
3267 * devices. Try to claim ownership on the device;
3268 * if already taken, skip -- do not allow multiple PFs */
3269 err = mlx4_get_ownership(dev);
3274 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
3279 atomic_set(&priv->opreq_count, 0);
3280 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
3283 * Now reset the HCA before we touch the PCI capabilities or
3284 * attempt a firmware command, since a boot ROM may have left
3285 * the HCA in an undefined state.
3287 err = mlx4_reset(dev);
3289 mlx4_err(dev, "Failed to reset HCA, aborting\n");
3294 dev->flags = MLX4_FLAG_MASTER;
3295 existing_vfs = pci_num_vf(pdev);
3297 dev->flags |= MLX4_FLAG_SRIOV;
3298 dev->persist->num_vfs = total_vfs;
3302 /* on load remove any previous indication of internal error,
3305 dev->persist->state = MLX4_DEVICE_STATE_UP;
3308 err = mlx4_cmd_init(dev);
3310 mlx4_err(dev, "Failed to init command interface, aborting\n");
3314 /* In slave functions, the communication channel must be initialized
3315 * before posting commands. Also, init num_slaves before calling
3316 * mlx4_init_hca */
3317 if (mlx4_is_mfunc(dev)) {
3318 if (mlx4_is_master(dev)) {
3319 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
3322 dev->num_slaves = 0;
3323 err = mlx4_multi_func_init(dev);
3325 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
3331 err = mlx4_init_fw(dev);
3333 mlx4_err(dev, "Failed to init fw, aborting.\n");
3337 if (mlx4_is_master(dev)) {
3338 /* when we hit the goto slave_start below, dev_cap is already initialized */
3340 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
3347 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3349 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3353 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3356 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3357 u64 dev_flags = mlx4_enable_sriov(dev, pdev,
3363 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3364 dev->flags = dev_flags;
3365 if (!SRIOV_VALID_STATE(dev->flags)) {
3366 mlx4_err(dev, "Invalid SRIOV state\n");
3369 err = mlx4_reset(dev);
3371 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
3377 /* Legacy mode FW requires SRIOV to be enabled before
3378 * doing QUERY_DEV_CAP, since max_eq's value is different if
3381 memset(dev_cap, 0, sizeof(*dev_cap));
3382 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3384 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3388 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3393 err = mlx4_init_hca(dev);
3395 if (err == -EACCES) {
3396 /* Not the primary physical function;
3397 * running in slave mode */
3398 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3399 /* We're not a PF */
3400 if (dev->flags & MLX4_FLAG_SRIOV) {
3402 pci_disable_sriov(pdev);
3403 if (mlx4_is_master(dev) && !reset_flow)
3404 atomic_dec(&pf_loading);
3405 dev->flags &= ~MLX4_FLAG_SRIOV;
3407 if (!mlx4_is_slave(dev))
3408 mlx4_free_ownership(dev);
3409 dev->flags |= MLX4_FLAG_SLAVE;
3410 dev->flags &= ~MLX4_FLAG_MASTER;
3416 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3417 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
3418 existing_vfs, reset_flow);
3420 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
3421 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
3422 dev->flags = dev_flags;
3423 err = mlx4_cmd_init(dev);
3425 /* Only VHCR is cleaned up, so FW commands could still be sent */
3428 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
3432 dev->flags = dev_flags;
3435 if (!SRIOV_VALID_STATE(dev->flags)) {
3436 mlx4_err(dev, "Invalid SRIOV state\n");
3441 /* check if the device is functioning at its maximum possible speed.
3442 * No return code for this call; just warn the user if the PCI
3443 * Express device capabilities are under-satisfied by the bus.
3445 if (!mlx4_is_slave(dev))
3446 mlx4_check_pcie_caps(dev);
3448 /* In master functions, the communication channel must be initialized
3449 * after obtaining its address from fw */
3450 if (mlx4_is_master(dev)) {
3451 if (dev->caps.num_ports < 2 &&
3455 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
3456 dev->caps.num_ports);
3459 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
3462 i < sizeof(dev->persist->nvfs)/
3463 sizeof(dev->persist->nvfs[0]); i++) {
3466 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
3467 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
3468 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
3469 dev->caps.num_ports;
3473 /* In master functions, the communication channel
3474 * must be initialized after obtaining its address from fw
3476 err = mlx4_multi_func_init(dev);
3478 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
3483 err = mlx4_alloc_eq_table(dev);
3485 goto err_master_mfunc;
3487 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
3488 mutex_init(&priv->msix_ctl.pool_lock);
3490 mlx4_enable_msi_x(dev);
3491 if ((mlx4_is_mfunc(dev)) &&
3492 !(dev->flags & MLX4_FLAG_MSI_X)) {
3494 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
3498 if (!mlx4_is_slave(dev)) {
3499 err = mlx4_init_steering(dev);
3501 goto err_disable_msix;
3504 mlx4_init_quotas(dev);
3506 err = mlx4_setup_hca(dev);
3507 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
3508 !mlx4_is_mfunc(dev)) {
3509 dev->flags &= ~MLX4_FLAG_MSI_X;
3510 dev->caps.num_comp_vectors = 1;
3511 pci_disable_msix(pdev);
3512 err = mlx4_setup_hca(dev);
3518 /* When PF resources are ready arm its comm channel to enable
3521 if (mlx4_is_master(dev)) {
3522 err = mlx4_ARM_COMM_CHANNEL(dev);
3524 mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
3530 for (port = 1; port <= dev->caps.num_ports; port++) {
3531 err = mlx4_init_port_info(dev, port);
3536 priv->v2p.port1 = 1;
3537 priv->v2p.port2 = 2;
3539 err = mlx4_register_device(dev);
3543 mlx4_request_modules(dev);
3545 mlx4_sense_init(dev);
3546 mlx4_start_sense(dev);
3550 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3551 atomic_dec(&pf_loading);
3557 for (--port; port >= 1; --port)
3558 mlx4_cleanup_port_info(&priv->port[port]);
3560 mlx4_cleanup_default_counters(dev);
3561 if (!mlx4_is_slave(dev))
3562 mlx4_cleanup_counters_table(dev);
3563 mlx4_cleanup_qp_table(dev);
3564 mlx4_cleanup_srq_table(dev);
3565 mlx4_cleanup_cq_table(dev);
3566 mlx4_cmd_use_polling(dev);
3567 mlx4_cleanup_eq_table(dev);
3568 mlx4_cleanup_mcg_table(dev);
3569 mlx4_cleanup_mr_table(dev);
3570 mlx4_cleanup_xrcd_table(dev);
3571 mlx4_cleanup_pd_table(dev);
3572 mlx4_cleanup_uar_table(dev);
3575 if (!mlx4_is_slave(dev))
3576 mlx4_clear_steering(dev);
3579 if (dev->flags & MLX4_FLAG_MSI_X)
3580 pci_disable_msix(pdev);
3583 mlx4_free_eq_table(dev);
3586 if (mlx4_is_master(dev)) {
3587 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
3588 mlx4_multi_func_cleanup(dev);
3591 if (mlx4_is_slave(dev)) {
3592 kfree(dev->caps.qp0_qkey);
3593 kfree(dev->caps.qp0_tunnel);
3594 kfree(dev->caps.qp0_proxy);
3595 kfree(dev->caps.qp1_tunnel);
3596 kfree(dev->caps.qp1_proxy);
3600 mlx4_close_hca(dev);
3606 if (mlx4_is_slave(dev))
3607 mlx4_multi_func_cleanup(dev);
3610 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3613 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
3614 pci_disable_sriov(pdev);
3615 dev->flags &= ~MLX4_FLAG_SRIOV;
3618 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3619 atomic_dec(&pf_loading);
3621 kfree(priv->dev.dev_vfs);
3623 if (!mlx4_is_slave(dev))
3624 mlx4_free_ownership(dev);
3630 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3631 struct mlx4_priv *priv)
3634 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3635 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3636 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
3637 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
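/* param_map picks the port for each num_vfs/probe_vf argument by
 * argument count: one value means dual-port (port1+2) VFs, two values
 * map to port1 and port2, and three map to port1, port2 and port1+2,
 * matching the "num_vfs=port1,port2,port1+2" syntax from the module
 * parameter description.
 */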
3638 unsigned total_vfs = 0;
3641 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3643 err = mlx4_pci_enable_device(&priv->dev);
3645 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3649 /* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs
3650 * per port, we must limit the number of VFs to 63 (since there are
3651 * only 128 MACs available per port: (63 + 1) * 2 = 128) */
3653 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
3654 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
3655 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
3657 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
3659 goto err_disable_pdev;
3662 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
3664 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
3665 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
3666 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
3668 goto err_disable_pdev;
3671 if (total_vfs > MLX4_MAX_NUM_VF) {
3673 "Requested more VF's (%d) than allowed by hw (%d)\n",
3674 total_vfs, MLX4_MAX_NUM_VF);
3676 goto err_disable_pdev;
3679 for (i = 0; i < MLX4_MAX_PORTS; i++) {
3680 if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
3682 "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
3683 nvfs[i] + nvfs[2], i + 1,
3684 MLX4_MAX_NUM_VF_P_PORT);
3686 goto err_disable_pdev;
3690 /* Check for BARs. */
3691 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
3692 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3693 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
3694 pci_dev_data, pci_resource_flags(pdev, 0));
3696 goto err_disable_pdev;
3698 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
3699 dev_err(&pdev->dev, "Missing UAR, aborting\n");
3701 goto err_disable_pdev;
3704 err = pci_request_regions(pdev, DRV_NAME);
3706 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
3707 goto err_disable_pdev;
3710 pci_set_master(pdev);
3712 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3714 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
3715 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3717 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
3718 goto err_release_regions;
3721 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3723 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
3724 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3726 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
3727 goto err_release_regions;
3731 /* Allow large DMA segments, up to the firmware limit of 1 GB */
3732 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
3733 /* Detect if this device is a virtual function */
3734 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3735 /* When acting as PF, we normally skip VFs unless explicitly
3736 * requested to probe them.
3739 unsigned vfs_offset = 0;
3741 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
3742 vfs_offset + nvfs[i] < extended_func_num(pdev);
3743 vfs_offset += nvfs[i], i++)
3745 if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
3747 goto err_release_regions;
3749 if ((extended_func_num(pdev) - vfs_offset)
3751 dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
3752 extended_func_num(pdev));
3754 goto err_release_regions;
3759 err = mlx4_catas_init(&priv->dev);
3761 goto err_release_regions;
3763 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
3770 mlx4_catas_end(&priv->dev);
3772 err_release_regions:
3773 pci_release_regions(pdev);
3776 mlx4_pci_disable_device(&priv->dev);
3777 pci_set_drvdata(pdev, NULL);
3781 static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
3782 enum devlink_port_type port_type)
3784 struct mlx4_port_info *info = container_of(devlink_port,
3785 struct mlx4_port_info,
3787 enum mlx4_port_type mlx4_port_type;
3789 switch (port_type) {
3790 case DEVLINK_PORT_TYPE_AUTO:
3791 mlx4_port_type = MLX4_PORT_TYPE_AUTO;
3793 case DEVLINK_PORT_TYPE_ETH:
3794 mlx4_port_type = MLX4_PORT_TYPE_ETH;
3796 case DEVLINK_PORT_TYPE_IB:
3797 mlx4_port_type = MLX4_PORT_TYPE_IB;
3803 return __set_port_type(info, mlx4_port_type);
3806 static const struct devlink_ops mlx4_devlink_ops = {
3807 .port_type_set = mlx4_devlink_port_type_set,
3810 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3812 struct devlink *devlink;
3813 struct mlx4_priv *priv;
3814 struct mlx4_dev *dev;
3817 printk_once(KERN_INFO "%s", mlx4_version);
3819 devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv));
3822 priv = devlink_priv(devlink);
3825 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
3826 if (!dev->persist) {
3828 goto err_devlink_free;
3830 dev->persist->pdev = pdev;
3831 dev->persist->dev = dev;
3832 pci_set_drvdata(pdev, dev->persist);
3833 priv->pci_dev_data = id->driver_data;
3834 mutex_init(&dev->persist->device_state_mutex);
3835 mutex_init(&dev->persist->interface_state_mutex);
3836 mutex_init(&dev->persist->pci_status_mutex);
3838 ret = devlink_register(devlink, &pdev->dev);
3840 goto err_persist_free;
3842 ret = __mlx4_init_one(pdev, id->driver_data, priv);
3844 goto err_devlink_unregister;
3846 pci_save_state(pdev);
3849 err_devlink_unregister:
3850 devlink_unregister(devlink);
3852 kfree(dev->persist);
3854 devlink_free(devlink);
3858 static void mlx4_clean_dev(struct mlx4_dev *dev)
3860 struct mlx4_dev_persistent *persist = dev->persist;
3861 struct mlx4_priv *priv = mlx4_priv(dev);
3862 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
3864 memset(priv, 0, sizeof(*priv));
3865 priv->dev.persist = persist;
3866 priv->dev.flags = flags;
3869 static void mlx4_unload_one(struct pci_dev *pdev)
3871 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3872 struct mlx4_dev *dev = persist->dev;
3873 struct mlx4_priv *priv = mlx4_priv(dev);
3880 /* save current port types for later use */
3881 for (i = 0; i < dev->caps.num_ports; i++) {
3882 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
3883 dev->persist->curr_port_poss_type[i] = dev->caps.
3884 possible_type[i + 1];
3887 pci_dev_data = priv->pci_dev_data;
3889 mlx4_stop_sense(dev);
3890 mlx4_unregister_device(dev);
3892 for (p = 1; p <= dev->caps.num_ports; p++) {
3893 mlx4_cleanup_port_info(&priv->port[p]);
3894 mlx4_CLOSE_PORT(dev, p);
3897 if (mlx4_is_master(dev))
3898 mlx4_free_resource_tracker(dev,
3899 RES_TR_FREE_SLAVES_ONLY);
3901 mlx4_cleanup_default_counters(dev);
3902 if (!mlx4_is_slave(dev))
3903 mlx4_cleanup_counters_table(dev);
3904 mlx4_cleanup_qp_table(dev);
3905 mlx4_cleanup_srq_table(dev);
3906 mlx4_cleanup_cq_table(dev);
3907 mlx4_cmd_use_polling(dev);
3908 mlx4_cleanup_eq_table(dev);
3909 mlx4_cleanup_mcg_table(dev);
3910 mlx4_cleanup_mr_table(dev);
3911 mlx4_cleanup_xrcd_table(dev);
3912 mlx4_cleanup_pd_table(dev);
3914 if (mlx4_is_master(dev))
3915 mlx4_free_resource_tracker(dev,
3916 RES_TR_FREE_STRUCTS_ONLY);
3919 mlx4_uar_free(dev, &priv->driver_uar);
3920 mlx4_cleanup_uar_table(dev);
3921 if (!mlx4_is_slave(dev))
3922 mlx4_clear_steering(dev);
3923 mlx4_free_eq_table(dev);
3924 if (mlx4_is_master(dev))
3925 mlx4_multi_func_cleanup(dev);
3926 mlx4_close_hca(dev);
3928 if (mlx4_is_slave(dev))
3929 mlx4_multi_func_cleanup(dev);
3930 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3932 if (dev->flags & MLX4_FLAG_MSI_X)
3933 pci_disable_msix(pdev);
3935 if (!mlx4_is_slave(dev))
3936 mlx4_free_ownership(dev);
3938 kfree(dev->caps.qp0_qkey);
3939 kfree(dev->caps.qp0_tunnel);
3940 kfree(dev->caps.qp0_proxy);
3941 kfree(dev->caps.qp1_tunnel);
3942 kfree(dev->caps.qp1_proxy);
3943 kfree(dev->dev_vfs);
3945 mlx4_clean_dev(dev);
3946 priv->pci_dev_data = pci_dev_data;
3950 static void mlx4_remove_one(struct pci_dev *pdev)
3952 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3953 struct mlx4_dev *dev = persist->dev;
3954 struct mlx4_priv *priv = mlx4_priv(dev);
3955 struct devlink *devlink = priv_to_devlink(priv);
3958 mutex_lock(&persist->interface_state_mutex);
3959 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
3960 mutex_unlock(&persist->interface_state_mutex);
3962 /* Disabling SR-IOV is not allowed while there are active vf's */
3963 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
3964 active_vfs = mlx4_how_many_lives_vf(dev);
3966 pr_warn("Removing PF when there are active VF's !!\n");
3967 pr_warn("Will not disable SR-IOV.\n");
3971 /* The device is marked for deletion; proceed now without the lock,
3972 * letting other tasks terminate
3974 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3975 mlx4_unload_one(pdev);
3977 mlx4_info(dev, "%s: interface is down\n", __func__);
3978 mlx4_catas_end(dev);
3979 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
3980 mlx4_warn(dev, "Disabling SR-IOV\n");
3981 pci_disable_sriov(pdev);
3984 pci_release_regions(pdev);
3985 mlx4_pci_disable_device(dev);
3986 devlink_unregister(devlink);
3987 kfree(dev->persist);
3988 devlink_free(devlink);
3989 pci_set_drvdata(pdev, NULL);
3992 static int restore_current_port_types(struct mlx4_dev *dev,
3993 enum mlx4_port_type *types,
3994 enum mlx4_port_type *poss_types)
3996 struct mlx4_priv *priv = mlx4_priv(dev);
3999 mlx4_stop_sense(dev);
4001 mutex_lock(&priv->port_mutex);
4002 for (i = 0; i < dev->caps.num_ports; i++)
4003 dev->caps.possible_type[i + 1] = poss_types[i];
4004 err = mlx4_change_port_types(dev, types);
4005 mlx4_start_sense(dev);
4006 mutex_unlock(&priv->port_mutex);
4011 int mlx4_restart_one(struct pci_dev *pdev)
4013 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4014 struct mlx4_dev *dev = persist->dev;
4015 struct mlx4_priv *priv = mlx4_priv(dev);
4016 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4017 int pci_dev_data, err, total_vfs;
4019 pci_dev_data = priv->pci_dev_data;
4020 total_vfs = dev->persist->num_vfs;
4021 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4023 mlx4_unload_one(pdev);
4024 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
4026 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
4027 __func__, pci_name(pdev), err);
4031 err = restore_current_port_types(dev, dev->persist->curr_port_type,
4032 dev->persist->curr_port_poss_type);
4034 mlx4_err(dev, "could not restore original port types (%d)\n",
4040 #define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
4041 #define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
4042 #define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
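/* MLX_SP entries force port sensing on the older ConnectX parts, MLX_VF
 * marks IDs that enumerate as virtual functions, and MLX_GN adds a device
 * with no special driver_data flags.
 */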
4044 static const struct pci_device_id mlx4_pci_table[] = {
4045 /* MT25408 "Hermon" */
4046 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */
4047 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */
4048 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR), /* QDR */
4049 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */
4050 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2), /* QDR Gen2 */
4051 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN), /* EN 10GigE */
4052 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2), /* EN 10GigE Gen2 */
4053 /* MT25458 ConnectX EN 10GBASE-T */
4054 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
4055 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2), /* Gen2 */
4056 /* MT26468 ConnectX EN 10GigE PCIe Gen2 */
4057 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
4058 /* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
4059 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
4060 /* MT26478 ConnectX2 40GigE PCIe Gen2 */
4061 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
4062 /* MT25400 Family [ConnectX-2] */
4063 MLX_VF(0x1002), /* Virtual Function */
4064 /* MT27500 Family [ConnectX-3] */
4065 MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
4066 MLX_VF(0x1004), /* Virtual Function */
4067 MLX_GN(0x1005), /* MT27510 Family */
4068 MLX_GN(0x1006), /* MT27511 Family */
4069 MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO), /* MT27520 Family */
4070 MLX_GN(0x1008), /* MT27521 Family */
4071 MLX_GN(0x1009), /* MT27530 Family */
4072 MLX_GN(0x100a), /* MT27531 Family */
4073 MLX_GN(0x100b), /* MT27540 Family */
4074 MLX_GN(0x100c), /* MT27541 Family */
4075 MLX_GN(0x100d), /* MT27550 Family */
4076 MLX_GN(0x100e), /* MT27551 Family */
4077 MLX_GN(0x100f), /* MT27560 Family */
4078 MLX_GN(0x1010), /* MT27561 Family */
4081 * See the mellanox_check_broken_intx_masking() quirk when
4088 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
4090 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
4091 pci_channel_state_t state)
4093 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4095 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
4096 mlx4_enter_error_state(persist);
4098 mutex_lock(&persist->interface_state_mutex);
4099 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4100 mlx4_unload_one(pdev);
4102 mutex_unlock(&persist->interface_state_mutex);
4103 if (state == pci_channel_io_perm_failure)
4104 return PCI_ERS_RESULT_DISCONNECT;
4106 mlx4_pci_disable_device(persist->dev);
4107 return PCI_ERS_RESULT_NEED_RESET;
4110 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
4112 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4113 struct mlx4_dev *dev = persist->dev;
4116 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
4117 err = mlx4_pci_enable_device(dev);
4119 mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
4120 return PCI_ERS_RESULT_DISCONNECT;
4123 pci_set_master(pdev);
4124 pci_restore_state(pdev);
4125 pci_save_state(pdev);
4126 return PCI_ERS_RESULT_RECOVERED;
4129 static void mlx4_pci_resume(struct pci_dev *pdev)
4131 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4132 struct mlx4_dev *dev = persist->dev;
4133 struct mlx4_priv *priv = mlx4_priv(dev);
4134 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4138 mlx4_err(dev, "%s was called\n", __func__);
4139 total_vfs = dev->persist->num_vfs;
4140 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4142 mutex_lock(&persist->interface_state_mutex);
4143 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
4144 err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
4147 mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
4152 err = restore_current_port_types(dev, dev->persist->
4153 curr_port_type, dev->persist->
4154 curr_port_poss_type);
4156 mlx4_err(dev, "could not restore original port types (%d)\n", err);
4159 mutex_unlock(&persist->interface_state_mutex);
4163 static void mlx4_shutdown(struct pci_dev *pdev)
4165 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4167 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
4168 mutex_lock(&persist->interface_state_mutex);
4169 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4170 mlx4_unload_one(pdev);
4171 mutex_unlock(&persist->interface_state_mutex);
4174 static const struct pci_error_handlers mlx4_err_handler = {
4175 .error_detected = mlx4_pci_err_detected,
4176 .slot_reset = mlx4_pci_slot_reset,
4177 .resume = mlx4_pci_resume,
4180 static struct pci_driver mlx4_driver = {
4182 .id_table = mlx4_pci_table,
4183 .probe = mlx4_init_one,
4184 .shutdown = mlx4_shutdown,
4185 .remove = mlx4_remove_one,
4186 .err_handler = &mlx4_err_handler,
4189 static int __init mlx4_verify_params(void)
4191 if ((log_num_mac < 0) || (log_num_mac > 7)) {
4192 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
4196 if (log_num_vlan != 0)
4197 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
4198 MLX4_LOG_NUM_VLANS);
4201 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
4203 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
4204 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
4209 /* Check that the port type module parameter has a legal combination */
4210 if (!port_type_array[0] && port_type_array[1]) {
4211 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
4212 port_type_array[0] = true;
4215 if (mlx4_log_num_mgm_entry_size < -7 ||
4216 (mlx4_log_num_mgm_entry_size > 0 &&
4217 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
4218 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
4219 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
4220 mlx4_log_num_mgm_entry_size,
4221 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
4222 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
4229 static int __init mlx4_init(void)
4233 if (mlx4_verify_params())
4237 mlx4_wq = create_singlethread_workqueue("mlx4");
4241 ret = pci_register_driver(&mlx4_driver);
4243 destroy_workqueue(mlx4_wq);
4244 return ret < 0 ? ret : 0;
4247 static void __exit mlx4_cleanup(void)
4249 pci_unregister_driver(&mlx4_driver);
4250 destroy_workqueue(mlx4_wq);
4253 module_init(mlx4_init);
4254 module_exit(mlx4_cleanup);