/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);

static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
                return false;

        if (!MLX5_ESWITCH_MANAGER(dev))
                return false;

        if (!is_mdev_switchdev_mode(dev))
                return false;

        return true;
}

bool mlx5_eth_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
                return false;

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
                mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
                return false;
        }

        if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
                mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, csum_cap)) {
                mlx5_core_warn(dev, "Missing csum_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
                mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, vlan_cap)) {
                mlx5_core_warn(dev, "Missing vlan_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
                mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
                return false;
        }

        if (MLX5_CAP_FLOWTABLE(dev,
                               flow_table_properties_nic_receive.max_ft_level) < 3) {
                mlx5_core_warn(dev, "max_ft_level < 3\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
                mlx5_core_warn(dev, "Self loop back prevention is not supported\n");

        if (!MLX5_CAP_GEN(dev, cq_moderation))
                mlx5_core_warn(dev, "CQ moderation is not supported\n");

        return true;
}

static bool is_eth_enabled(struct mlx5_core_dev *dev)
{
        union devlink_param_value val;
        int err;

        err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
                                                 DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
                                                 &val);
        return err ? false : val.vbool;
}

bool mlx5_vnet_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
                return false;

        if (mlx5_core_is_pf(dev))
                return false;

        if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
              MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
                return false;

        if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
              MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
                return false;

        if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
                return false;

        return true;
}

static bool is_vnet_enabled(struct mlx5_core_dev *dev)
{
        union devlink_param_value val;
        int err;

        err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
                                                 DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
                                                 &val);
        return err ? false : val.vbool;
}

static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (!is_eth_rep_supported(dev))
                return false;

        if (!MLX5_ESWITCH_MANAGER(dev))
                return false;

        if (!is_mdev_switchdev_mode(dev))
                return false;

        if (mlx5_core_mp_enabled(dev))
                return false;

        return true;
}

static bool is_mp_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (is_ib_rep_supported(dev))
                return false;

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        if (!mlx5_core_is_mp_slave(dev))
                return false;

        return true;
}

bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (is_ib_rep_supported(dev))
                return false;

        if (is_mp_supported(dev))
                return false;

        return true;
}

static bool is_ib_enabled(struct mlx5_core_dev *dev)
{
        union devlink_param_value val;
        int err;

        err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
                                                 DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
                                                 &val);
        return err ? false : val.vbool;
}

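/* Protocol slots for mlx5_adev_devices[] below. mlx5_attach_device() and
 * add_drivers() walk the table in ascending order, while mlx5_detach_device()
 * and delete_drivers() walk it in descending order, so this enum also fixes
 * the bring-up and tear-down ordering of the auxiliary devices.
 */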
enum {
        MLX5_INTERFACE_PROTOCOL_ETH,
        MLX5_INTERFACE_PROTOCOL_ETH_REP,

        MLX5_INTERFACE_PROTOCOL_IB,
        MLX5_INTERFACE_PROTOCOL_IB_REP,
        MLX5_INTERFACE_PROTOCOL_MPIB,

        MLX5_INTERFACE_PROTOCOL_VNET,
};

static const struct mlx5_adev_device {
        const char *suffix;
        bool (*is_supported)(struct mlx5_core_dev *dev);
        bool (*is_enabled)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
        [MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
                                           .is_supported = &mlx5_vnet_supported,
                                           .is_enabled = &is_vnet_enabled },
        [MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
                                         .is_supported = &mlx5_rdma_supported,
                                         .is_enabled = &is_ib_enabled },
        [MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
                                          .is_supported = &mlx5_eth_supported,
                                          .is_enabled = &is_eth_enabled },
        [MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
                                              .is_supported = &is_eth_rep_supported },
        [MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
                                             .is_supported = &is_ib_rep_supported },
        [MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
                                           .is_supported = &is_mp_supported },
};

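/* The adev index is allocated once per mlx5_core_dev from mlx5_adev_ida and
 * reused as the auxiliary device id, so the entries above show up on the
 * auxiliary bus with names along the lines of "mlx5_core.eth.<idx>".
 */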
int mlx5_adev_idx_alloc(void)
{
        return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
        ida_free(&mlx5_adev_ida, idx);
}

int mlx5_adev_init(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
                             sizeof(struct mlx5_adev *), GFP_KERNEL);
        if (!priv->adev)
                return -ENOMEM;

        return 0;
}

void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        kfree(priv->adev);
}

static void adev_release(struct device *dev)
{
        struct mlx5_adev *mlx5_adev =
                container_of(dev, struct mlx5_adev, adev.dev);
        struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
        int idx = mlx5_adev->idx;

        kfree(mlx5_adev);
        priv->adev[idx] = NULL;
}

static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
        const char *suffix = mlx5_adev_devices[idx].suffix;
        struct auxiliary_device *adev;
        struct mlx5_adev *madev;
        int ret;

        madev = kzalloc(sizeof(*madev), GFP_KERNEL);
        if (!madev)
                return ERR_PTR(-ENOMEM);

        adev = &madev->adev;
        adev->id = dev->priv.adev_idx;
        adev->name = suffix;
        adev->dev.parent = dev->device;
        adev->dev.release = adev_release;
        madev->mdev = dev;
        madev->idx = idx;

        ret = auxiliary_device_init(adev);
        if (ret) {
                kfree(madev);
                return ERR_PTR(ret);
        }

        ret = auxiliary_device_add(adev);
        if (ret) {
                auxiliary_device_uninit(adev);
                return ERR_PTR(ret);
        }
        return madev;
}

static void del_adev(struct auxiliary_device *adev)
{
        auxiliary_device_delete(adev);
        auxiliary_device_uninit(adev);
}

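/* Create auxiliary devices for every enabled and supported protocol that is
 * still missing and resume the auxiliary drivers of those that already exist.
 * Counterpart of mlx5_detach_device(); runs under the devlink instance lock.
 */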
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct auxiliary_device *adev;
        struct auxiliary_driver *adrv;
        int ret = 0, i;

        devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&mlx5_intf_mutex);
        priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
        priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
        for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
                if (!priv->adev[i]) {
                        bool is_supported = false;

                        if (mlx5_adev_devices[i].is_enabled) {
                                bool enabled;

                                enabled = mlx5_adev_devices[i].is_enabled(dev);
                                if (!enabled)
                                        continue;
                        }

                        if (mlx5_adev_devices[i].is_supported)
                                is_supported = mlx5_adev_devices[i].is_supported(dev);

                        if (!is_supported)
                                continue;

                        priv->adev[i] = add_adev(dev, i);
                        if (IS_ERR(priv->adev[i])) {
                                ret = PTR_ERR(priv->adev[i]);
                                priv->adev[i] = NULL;
                                break;
                        }
                } else {
                        adev = &priv->adev[i]->adev;

                        /* Note that this is the auxiliary driver, not the PCI
                         * driver to which mlx5_core_dev is bound.
                         *
                         * Module unload can race with devlink reload here, but
                         * no extra lock is needed because we hold the global
                         * mlx5_intf_mutex.
                         */
                        if (!adev->dev.driver)
                                continue;
                        adrv = to_auxiliary_drv(adev->dev.driver);

                        if (adrv->resume)
                                ret = adrv->resume(adev);
                }
                if (ret) {
                        mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
                                       i, mlx5_adev_devices[i].suffix);
                        break;
                }
        }
        priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
        mutex_unlock(&mlx5_intf_mutex);
        return ret;
}

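/* Suspend auxiliary drivers that implement suspend() and delete the auxiliary
 * devices of the rest. Setting MLX5_PRIV_FLAGS_DETACH makes later rescans a
 * no-op until mlx5_attach_device() clears it again.
 */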
void mlx5_detach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct auxiliary_device *adev;
        struct auxiliary_driver *adrv;
        pm_message_t pm = {};
        int i;

        devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&mlx5_intf_mutex);
        priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
        for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
                if (!priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_enabled) {
                        bool enabled;

                        enabled = mlx5_adev_devices[i].is_enabled(dev);
                        if (!enabled)
                                goto skip_suspend;
                }

                adev = &priv->adev[i]->adev;
                /* The auxiliary driver was unbound manually through sysfs */
                if (!adev->dev.driver)
                        goto skip_suspend;

                adrv = to_auxiliary_drv(adev->dev.driver);

                if (adrv->suspend) {
                        adrv->suspend(adev, pm);
                        continue;
                }

skip_suspend:
                del_adev(&priv->adev[i]->adev);
                priv->adev[i] = NULL;
        }
        priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
        priv->flags |= MLX5_PRIV_FLAGS_DETACH;
        mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_register_device(struct mlx5_core_dev *dev)
{
        int ret;

        devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&mlx5_intf_mutex);
        dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        ret = mlx5_rescan_drivers_locked(dev);
        mutex_unlock(&mlx5_intf_mutex);
        if (ret)
                mlx5_unregister_device(dev);

        return ret;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
        devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&mlx5_intf_mutex);
        dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        mlx5_rescan_drivers_locked(dev);
        mutex_unlock(&mlx5_intf_mutex);
}

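/* Create an auxiliary device for every supported protocol that does not have
 * one yet. Errors are recorded but the scan keeps going; see the comment in
 * the error path below.
 */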
static int add_drivers(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
                bool is_supported = false;

                if (priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_supported)
                        is_supported = mlx5_adev_devices[i].is_supported(dev);

                if (!is_supported)
                        continue;

                priv->adev[i] = add_adev(dev, i);
                if (IS_ERR(priv->adev[i])) {
                        mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
                                       i, mlx5_adev_devices[i].suffix);
                        /* Keep rescanning the remaining drivers and let the
                         * caller decide whether to release everything or
                         * continue.
                         */
                        ret = PTR_ERR(priv->adev[i]);
                        priv->adev[i] = NULL;
                }
        }
        return ret;
}

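/* Delete auxiliary devices that are no longer enabled or supported, or all of
 * them when MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV is set.
 */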
static void delete_drivers(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        bool delete_all;
        int i;

        delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

        for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
                bool is_supported = false;

                if (!priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_enabled) {
                        bool enabled;

                        enabled = mlx5_adev_devices[i].is_enabled(dev);
                        if (!enabled)
                                goto del_adev;
                }

                if (mlx5_adev_devices[i].is_supported && !delete_all)
                        is_supported = mlx5_adev_devices[i].is_supported(dev);

                if (is_supported)
                        continue;

del_adev:
                del_adev(&priv->adev[i]->adev);
                priv->adev[i] = NULL;
        }
}

/* This function is used after mlx5_core_dev is reconfigured: it removes
 * auxiliary devices that are no longer needed and adds the ones that have
 * become supported.
 */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        int err = 0;

        lockdep_assert_held(&mlx5_intf_mutex);
        if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
                return 0;

        priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
        delete_drivers(dev);
        if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
                goto out;

        err = add_drivers(dev);

out:
        priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
        return err;
}

bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
        u64 fsystem_guid, psystem_guid;

        fsystem_guid = mlx5_query_nic_system_image_guid(dev);
        psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);

        return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
}

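/* Build an id from the PCI domain, bus and slot only. PCI_SLOT() drops the
 * function number, so sibling PFs sharing a slot map to the same id.
 */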
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
        return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
                     (dev->pdev->bus->number << 8) |
                     PCI_SLOT(dev->pdev->devfn));
}

static int _next_phys_dev(struct mlx5_core_dev *mdev,
                          const struct mlx5_core_dev *curr)
{
        if (!mlx5_core_is_pf(mdev))
                return 0;

        if (mdev == curr)
                return 0;

        if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
            mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
                return 0;

        return 1;
}

static void *pci_get_other_drvdata(struct device *this, struct device *other)
{
        if (this->driver != other->driver)
                return NULL;

        return pci_get_drvdata(to_pci_dev(other));
}

static int next_phys_dev_lag(struct device *dev, const void *data)
{
        struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;

        mdev = pci_get_other_drvdata(this->device, dev);
        if (!mdev)
                return 0;

        if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
            !MLX5_CAP_GEN(mdev, lag_master) ||
            (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||
             MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))
                return 0;

        return _next_phys_dev(mdev, data);
}

static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
                                               int (*match)(struct device *dev, const void *data))
{
        struct device *next;

        if (!mlx5_core_is_pf(dev))
                return NULL;

        next = bus_find_device(&pci_bus_type, NULL, dev, match);
        if (!next)
                return NULL;

        put_device(next);
        return pci_get_drvdata(to_pci_dev(next));
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
{
        lockdep_assert_held(&mlx5_intf_mutex);
        return mlx5_get_next_dev(dev, &next_phys_dev_lag);
}

void mlx5_dev_list_lock(void)
{
        mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
        mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
        return mutex_trylock(&mlx5_intf_mutex);
}