1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
4 #include <linux/mlx5/driver.h>
5 #include <linux/mlx5/device.h>
8 #include "sf/vhca_event.h"
10 #include "sf/mlx5_ifc_vhca_event.h"
/*
 * Per-parent-PF table tracking SF (sub-function) auxiliary devices.
 * NOTE(review): this view of the file is elided; the struct's closing
 * brace and further fields (e.g. max_sfs and sf_bar_length, which are
 * referenced by functions below) are not visible here — confirm against
 * the full source.
 */
13 struct mlx5_sf_dev_table {
/* SF auxiliary devices, keyed by sf_index */
14 struct xarray devices;
/* Start of the PCI BAR region from which each SF's BAR slice is carved */
16 phys_addr_t base_address;
/* Notifier block registered for vhca state-change events */
18 struct notifier_block nb;
/* Parent (PF) mlx5 core device that owns this table */
19 struct mlx5_core_dev *dev;
/* Return true when the device exposes the SF HCA capability and supports
 * vhca state events — both are required to host SF aux devices.
 * (Function braces are elided in this view of the file.)
 */
22 static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev)
24 return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev);
/* Return true if any SF auxiliary device currently exists in this PF's
 * table. NOTE(review): the body of the early-exit branch after the
 * "if (!mlx5_sf_dev_supported(dev))" check is elided in this view —
 * presumably it returns false; confirm against the full source.
 */
27 bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
29 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
31 if (!mlx5_sf_dev_supported(dev))
34 return !xa_empty(&table->devices);
/* sysfs "sfnum" show callback: print this SF aux device's SF number.
 * Walks back from the generic struct device to the containing
 * mlx5_sf_dev via the embedded auxiliary_device.
 */
37 static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, char *buf)
39 struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev);
40 struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
42 return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum);
/* Read-only sysfs attribute backed by sfnum_show() above. */
44 static DEVICE_ATTR_RO(sfnum);
/*
 * Attribute list and groups attached to every SF auxiliary device
 * (wired up in mlx5_sf_dev_add() via adev.dev.groups). Array members
 * and NULL terminators are elided in this view of the file.
 */
46 static struct attribute *sf_device_attrs[] = {
51 static const struct attribute_group sf_attr_group = {
52 .attrs = sf_device_attrs,
55 static const struct attribute_group *sf_attr_groups[2] = {
/* Device-core release callback, invoked on the final put of the aux
 * device: returns the auxiliary-device id to the allocator.
 * NOTE(review): freeing of sf_dev itself is presumably in elided lines
 * after L65 — confirm against the full source.
 */
60 static void mlx5_sf_dev_release(struct device *device)
62 struct auxiliary_device *adev = container_of(device, struct auxiliary_device, dev);
63 struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
65 mlx5_adev_idx_free(adev->id);
/* Unregister the SF auxiliary device from the bus and drop the init
 * reference; the final reference drop invokes mlx5_sf_dev_release().
 */
69 static void mlx5_sf_dev_remove(struct mlx5_sf_dev *sf_dev)
71 auxiliary_device_delete(&sf_dev->adev);
72 auxiliary_device_uninit(&sf_dev->adev);
/*
 * Create and register one SF auxiliary device for @sf_index/@sfnum.
 * Sequence: allocate a unique adev id, kzalloc the sf_dev, populate the
 * auxiliary_device (name, release hook, parent, sysfs groups), compute
 * the SF's BAR slice from the table geometry, then init + add the aux
 * device and insert it into table->devices.
 * NOTE(review): the error-path control flow (gotos/labels between the
 * failure checks, and the declarations of pdev/id/err) is elided in this
 * view — each visible mlx5_adev_idx_free()/put_device() is a distinct
 * unwind step; confirm ordering against the full source.
 */
75 static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
77 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
78 struct mlx5_sf_dev *sf_dev;
83 id = mlx5_adev_idx_alloc();
89 sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
/* kzalloc failure: give back the adev id */
91 mlx5_adev_idx_free(id);
97 sf_dev->adev.name = MLX5_SF_DEV_ID_NAME;
98 sf_dev->adev.dev.release = mlx5_sf_dev_release;
99 sf_dev->adev.dev.parent = &pdev->dev;
100 sf_dev->adev.dev.groups = sf_attr_groups;
101 sf_dev->sfnum = sfnum;
102 sf_dev->parent_mdev = dev;
/* No SFs provisioned on this PF: nothing to expose */
104 if (!table->max_sfs) {
105 mlx5_adev_idx_free(id);
/* Each SF owns a fixed-size slice of BAR 2, laid out by sf_index */
110 sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
112 err = auxiliary_device_init(&sf_dev->adev);
114 mlx5_adev_idx_free(id);
119 err = auxiliary_device_add(&sf_dev->adev);
/* After auxiliary_device_init(), unwind is via put_device(), which
 * ends up in mlx5_sf_dev_release()
 */
121 put_device(&sf_dev->adev.dev);
125 err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL);
/* xa_insert failed: tear the registered aux device back down */
131 mlx5_sf_dev_remove(sf_dev);
133 mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
134 sf_index, sfnum, err);
/* Remove one SF aux device: drop it from the table's xarray first so
 * lookups stop finding it, then unregister the auxiliary device.
 */
137 static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev, u16 sf_index)
139 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
141 xa_erase(&table->devices, sf_index);
142 mlx5_sf_dev_remove(sf_dev);
/*
 * vhca state-change notifier: maps a function_id event onto an SF index
 * (function_id - sf_base_id, bounds-checked against max SF functions)
 * and creates/destroys the matching aux device per the new vhca state.
 * NOTE(review): case bodies, breaks, the return statement, and the
 * "if (sf_dev)" guards around the del/add calls are elided in this view;
 * the TEARDOWN_REQUEST arm appears to delete an existing dev or log an
 * error for an unknown index — confirm against the full source.
 */
146 mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data)
148 struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
149 const struct mlx5_vhca_state_event *event = data;
150 struct mlx5_sf_dev *sf_dev;
155 max_functions = mlx5_sf_max_functions(table->dev);
159 base_id = MLX5_CAP_GEN(table->dev, sf_base_id);
/* Ignore events for function ids outside this PF's SF range */
160 if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
163 sf_index = event->function_id - base_id;
164 sf_dev = xa_load(&table->devices, sf_index);
165 switch (event->new_vhca_state) {
166 case MLX5_VHCA_STATE_INVALID:
167 case MLX5_VHCA_STATE_ALLOCATED:
/* SF no longer active: drop its aux device if one exists */
169 mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
171 case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
173 mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
175 mlx5_core_err(table->dev,
176 "SF DEV: teardown state for invalid dev index=%d fn_id=0x%x\n",
177 sf_index, event->sw_function_id);
179 case MLX5_VHCA_STATE_ACTIVE:
/* SF became active: expose it as an auxiliary device */
181 mlx5_sf_dev_add(table->dev, sf_index, event->sw_function_id);
/*
 * Arm vhca state-change event generation for every SF function id of
 * this PF, starting at sf_base_id. NOTE(review): the loop body's error
 * handling, the function_id increment, and the final return are elided
 * in this view — confirm against the full source.
 */
189 static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
191 struct mlx5_core_dev *dev = table->dev;
197 max_functions = mlx5_sf_max_functions(dev);
198 function_id = MLX5_CAP_GEN(dev, sf_base_id);
199 /* Arm the vhca context as the vhca event notifier */
200 for (i = 0; i < max_functions; i++) {
201 err = mlx5_vhca_event_arm(dev, function_id);
/*
 * Allocate and initialize the per-PF SF device table: compute geometry
 * from HCA caps (max SF count, per-SF BAR stride, BAR 2 base), register
 * the vhca state-change notifier, and arm events for all SF functions.
 * On failure, unwinds (notifier unregister, table free — partly elided)
 * and leaves dev->priv.sf_dev_table NULL.
 * NOTE(review): the "!mlx5_sf_dev_supported(dev) ||
 * !mlx5_vhca_event_supported(dev)" guard is redundant —
 * mlx5_sf_dev_supported() already checks mlx5_vhca_event_supported();
 * harmless, but could be simplified to the first call alone.
 */
210 void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
212 struct mlx5_sf_dev_table *table;
213 unsigned int max_sfs;
216 if (!mlx5_sf_dev_supported(dev) || !mlx5_vhca_event_supported(dev))
219 table = kzalloc(sizeof(*table), GFP_KERNEL);
225 table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
/* Prefer the explicit max_num_sf cap; fall back to 2^log_max_sf */
227 if (MLX5_CAP_GEN(dev, max_num_sf))
228 max_sfs = MLX5_CAP_GEN(dev, max_num_sf);
230 max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);
/* Per-SF BAR stride: log_min_sf_size is in units of 4KB pages (+12) */
231 table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
232 table->base_address = pci_resource_start(dev->pdev, 2);
233 table->max_sfs = max_sfs;
234 xa_init(&table->devices);
/* Publish the table before enabling the notifier */
235 dev->priv.sf_dev_table = table;
237 err = mlx5_vhca_event_notifier_register(dev, &table->nb);
240 err = mlx5_sf_dev_vhca_arm_all(table);
243 mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs);
/* Error unwind (labels elided in this view) */
247 mlx5_vhca_event_notifier_unregister(dev, &table->nb);
251 dev->priv.sf_dev_table = NULL;
253 mlx5_core_err(dev, "SF DEV table create err = %d\n", err);
/* Tear down every remaining SF aux device in the table; erasing inside
 * xa_for_each is safe for the XArray iteration protocol.
 */
256 static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
258 struct mlx5_sf_dev *sf_dev;
261 xa_for_each(&table->devices, index, sf_dev) {
262 xa_erase(&table->devices, index);
263 mlx5_sf_dev_remove(sf_dev);
/*
 * Counterpart of mlx5_sf_dev_table_create(): stop event delivery first,
 * then destroy all SF aux devices and clear the table pointer.
 * NOTE(review): a NULL-table early return and the final kfree(table)
 * are presumably in elided lines — confirm against the full source.
 */
267 void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
269 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
274 mlx5_vhca_event_notifier_unregister(dev, &table->nb);
276 /* Now that event handler is not running, it is safe to destroy
277 * the sf device without race.
279 mlx5_sf_dev_destroy_all(table);
/* Sanity: destroy_all must have emptied the xarray */
281 WARN_ON(!xa_empty(&table->devices));
283 dev->priv.sf_dev_table = NULL;