/* drivers/net/ethernet/mellanox/mlx5/core/dev.c */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

static LIST_HEAD(intf_list);
static LIST_HEAD(mlx5_dev_list);
/* protects intf_list and mlx5_dev_list */
static DEFINE_MUTEX(mlx5_intf_mutex);

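/*
 * One of these is allocated per (device, interface) pairing when the
 * interface's ->add() callback runs; it is linked on priv->ctx_list and
 * holds the opaque context that ->add() returned, plus lifecycle state
 * bits (see the enum below).
 */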
struct mlx5_device_context {
        struct list_head        list;
        struct mlx5_interface  *intf;
        void                   *context;
        unsigned long           state;
};

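/*
 * A core event captured while an interface is still being set up; it is
 * queued on priv->waiting_events_list and replayed by
 * delayed_event_release() once the interface can receive events.
 */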
struct mlx5_delayed_event {
        struct list_head        list;
        struct mlx5_core_dev    *dev;
        enum mlx5_dev_event     event;
        unsigned long           param;
};

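/* Lifecycle bits in mlx5_device_context.state: ADDED once ->add() has
 * succeeded, ATTACHED once the interface is attached (set eagerly at
 * add time for interfaces that provide an ->attach() callback).
 */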
enum {
        MLX5_INTERFACE_ADDED,
        MLX5_INTERFACE_ATTACHED,
};

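/* Queue an event for later replay. Called under priv->ctx_lock, hence
 * the GFP_ATOMIC allocation; on failure the event is dropped with an
 * error log.
 */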
static void add_delayed_event(struct mlx5_priv *priv,
                              struct mlx5_core_dev *dev,
                              enum mlx5_dev_event event,
                              unsigned long param)
{
        struct mlx5_delayed_event *delayed_event;

        delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
        if (!delayed_event) {
                mlx5_core_err(dev, "event %d is missed\n", event);
                return;
        }

        mlx5_core_dbg(dev, "Accumulating event %d\n", event);
        delayed_event->dev = dev;
        delayed_event->event = event;
        delayed_event->param = param;
        list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}

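/* Stop accumulating, replay any queued events to this interface (when
 * it has a valid context) and free the queue entries outside the lock.
 */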
static void delayed_event_release(struct mlx5_device_context *dev_ctx,
                                  struct mlx5_priv *priv)
{
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
        struct mlx5_delayed_event *de;
        struct mlx5_delayed_event *n;
        struct list_head temp;

        INIT_LIST_HEAD(&temp);

        spin_lock_irq(&priv->ctx_lock);

        priv->is_accum_events = false;
        list_splice_init(&priv->waiting_events_list, &temp);
        if (!dev_ctx->context)
                goto out;
        list_for_each_entry_safe(de, n, &temp, list)
                dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);

out:
        spin_unlock_irq(&priv->ctx_lock);

        list_for_each_entry_safe(de, n, &temp, list) {
                list_del(&de->list);
                kfree(de);
        }
}

/* Accumulate events that can arrive after mlx5_ib has called
 * ib_register_device(), until that interface is added to the events
 * list.
 */
static void delayed_event_start(struct mlx5_priv *priv)
{
        spin_lock_irq(&priv->ctx_lock);
        priv->is_accum_events = true;
        spin_unlock_irq(&priv->ctx_lock);
}

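/*
 * Bind one interface to one core device: run ->add(), record the
 * returned context on priv->ctx_list, hook up the page-fault handler
 * if the interface provides one, and replay events that arrived in the
 * meantime. The dev_ctx is freed again if ->add() returned NULL.
 */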
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        if (!mlx5_lag_intf_add(intf, priv))
                return;

        dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
        if (!dev_ctx)
                return;

        dev_ctx->intf = intf;

        delayed_event_start(priv);

        dev_ctx->context = intf->add(dev);
        if (dev_ctx->context) {
                set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
                if (intf->attach)
                        set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);

                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
                if (dev_ctx->intf->pfault) {
                        if (priv->pfault) {
                                mlx5_core_err(dev, "multiple page fault handlers not supported");
                        } else {
                                priv->pfault_ctx = dev_ctx->context;
                                priv->pfault = dev_ctx->intf->pfault;
                        }
                }
#endif
                spin_unlock_irq(&priv->ctx_lock);
        }

        delayed_event_release(dev_ctx, priv);

        if (!dev_ctx->context)
                kfree(dev_ctx);
}

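/* Look up the context for @intf on this device; callers hold mlx5_intf_mutex. */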
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
                                                   struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf == intf)
                        return dev_ctx;
        return NULL;
}

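/*
 * Undo mlx5_add_device(): drop the page-fault handler (waiting out SRCU
 * readers), unlink the context and, if ->add() had succeeded, call
 * ->remove() before freeing the dev_ctx.
 */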
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        dev_ctx = mlx5_get_device(intf, priv);
        if (!dev_ctx)
                return;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        spin_lock_irq(&priv->ctx_lock);
        if (priv->pfault == dev_ctx->intf->pfault)
                priv->pfault = NULL;
        spin_unlock_irq(&priv->ctx_lock);

        synchronize_srcu(&priv->pfault_srcu);
#endif

        spin_lock_irq(&priv->ctx_lock);
        list_del(&dev_ctx->list);
        spin_unlock_irq(&priv->ctx_lock);

        if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
                intf->remove(dev, dev_ctx->context);

        kfree(dev_ctx);
}

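/*
 * (Re)activate an interface on a device: prefer the lighter ->attach()
 * when the interface provides one, otherwise fall back to a full
 * ->add(). The state bits make this idempotent.
 */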
static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        dev_ctx = mlx5_get_device(intf, priv);
        if (!dev_ctx)
                return;

        delayed_event_start(priv);
        if (intf->attach) {
                if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
                        goto out;
                if (intf->attach(dev, dev_ctx->context))
                        goto out;

                set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
        } else {
                if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
                        goto out;
                dev_ctx->context = intf->add(dev);
                if (!dev_ctx->context)
                        goto out;

                set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
        }

out:
        delayed_event_release(dev_ctx, priv);
}

void mlx5_attach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&mlx5_intf_mutex);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_attach_interface(intf, priv);
        mutex_unlock(&mlx5_intf_mutex);
}

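/*
 * Quiesce an interface on a device, the mirror of
 * mlx5_attach_interface(): prefer ->detach() when available, otherwise
 * fall back to a full ->remove().
 */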
static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        dev_ctx = mlx5_get_device(intf, priv);
        if (!dev_ctx)
                return;

        if (intf->detach) {
                if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
                        return;
                intf->detach(dev, dev_ctx->context);
                clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
        } else {
                if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
                        return;
                intf->remove(dev, dev_ctx->context);
                clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
        }
}

void mlx5_detach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&mlx5_intf_mutex);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_detach_interface(intf, priv);
        mutex_unlock(&mlx5_intf_mutex);
}

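/* True if @dev is currently on the global mlx5_dev_list. */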
bool mlx5_device_registered(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv;
        bool found = false;

        mutex_lock(&mlx5_intf_mutex);
        list_for_each_entry(priv, &mlx5_dev_list, dev_list)
                if (priv == &dev->priv)
                        found = true;
        mutex_unlock(&mlx5_intf_mutex);

        return found;
}

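/*
 * Publish a core device: put it on the global device list and bind
 * every interface registered so far to it.
 */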
int mlx5_register_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&mlx5_intf_mutex);
        list_add_tail(&priv->dev_list, &mlx5_dev_list);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&mlx5_intf_mutex);

        return 0;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&mlx5_intf_mutex);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_remove_device(intf, priv);
        list_del(&priv->dev_list);
        mutex_unlock(&mlx5_intf_mutex);
}

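/*
 * Entry point for mlx5 protocol drivers (e.g. mlx5_ib, mlx5e): record
 * the interface and bind it to every core device already present.
 * ->add() and ->remove() are mandatory.
 */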
int mlx5_register_interface(struct mlx5_interface *intf)
{
        struct mlx5_priv *priv;

        if (!intf->add || !intf->remove)
                return -EINVAL;

        mutex_lock(&mlx5_intf_mutex);
        list_add_tail(&intf->list, &intf_list);
        list_for_each_entry(priv, &mlx5_dev_list, dev_list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&mlx5_intf_mutex);

        return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
        struct mlx5_priv *priv;

        mutex_lock(&mlx5_intf_mutex);
        list_for_each_entry(priv, &mlx5_dev_list, dev_list)
                mlx5_remove_device(intf, priv);
        list_del(&intf->list);
        mutex_unlock(&mlx5_intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);
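
/*
 * Minimal usage sketch (illustrative only; the my_* names below are
 * hypothetical, not part of the mlx5 tree):
 *
 *	static void *my_add(struct mlx5_core_dev *dev)
 *	{
 *		return my_alloc_context(dev);
 *	}
 *
 *	static void my_remove(struct mlx5_core_dev *dev, void *context)
 *	{
 *		my_free_context(context);
 *	}
 *
 *	static struct mlx5_interface my_intf = {
 *		.add      = my_add,
 *		.remove   = my_remove,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 *	err = mlx5_register_interface(&my_intf);
 *	...
 *	mlx5_unregister_interface(&my_intf);
 *
 * attach/detach, event, pfault and get_dev are optional callbacks.
 */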
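
/* Bounce the interface bound to @protocol on one device: remove it and
 * add it back, all under the interface list mutex.
 */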
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
{
        mutex_lock(&mlx5_intf_mutex);
        mlx5_remove_dev_by_protocol(mdev, protocol);
        mlx5_add_dev_by_protocol(mdev, protocol);
        mutex_unlock(&mlx5_intf_mutex);
}

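/*
 * Return the client-specific handle (for example, the uplink netdevice
 * for the ETH protocol) exposed via ->get_dev() by the interface bound
 * to @protocol, or NULL if there is none.
 */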
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
        struct mlx5_priv *priv = &mdev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;
        void *result = NULL;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
                if ((dev_ctx->intf->protocol == protocol) &&
                    dev_ctx->intf->get_dev) {
                        result = dev_ctx->intf->get_dev(dev_ctx->context);
                        break;
                }

        spin_unlock_irqrestore(&priv->ctx_lock, flags);

        return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

/* Must be called with intf_mutex held */
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
        struct mlx5_interface *intf;

        list_for_each_entry(intf, &intf_list, list)
                if (intf->protocol == protocol) {
                        mlx5_add_device(intf, &dev->priv);
                        break;
                }
}

/* Must be called with intf_mutex held */
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
        struct mlx5_interface *intf;

        list_for_each_entry(intf, &intf_list, list)
                if (intf->protocol == protocol) {
                        mlx5_remove_device(intf, &dev->priv);
                        break;
                }
}

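/*
 * Collapse PCI domain/bus/slot into a single id; PCI_SLOT() masks off
 * the function number, so the two functions of a dual-port device
 * produce the same id.
 */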
static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
        return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
                     (dev->pdev->bus->number << 8) |
                     PCI_SLOT(dev->pdev->devfn));
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
        u32 pci_id = mlx5_gen_pci_id(dev);
        struct mlx5_core_dev *res = NULL;
        struct mlx5_core_dev *tmp_dev;
        struct mlx5_priv *priv;

        list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
                tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
                if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
                        res = tmp_dev;
                        break;
                }
        }

        return res;
}

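/*
 * Fan a core event out to every interface context that is ADDED or
 * ATTACHED; while an interface is mid-setup the event is additionally
 * queued for replay by delayed_event_release().
 */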
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                     unsigned long param)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        if (priv->is_accum_events)
                add_delayed_event(priv, dev, event, param);

        /* After mlx5_detach_device(), dev_ctx->intf is still set and dev_ctx
         * is still on priv->ctx_list. In that case, only notify the dev_ctx
         * if its ADDED or ATTACHED bit is set.
         */
        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf->event &&
                    (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
                     test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
                        dev_ctx->intf->event(dev, dev_ctx->context, event, param);

        spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

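/*
 * Dispatch an ODP page fault to the registered handler. The SRCU read
 * lock pairs with synchronize_srcu() in mlx5_remove_device(), keeping
 * the handler alive while it runs.
 */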
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
                          struct mlx5_pagefault *pfault)
{
        struct mlx5_priv *priv = &dev->priv;
        int srcu_idx;

        srcu_idx = srcu_read_lock(&priv->pfault_srcu);
        if (priv->pfault)
                priv->pfault(dev, priv->pfault_ctx, pfault);
        srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
}
#endif

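/* Thin wrappers exposing the interface/device list lock to the rest of
 * the driver.
 */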
void mlx5_dev_list_lock(void)
{
        mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
        mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
        return mutex_trylock(&mlx5_intf_mutex);
}