/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void             *data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			pr_warn("Device %s is missing mandatory function %s\n",
				device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	ib_cache_release_one(dev);
	kfree(dev->port_immutable);
	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name        = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent  = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

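/*
 * Illustrative sketch: a low-level driver typically embeds struct ib_device
 * as the first member of its own device structure and allocates the whole
 * thing with ib_alloc_device().  The names struct mydrv_dev and mydrv_alloc()
 * are hypothetical.
 *
 *	struct mydrv_dev {
 *		struct ib_device	ib_dev;
 *		void __iomem		*regs;
 *	};
 *
 *	static struct mydrv_dev *mydrv_alloc(void)
 *	{
 *		return (struct mydrv_dev *)
 *			ib_alloc_device(sizeof(struct mydrv_dev));
 *	}
 */
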
static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/**
	 * device->port_immutable is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1 based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}

	return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
{
	if (dev->get_dev_fw_str)
		dev->get_dev_fw_str(dev, str, str_len);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(!parent);
	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask)
			device->dev.dma_mask = parent->dma_mask;
		if (!device->dev.coherent_dma_mask)
			device->dev.coherent_dma_mask =
				parent->coherent_dma_mask;
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		device->dma_device = parent;
	}

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		pr_warn("Couldn't create per port immutable data %s\n",
			device->name);
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
		goto out;
	}

	ret = ib_device_register_rdmacg(device);
	if (ret) {
		pr_warn("Couldn't register device with rdma cgroup\n");
		ib_cache_cleanup_one(device);
		goto out;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->query_device(device, &device->attrs, &uhw);
	if (ret) {
		pr_warn("Couldn't query the device attributes\n");
		ib_device_unregister_rdmacg(device);
		ib_cache_cleanup_one(device);
		goto out;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		pr_warn("Couldn't register device %s with driver model\n",
			device->name);
		ib_device_unregister_rdmacg(device);
		ib_cache_cleanup_one(device);
		goto out;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);

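/*
 * Illustrative sketch of the caller side (the "mydrv%d" name and mydrv_*
 * identifiers are hypothetical): the driver fills in its verbs methods,
 * points dev.parent at its bus device and then registers.  A '%d' in the
 * name is expanded by alloc_name() above.
 *
 *	strlcpy(dev->ib_dev.name, "mydrv%d", IB_DEVICE_NAME_MAX);
 *	dev->ib_dev.dev.parent = &pdev->dev;
 *	dev->ib_dev.query_device = mydrv_query_device;
 *	...
 *	ret = ib_register_device(&dev->ib_dev, NULL);
 *	if (ret)
 *		goto err_free;
 */
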
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	mutex_unlock(&device_mutex);

	ib_device_unregister_rdmacg(device);
	ib_device_unregister_sysfs(device);
	ib_cache_cleanup_one(device);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);

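/*
 * Illustrative sketch of a minimal client (the "myclient" names are
 * hypothetical): add() runs for every device that already exists at
 * registration time and for every device registered later; remove() runs on
 * device removal or client unregistration.
 *
 *	static void myclient_add_one(struct ib_device *device)
 *	{
 *		pr_info("myclient: new device %s\n", device->name);
 *	}
 *
 *	static void myclient_remove_one(struct ib_device *device,
 *					void *client_data)
 *	{
 *	}
 *
 *	static struct ib_client myclient = {
 *		.name	= "myclient",
 *		.add	= myclient_add_one,
 *		.remove	= myclient_remove_one,
 *	};
 *
 *	ret = ib_register_client(&myclient);
 */
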
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	pr_warn("No client context found for %s/%s\n",
		device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);

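/*
 * Illustrative sketch (hypothetical "myclient" names, continuing the client
 * example above): per-device state is usually allocated in the add()
 * callback, stored with ib_set_client_data() and released in remove().
 *
 *	static void myclient_add_one(struct ib_device *device)
 *	{
 *		struct myclient_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);
 *
 *		if (!md)
 *			return;
 *		ib_set_client_data(device, &myclient, md);
 *	}
 *
 *	static void myclient_remove_one(struct ib_device *device,
 *					void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 */
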
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);

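/*
 * Illustrative sketch: registering a handler with the INIT_IB_EVENT_HANDLER()
 * helper from <rdma/ib_verbs.h>.  Because the handler may run in interrupt
 * context it must not sleep.  The my_event_handler/my_handler names are
 * hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("async event %d on %s\n",
 *			event->event, event->device->name);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */
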
/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = ib_query_gid(device, port_num, 0, &gid, NULL);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);

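/*
 * Illustrative sketch: checking whether a port is up before using it.
 *
 *	struct ib_port_attr attr;
 *
 *	if (!ib_query_port(device, port_num, &attr) &&
 *	    attr.state == IB_PORT_ACTIVE)
 *		pr_info("%s: port %u is active\n", device->name, port_num);
 */
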
/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr)
{
	if (rdma_cap_roce_gid_table(device, port_num))
		return ib_get_cached_gid(device, port_num, index, gid, attr);

	if (attr)
		return -EINVAL;

	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to a netdevice and calls callback() on each
 * device for which the filter() function returns non zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all RoCE devices' physical ports which are related
 * to netdevices and calls callback() on each device for which
 * the filter() function returns non zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	if (!device->modify_port)
		return -ENOSYS;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: Type of GID.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		enum ib_gid_type gid_type, struct net_device *ndev,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (rdma_cap_roce_gid_table(device, port)) {
			if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
							ndev, index)) {
				*port_num = port;
				return 0;
			}
		}

		if (gid_type != IB_GID_TYPE_IB)
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* If there is a full-member pkey, take it. */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* No full-member pkey found; if a limited-member one exists, take it. */
	if (partial_ix >= 0) {
		*index = (u16) partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);

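/*
 * Illustrative sketch: P_Keys 0x8001 (full member) and 0x0001 (limited
 * member) differ only in the membership bit (0x8000), so either matches a
 * lookup for 0x0001; the full-member entry wins when both are present.
 *
 *	u16 index;
 *
 *	if (!ib_find_pkey(device, port_num, 0x0001, &index))
 *		pr_info("pkey 0x0001 found at index %u\n", index);
 */
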
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static struct ibnl_client_cbs ibnl_ls_cb_table[] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.dump = ib_nl_handle_resolve_resp,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.dump = ib_nl_handle_set_timeout,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.dump = ib_nl_handle_ip_res_resp,
		.module = THIS_MODULE },
};

static int ib_add_ibnl_clients(void)
{
	return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table),
			       ibnl_ls_cb_table);
}

static void ib_remove_ibnl_clients(void)
{
	ibnl_remove_client(RDMA_NL_LS);
}

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp;
	}

	ret = ibnl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface\n");
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = ib_add_ibnl_clients();
	if (ret) {
		pr_warn("Couldn't register ibnl clients\n");
		goto err_sa;
	}

	ib_cache_setup();

	return 0;

err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	ibnl_cleanup();
err_sysfs:
	class_unregister(&ib_class);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	ib_remove_ibnl_clients();
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	ibnl_cleanup();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);