/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
16 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
37 #include <linux/module.h>
39 #include <linux/init.h>
40 #include <linux/slab.h>
41 #include <linux/kernel.h>
42 #include <linux/vmalloc.h>
44 #include <linux/if_arp.h> /* For ARPHRD_xxx */
49 #include <linux/jhash.h>
51 #include <net/addrconf.h>
52 #include <linux/inetdevice.h>
53 #include <rdma/ib_cache.h>
55 #define DRV_VERSION "1.0.0"
57 const char ipoib_driver_version[] = DRV_VERSION;
59 MODULE_AUTHOR("Roland Dreier");
60 MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
61 MODULE_LICENSE("Dual BSD/GPL");
63 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
64 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
66 module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
67 MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
68 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
69 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
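/*
 * Both queue sizes are rounded up to a power of two and clamped to the
 * [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE] range in ipoib_init_module(),
 * so these parameters are hints rather than exact ring sizes.
 */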
71 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
72 int ipoib_debug_level;
74 module_param_named(debug_level, ipoib_debug_level, int, 0644);
75 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
78 struct ipoib_path_iter {
79 struct net_device *dev;
80 struct ipoib_path path;
83 static const u8 ipv4_bcast_addr[] = {
84 0x00, 0xff, 0xff, 0xff,
85 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
86 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
89 struct workqueue_struct *ipoib_workqueue;
91 struct ib_sa_client ipoib_sa_client;
93 static void ipoib_add_one(struct ib_device *device);
94 static void ipoib_remove_one(struct ib_device *device, void *client_data);
95 static void ipoib_neigh_reclaim(struct rcu_head *rp);
96 static struct net_device *ipoib_get_net_dev_by_params(
97 struct ib_device *dev, u8 port, u16 pkey,
98 const union ib_gid *gid, const struct sockaddr *addr,
100 static int ipoib_set_mac(struct net_device *dev, void *addr);
101 static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
104 static struct ib_client ipoib_client = {
106 .add = ipoib_add_one,
107 .remove = ipoib_remove_one,
108 .get_net_dev_by_params = ipoib_get_net_dev_by_params,
111 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
112 static int ipoib_netdev_event(struct notifier_block *this,
113 unsigned long event, void *ptr)
115 struct netdev_notifier_info *ni = ptr;
116 struct net_device *dev = ni->dev;
118 if (dev->netdev_ops->ndo_open != ipoib_open)
122 case NETDEV_REGISTER:
123 ipoib_create_debug_files(dev);
125 case NETDEV_CHANGENAME:
126 ipoib_delete_debug_files(dev);
127 ipoib_create_debug_files(dev);
129 case NETDEV_UNREGISTER:
130 ipoib_delete_debug_files(dev);
138 int ipoib_open(struct net_device *dev)
140 struct ipoib_dev_priv *priv = ipoib_priv(dev);
142 ipoib_dbg(priv, "bringing up interface\n");
144 netif_carrier_off(dev);
146 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
148 priv->sm_fullmember_sendonly_support = false;
150 if (ipoib_ib_dev_open(dev)) {
151 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
156 ipoib_ib_dev_up(dev);
158 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
159 struct ipoib_dev_priv *cpriv;
161 /* Bring up any child interfaces too */
162 down_read(&priv->vlan_rwsem);
163 list_for_each_entry(cpriv, &priv->child_intfs, list) {
166 flags = cpriv->dev->flags;
170 dev_change_flags(cpriv->dev, flags | IFF_UP);
172 up_read(&priv->vlan_rwsem);
175 netif_start_queue(dev);
180 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
185 static int ipoib_stop(struct net_device *dev)
187 struct ipoib_dev_priv *priv = ipoib_priv(dev);
189 ipoib_dbg(priv, "stopping interface\n");
191 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
193 netif_stop_queue(dev);
195 ipoib_ib_dev_down(dev);
196 ipoib_ib_dev_stop(dev);
198 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
199 struct ipoib_dev_priv *cpriv;
201 /* Bring down any child interfaces too */
202 down_read(&priv->vlan_rwsem);
203 list_for_each_entry(cpriv, &priv->child_intfs, list) {
206 flags = cpriv->dev->flags;
207 if (!(flags & IFF_UP))
210 dev_change_flags(cpriv->dev, flags & ~IFF_UP);
212 up_read(&priv->vlan_rwsem);
218 static void ipoib_uninit(struct net_device *dev)
220 ipoib_dev_cleanup(dev);
223 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
225 struct ipoib_dev_priv *priv = ipoib_priv(dev);
227 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
228 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
233 static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
235 struct ipoib_dev_priv *priv = ipoib_priv(dev);
238 /* dev->mtu > 2K ==> connected mode */
239 if (ipoib_cm_admin_enabled(dev)) {
240 if (new_mtu > ipoib_cm_max_mtu(dev))
243 if (new_mtu > priv->mcast_mtu)
244 ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
251 if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
254 priv->admin_mtu = new_mtu;
256 if (priv->mcast_mtu < priv->admin_mtu)
257 ipoib_dbg(priv, "MTU must be smaller than the underlying "
258 "link layer MTU - 4 (%u)\n", priv->mcast_mtu);
260 new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
262 if (priv->rn_ops->ndo_change_mtu) {
263 bool carrier_status = netif_carrier_ok(dev);
265 netif_carrier_off(dev);
/* notify the lower level of the real MTU */
268 ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
271 netif_carrier_on(dev);
279 static void ipoib_get_stats(struct net_device *dev,
280 struct rtnl_link_stats64 *stats)
282 struct ipoib_dev_priv *priv = ipoib_priv(dev);
284 if (priv->rn_ops->ndo_get_stats64)
285 priv->rn_ops->ndo_get_stats64(dev, stats);
287 netdev_stats_to_stats64(stats, &dev->stats);
290 /* Called with an RCU read lock taken */
291 static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
292 struct net_device *dev)
294 struct net *net = dev_net(dev);
295 struct in_device *in_dev;
296 struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
297 struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
300 switch (addr->sa_family) {
302 in_dev = in_dev_get(dev);
306 ret_addr = inet_confirm_addr(net, in_dev, 0,
307 addr_in->sin_addr.s_addr,
315 if (IS_ENABLED(CONFIG_IPV6) &&
316 ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
325 * Find the master net_device on top of the given net_device.
326 * @dev: base IPoIB net_device
328 * Returns the master net_device with a reference held, or the same net_device
329 * if no master exists.
331 static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
333 struct net_device *master;
336 master = netdev_master_upper_dev_get_rcu(dev);
348 struct ipoib_walk_data {
349 const struct sockaddr *addr;
350 struct net_device *result;
353 static int ipoib_upper_walk(struct net_device *upper, void *_data)
355 struct ipoib_walk_data *data = _data;
358 if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
360 data->result = upper;
368 * Find a net_device matching the given address, which is an upper device of
369 * the given net_device.
370 * @addr: IP address to look for.
371 * @dev: base IPoIB net_device
373 * If found, returns the net_device with a reference held. Otherwise return
376 static struct net_device *ipoib_get_net_dev_match_addr(
377 const struct sockaddr *addr, struct net_device *dev)
379 struct ipoib_walk_data data = {
384 if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
390 netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
/* Returns the number of IPoIB netdevs on top of a given IPoIB device matching
 * a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
401 static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
402 const union ib_gid *gid,
404 const struct sockaddr *addr,
406 struct net_device **found_net_dev)
408 struct ipoib_dev_priv *child_priv;
409 struct net_device *net_dev = NULL;
412 if (priv->pkey_index == pkey_index &&
413 (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
415 net_dev = ipoib_get_master_net_dev(priv->dev);
417 /* Verify the net_device matches the IP address, as
418 * IPoIB child devices currently share a GID. */
419 net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
423 *found_net_dev = net_dev;
430 /* Check child interfaces */
431 down_read_nested(&priv->vlan_rwsem, nesting);
432 list_for_each_entry(child_priv, &priv->child_intfs, list) {
433 matches += ipoib_match_gid_pkey_addr(child_priv, gid,
440 up_read(&priv->vlan_rwsem);
445 /* Returns the number of matching net_devs found (between 0 and 2). Also
446 * return the matching net_device in the @net_dev parameter, holding a
447 * reference to the net_device, if the number of matches >= 1 */
448 static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
450 const union ib_gid *gid,
451 const struct sockaddr *addr,
452 struct net_device **net_dev)
454 struct ipoib_dev_priv *priv;
459 list_for_each_entry(priv, dev_list, list) {
460 if (priv->port != port)
463 matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
472 static struct net_device *ipoib_get_net_dev_by_params(
473 struct ib_device *dev, u8 port, u16 pkey,
474 const union ib_gid *gid, const struct sockaddr *addr,
477 struct net_device *net_dev;
478 struct list_head *dev_list = client_data;
483 if (!rdma_protocol_ib(dev, port))
486 ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
493 /* See if we can find a unique device matching the L2 parameters */
494 matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
495 gid, NULL, &net_dev);
506 /* Couldn't find a unique device with L2 parameters only. Use L3
507 * address to uniquely match the net device */
508 matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
509 gid, addr, &net_dev);
514 dev_warn_ratelimited(&dev->dev,
515 "duplicate IP address detected\n");
522 int ipoib_set_mode(struct net_device *dev, const char *buf)
524 struct ipoib_dev_priv *priv = ipoib_priv(dev);
526 if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
527 !strcmp(buf, "connected\n")) ||
528 (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
529 !strcmp(buf, "datagram\n"))) {
533 /* flush paths if we switch modes so that connections are restarted */
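/* Locking: this function is entered with the RTNL lock held by the caller.
 * The "(!rtnl_trylock()) ? -EBUSY : 0" returns below assume the lock was
 * released around ipoib_flush_paths() and try to take it again on the
 * caller's behalf; -EBUSY tells the caller that RTNL is no longer held.
 */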
534 if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
535 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
536 ipoib_warn(priv, "enabling connected mode "
537 "will cause multicast packet drops\n");
538 netdev_update_features(dev);
539 dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
541 priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
543 ipoib_flush_paths(dev);
544 return (!rtnl_trylock()) ? -EBUSY : 0;
547 if (!strcmp(buf, "datagram\n")) {
548 clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
549 netdev_update_features(dev);
550 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
552 ipoib_flush_paths(dev);
553 return (!rtnl_trylock()) ? -EBUSY : 0;
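/*
 * The path database is a red-black tree keyed by the destination GID
 * (ordered by memcmp()), with every entry also linked on priv->path_list.
 * Callers of __path_find() and __path_add() take priv->lock around them.
 */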
559 struct ipoib_path *__path_find(struct net_device *dev, void *gid)
561 struct ipoib_dev_priv *priv = ipoib_priv(dev);
562 struct rb_node *n = priv->path_tree.rb_node;
563 struct ipoib_path *path;
567 path = rb_entry(n, struct ipoib_path, rb_node);
569 ret = memcmp(gid, path->pathrec.dgid.raw,
570 sizeof (union ib_gid));
583 static int __path_add(struct net_device *dev, struct ipoib_path *path)
585 struct ipoib_dev_priv *priv = ipoib_priv(dev);
586 struct rb_node **n = &priv->path_tree.rb_node;
587 struct rb_node *pn = NULL;
588 struct ipoib_path *tpath;
593 tpath = rb_entry(pn, struct ipoib_path, rb_node);
595 ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
596 sizeof (union ib_gid));
605 rb_link_node(&path->rb_node, pn, n);
606 rb_insert_color(&path->rb_node, &priv->path_tree);
608 list_add_tail(&path->list, &priv->path_list);
613 static void path_free(struct net_device *dev, struct ipoib_path *path)
617 while ((skb = __skb_dequeue(&path->queue)))
618 dev_kfree_skb_irq(skb);
620 ipoib_dbg(ipoib_priv(dev), "path_free\n");
622 /* remove all neigh connected to this path */
623 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
626 ipoib_put_ah(path->ah);
631 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
633 struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
635 struct ipoib_path_iter *iter;
637 iter = kmalloc(sizeof *iter, GFP_KERNEL);
642 memset(iter->path.pathrec.dgid.raw, 0, 16);
644 if (ipoib_path_iter_next(iter)) {
652 int ipoib_path_iter_next(struct ipoib_path_iter *iter)
654 struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
656 struct ipoib_path *path;
659 spin_lock_irq(&priv->lock);
661 n = rb_first(&priv->path_tree);
664 path = rb_entry(n, struct ipoib_path, rb_node);
666 if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
667 sizeof (union ib_gid)) < 0) {
676 spin_unlock_irq(&priv->lock);
681 void ipoib_path_iter_read(struct ipoib_path_iter *iter,
682 struct ipoib_path *path)
687 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
689 void ipoib_mark_paths_invalid(struct net_device *dev)
691 struct ipoib_dev_priv *priv = ipoib_priv(dev);
692 struct ipoib_path *path, *tp;
694 spin_lock_irq(&priv->lock);
696 list_for_each_entry_safe(path, tp, &priv->path_list, list) {
697 ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
698 be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
699 path->pathrec.dgid.raw);
704 spin_unlock_irq(&priv->lock);
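/*
 * The pseudo header is never sent on the wire; it only carries the 20-byte
 * (INFINIBAND_ALEN) destination hardware address from ipoib_hard_header()
 * to ipoib_start_xmit(), where it is pulled off the skb again.
 */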
707 static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
709 struct ipoib_pseudo_header *phdr;
711 phdr = skb_push(skb, sizeof(*phdr));
712 memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
715 void ipoib_flush_paths(struct net_device *dev)
717 struct ipoib_dev_priv *priv = ipoib_priv(dev);
718 struct ipoib_path *path, *tp;
719 LIST_HEAD(remove_list);
722 netif_tx_lock_bh(dev);
723 spin_lock_irqsave(&priv->lock, flags);
725 list_splice_init(&priv->path_list, &remove_list);
727 list_for_each_entry(path, &remove_list, list)
728 rb_erase(&path->rb_node, &priv->path_tree);
730 list_for_each_entry_safe(path, tp, &remove_list, list) {
732 ib_sa_cancel_query(path->query_id, path->query);
733 spin_unlock_irqrestore(&priv->lock, flags);
734 netif_tx_unlock_bh(dev);
735 wait_for_completion(&path->done);
736 path_free(dev, path);
737 netif_tx_lock_bh(dev);
738 spin_lock_irqsave(&priv->lock, flags);
741 spin_unlock_irqrestore(&priv->lock, flags);
742 netif_tx_unlock_bh(dev);
745 static void path_rec_completion(int status,
746 struct sa_path_rec *pathrec,
749 struct ipoib_path *path = path_ptr;
750 struct net_device *dev = path->dev;
751 struct ipoib_dev_priv *priv = ipoib_priv(dev);
752 struct ipoib_ah *ah = NULL;
753 struct ipoib_ah *old_ah = NULL;
754 struct ipoib_neigh *neigh, *tn;
755 struct sk_buff_head skqueue;
760 ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
761 be32_to_cpu(sa_path_get_dlid(pathrec)),
764 ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
765 status, path->pathrec.dgid.raw);
767 skb_queue_head_init(&skqueue);
770 struct rdma_ah_attr av;
772 if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
773 pathrec, &av, NULL)) {
774 ah = ipoib_create_ah(dev, priv->pd, &av);
775 rdma_destroy_ah_attr(&av);
779 spin_lock_irqsave(&priv->lock, flags);
781 if (!IS_ERR_OR_NULL(ah)) {
/*
 * pathrec.dgid is used as the database key from the LLADDR;
 * it must remain unchanged even if the SA returns a different
 * GID to use in the AH.
 */
787 if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
788 sizeof(union ib_gid))) {
791 "%s got PathRec for gid %pI6 while asked for %pI6\n",
792 dev->name, pathrec->dgid.raw,
793 path->pathrec.dgid.raw);
794 memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
795 sizeof(union ib_gid));
798 path->pathrec = *pathrec;
803 ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
804 ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
807 while ((skb = __skb_dequeue(&path->queue)))
808 __skb_queue_tail(&skqueue, skb);
810 list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
812 WARN_ON(neigh->ah != old_ah);
/*
 * Dropping the ah reference inside
 * priv->lock is safe here, because we
 * will hold one more reference from
 * the original value of path->ah (ie
 * the value it had before this completion ran).
 */
820 ipoib_put_ah(neigh->ah);
822 kref_get(&path->ah->ref);
823 neigh->ah = path->ah;
825 if (ipoib_cm_enabled(dev, neigh->daddr)) {
826 if (!ipoib_cm_get(neigh))
827 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
830 if (!ipoib_cm_get(neigh)) {
831 ipoib_neigh_free(neigh);
836 while ((skb = __skb_dequeue(&neigh->queue)))
837 __skb_queue_tail(&skqueue, skb);
843 complete(&path->done);
845 spin_unlock_irqrestore(&priv->lock, flags);
847 if (IS_ERR_OR_NULL(ah))
848 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
851 ipoib_put_ah(old_ah);
853 while ((skb = __skb_dequeue(&skqueue))) {
856 ret = dev_queue_xmit(skb);
858 ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
863 static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
866 path->dev = priv->dev;
868 if (rdma_cap_opa_ah(priv->ca, priv->port))
869 path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
871 path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
873 memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
874 path->pathrec.sgid = priv->local_gid;
875 path->pathrec.pkey = cpu_to_be16(priv->pkey);
876 path->pathrec.numb_path = 1;
877 path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
880 static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
882 struct ipoib_dev_priv *priv = ipoib_priv(dev);
883 struct ipoib_path *path;
885 if (!priv->broadcast)
888 path = kzalloc(sizeof *path, GFP_ATOMIC);
892 skb_queue_head_init(&path->queue);
894 INIT_LIST_HEAD(&path->neigh_list);
896 init_path_rec(priv, path, gid);
901 static int path_rec_start(struct net_device *dev,
902 struct ipoib_path *path)
904 struct ipoib_dev_priv *priv = ipoib_priv(dev);
906 ipoib_dbg(priv, "Start path record lookup for %pI6\n",
907 path->pathrec.dgid.raw);
909 init_completion(&path->done);
912 ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
914 IB_SA_PATH_REC_DGID |
915 IB_SA_PATH_REC_SGID |
916 IB_SA_PATH_REC_NUMB_PATH |
917 IB_SA_PATH_REC_TRAFFIC_CLASS |
922 if (path->query_id < 0) {
923 ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
925 complete(&path->done);
926 return path->query_id;
932 static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
933 struct net_device *dev)
935 struct ipoib_dev_priv *priv = ipoib_priv(dev);
936 struct ipoib_path *path;
939 spin_lock_irqsave(&priv->lock, flags);
941 path = __path_find(dev, daddr + 4);
945 path_rec_start(dev, path);
947 spin_unlock_irqrestore(&priv->lock, flags);
950 static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
951 struct net_device *dev)
953 struct ipoib_dev_priv *priv = ipoib_priv(dev);
954 struct rdma_netdev *rn = netdev_priv(dev);
955 struct ipoib_path *path;
956 struct ipoib_neigh *neigh;
959 spin_lock_irqsave(&priv->lock, flags);
960 neigh = ipoib_neigh_alloc(daddr, dev);
962 spin_unlock_irqrestore(&priv->lock, flags);
963 ++dev->stats.tx_dropped;
964 dev_kfree_skb_any(skb);
/* To avoid a race condition, make sure that the
 * neigh is added only once.
 */
971 if (unlikely(!list_empty(&neigh->list))) {
972 spin_unlock_irqrestore(&priv->lock, flags);
976 path = __path_find(dev, daddr + 4);
978 path = path_rec_create(dev, daddr + 4);
982 __path_add(dev, path);
985 list_add_tail(&neigh->list, &path->neigh_list);
987 if (path->ah && path->ah->valid) {
988 kref_get(&path->ah->ref);
989 neigh->ah = path->ah;
991 if (ipoib_cm_enabled(dev, neigh->daddr)) {
992 if (!ipoib_cm_get(neigh))
993 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
994 if (!ipoib_cm_get(neigh)) {
995 ipoib_neigh_free(neigh);
998 if (skb_queue_len(&neigh->queue) <
999 IPOIB_MAX_PATH_REC_QUEUE) {
1000 push_pseudo_header(skb, neigh->daddr);
1001 __skb_queue_tail(&neigh->queue, skb);
1003 ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
1004 skb_queue_len(&neigh->queue));
1008 spin_unlock_irqrestore(&priv->lock, flags);
1009 path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1011 ipoib_neigh_put(neigh);
1017 if (!path->query && path_rec_start(dev, path))
1019 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1020 push_pseudo_header(skb, neigh->daddr);
1021 __skb_queue_tail(&neigh->queue, skb);
1027 spin_unlock_irqrestore(&priv->lock, flags);
1028 ipoib_neigh_put(neigh);
1032 ipoib_neigh_free(neigh);
1034 ++dev->stats.tx_dropped;
1035 dev_kfree_skb_any(skb);
1037 spin_unlock_irqrestore(&priv->lock, flags);
1038 ipoib_neigh_put(neigh);
1043 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
1044 struct ipoib_pseudo_header *phdr)
1046 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1047 struct rdma_netdev *rn = netdev_priv(dev);
1048 struct ipoib_path *path;
1049 unsigned long flags;
1051 spin_lock_irqsave(&priv->lock, flags);
/* no broadcast means that all paths are (or are about to become) invalid */
1054 if (!priv->broadcast)
1055 goto drop_and_unlock;
1057 path = __path_find(dev, phdr->hwaddr + 4);
1058 if (!path || !path->ah || !path->ah->valid) {
1060 path = path_rec_create(dev, phdr->hwaddr + 4);
1062 goto drop_and_unlock;
1063 __path_add(dev, path);
/*
 * make sure there are no changes in the existing path record
 */
1069 init_path_rec(priv, path, phdr->hwaddr + 4);
1071 if (!path->query && path_rec_start(dev, path)) {
1072 goto drop_and_unlock;
1075 if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1076 push_pseudo_header(skb, phdr->hwaddr);
1077 __skb_queue_tail(&path->queue, skb);
1080 goto drop_and_unlock;
1084 spin_unlock_irqrestore(&priv->lock, flags);
1085 ipoib_dbg(priv, "Send unicast ARP to %08x\n",
1086 be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
1087 path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1088 IPOIB_QPN(phdr->hwaddr));
1092 ++dev->stats.tx_dropped;
1093 dev_kfree_skb_any(skb);
1095 spin_unlock_irqrestore(&priv->lock, flags);
1098 static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1100 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1101 struct rdma_netdev *rn = netdev_priv(dev);
1102 struct ipoib_neigh *neigh;
1103 struct ipoib_pseudo_header *phdr;
1104 struct ipoib_header *header;
1105 unsigned long flags;
1107 phdr = (struct ipoib_pseudo_header *) skb->data;
1108 skb_pull(skb, sizeof(*phdr));
1109 header = (struct ipoib_header *) skb->data;
1111 if (unlikely(phdr->hwaddr[4] == 0xff)) {
1112 /* multicast, arrange "if" according to probability */
1113 if ((header->proto != htons(ETH_P_IP)) &&
1114 (header->proto != htons(ETH_P_IPV6)) &&
1115 (header->proto != htons(ETH_P_ARP)) &&
1116 (header->proto != htons(ETH_P_RARP)) &&
1117 (header->proto != htons(ETH_P_TIPC))) {
1118 /* ethertype not supported by IPoIB */
1119 ++dev->stats.tx_dropped;
1120 dev_kfree_skb_any(skb);
1121 return NETDEV_TX_OK;
/* Add in the P_Key for multicast */
1124 phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
1125 phdr->hwaddr[9] = priv->pkey & 0xff;
1127 neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1129 goto send_using_neigh;
1130 ipoib_mcast_send(dev, phdr->hwaddr, skb);
1131 return NETDEV_TX_OK;
1134 /* unicast, arrange "switch" according to probability */
1135 switch (header->proto) {
1136 case htons(ETH_P_IP):
1137 case htons(ETH_P_IPV6):
1138 case htons(ETH_P_TIPC):
1139 neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1140 if (unlikely(!neigh)) {
1141 neigh = neigh_add_path(skb, phdr->hwaddr, dev);
1143 return NETDEV_TX_OK;
1146 case htons(ETH_P_ARP):
1147 case htons(ETH_P_RARP):
/* for unicast ARP and RARP, always perform a path lookup */
1149 unicast_arp_send(skb, dev, phdr);
1150 return NETDEV_TX_OK;
1152 /* ethertype not supported by IPoIB */
1153 ++dev->stats.tx_dropped;
1154 dev_kfree_skb_any(skb);
1155 return NETDEV_TX_OK;
1159 /* note we now hold a ref to neigh */
1160 if (ipoib_cm_get(neigh)) {
1161 if (ipoib_cm_up(neigh)) {
1162 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
1165 } else if (neigh->ah && neigh->ah->valid) {
1166 neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
1167 IPOIB_QPN(phdr->hwaddr));
1169 } else if (neigh->ah) {
1170 neigh_refresh_path(neigh, phdr->hwaddr, dev);
1173 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1174 push_pseudo_header(skb, phdr->hwaddr);
1175 spin_lock_irqsave(&priv->lock, flags);
1176 __skb_queue_tail(&neigh->queue, skb);
1177 spin_unlock_irqrestore(&priv->lock, flags);
1179 ++dev->stats.tx_dropped;
1180 dev_kfree_skb_any(skb);
1184 ipoib_neigh_put(neigh);
1186 return NETDEV_TX_OK;
1189 static void ipoib_timeout(struct net_device *dev)
1191 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1193 ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
1194 jiffies_to_msecs(jiffies - dev_trans_start(dev)));
1195 ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
1196 netif_queue_stopped(dev),
1197 priv->tx_head, priv->tx_tail);
1198 /* XXX reset QP, etc. */
1201 static int ipoib_hard_header(struct sk_buff *skb,
1202 struct net_device *dev,
1203 unsigned short type,
1204 const void *daddr, const void *saddr, unsigned len)
1206 struct ipoib_header *header;
1208 header = skb_push(skb, sizeof *header);
1210 header->proto = htons(type);
1211 header->reserved = 0;
/*
 * We don't rely on the dst_entry structure; we always stuff the
 * destination address into the skb hard header so we can figure out
 * where to send the packet later.
 */
1218 push_pseudo_header(skb, daddr);
1220 return IPOIB_HARD_LEN;
1223 static void ipoib_set_mcast_list(struct net_device *dev)
1225 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1227 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
1228 ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
1232 queue_work(priv->wq, &priv->restart_task);
1235 static int ipoib_get_iflink(const struct net_device *dev)
1237 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1239 /* parent interface */
1240 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
1241 return dev->ifindex;
1243 /* child/vlan interface */
1244 return priv->parent->ifindex;
1247 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
/*
 * Use only the address parts that contribute to spreading.
 * The subnet prefix is not used, as one cannot connect to the
 * same remote port (GUID) using the same remote QPN via two
 * different subnets.
 */
1255 /* qpn octets[1:4) & port GUID octets[12:20) */
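/* Layout of the 20-byte IPoIB hardware address as used here:
 *   byte  0     : flags (e.g. the connected-mode capability bit)
 *   bytes 1-3   : QPN, hence the IPOIB_QPN_MASK applied to d32[0] below
 *   bytes 4-19  : GID (8-byte subnet prefix followed by the 8-byte port GUID)
 * d32[3] and d32[4] therefore cover the port GUID, octets 12-19.
 */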
1256 u32 *d32 = (u32 *) daddr;
1259 hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
1260 return hv & htbl->mask;
1263 struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
1265 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1266 struct ipoib_neigh_table *ntbl = &priv->ntbl;
1267 struct ipoib_neigh_hash *htbl;
1268 struct ipoib_neigh *neigh = NULL;
1273 htbl = rcu_dereference_bh(ntbl->htbl);
1278 hash_val = ipoib_addr_hash(htbl, daddr);
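/* Walk the bucket under rcu_read_lock_bh(); atomic_inc_not_zero() below
 * makes sure we never take a reference on a neigh whose refcount already
 * reached zero and is about to be freed.
 */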
1279 for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
1281 neigh = rcu_dereference_bh(neigh->hnext)) {
1282 if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1283 /* found, take one ref on behalf of the caller */
1284 if (!atomic_inc_not_zero(&neigh->refcnt)) {
1290 if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
1291 neigh->alive = jiffies;
1297 rcu_read_unlock_bh();
1301 static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1303 struct ipoib_neigh_table *ntbl = &priv->ntbl;
1304 struct ipoib_neigh_hash *htbl;
1305 unsigned long neigh_obsolete;
1307 unsigned long flags;
1309 LIST_HEAD(remove_list);
1311 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
1314 spin_lock_irqsave(&priv->lock, flags);
1316 htbl = rcu_dereference_protected(ntbl->htbl,
1317 lockdep_is_held(&priv->lock));
1322 /* neigh is obsolete if it was idle for two GC periods */
1323 dt = 2 * arp_tbl.gc_interval;
1324 neigh_obsolete = jiffies - dt;
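/* time_after() below handles jiffies wrap-around, so the comparison stays
 * correct even when jiffies overflows.
 */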
1325 /* handle possible race condition */
1326 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
1329 for (i = 0; i < htbl->size; i++) {
1330 struct ipoib_neigh *neigh;
1331 struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1333 while ((neigh = rcu_dereference_protected(*np,
1334 lockdep_is_held(&priv->lock))) != NULL) {
1335 /* was the neigh idle for two GC periods */
1336 if (time_after(neigh_obsolete, neigh->alive)) {
1338 ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);
1340 rcu_assign_pointer(*np,
1341 rcu_dereference_protected(neigh->hnext,
1342 lockdep_is_held(&priv->lock)));
1343 /* remove from path/mc list */
1344 list_del_init(&neigh->list);
1345 call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1354 spin_unlock_irqrestore(&priv->lock, flags);
1355 ipoib_mcast_remove_list(&remove_list);
1358 static void ipoib_reap_neigh(struct work_struct *work)
1360 struct ipoib_dev_priv *priv =
1361 container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
1363 __ipoib_reap_neigh(priv);
1365 if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
1366 queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1367 arp_tbl.gc_interval);
1371 static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
1372 struct net_device *dev)
1374 struct ipoib_neigh *neigh;
1376 neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
1381 memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
1382 skb_queue_head_init(&neigh->queue);
1383 INIT_LIST_HEAD(&neigh->list);
1384 ipoib_cm_set(neigh, NULL);
1385 /* one ref on behalf of the caller */
1386 atomic_set(&neigh->refcnt, 1);
1391 struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
1392 struct net_device *dev)
1394 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1395 struct ipoib_neigh_table *ntbl = &priv->ntbl;
1396 struct ipoib_neigh_hash *htbl;
1397 struct ipoib_neigh *neigh;
1400 htbl = rcu_dereference_protected(ntbl->htbl,
1401 lockdep_is_held(&priv->lock));
/* We need to add a new neigh, but maybe another thread beat us to it;
 * recalculate the hash (a resize may have taken place) and search again.
 */
1410 hash_val = ipoib_addr_hash(htbl, daddr);
1411 for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
1412 lockdep_is_held(&priv->lock));
1414 neigh = rcu_dereference_protected(neigh->hnext,
1415 lockdep_is_held(&priv->lock))) {
1416 if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1417 /* found, take one ref on behalf of the caller */
1418 if (!atomic_inc_not_zero(&neigh->refcnt)) {
1423 neigh->alive = jiffies;
1428 neigh = ipoib_neigh_ctor(daddr, dev);
1432 /* one ref on behalf of the hash table */
1433 atomic_inc(&neigh->refcnt);
1434 neigh->alive = jiffies;
1436 rcu_assign_pointer(neigh->hnext,
1437 rcu_dereference_protected(htbl->buckets[hash_val],
1438 lockdep_is_held(&priv->lock)));
1439 rcu_assign_pointer(htbl->buckets[hash_val], neigh);
1440 atomic_inc(&ntbl->entries);
1447 void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
/* the neigh reference count has dropped to zero */
1450 struct net_device *dev = neigh->dev;
1451 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1452 struct sk_buff *skb;
1454 ipoib_put_ah(neigh->ah);
1455 while ((skb = __skb_dequeue(&neigh->queue))) {
1456 ++dev->stats.tx_dropped;
1457 dev_kfree_skb_any(skb);
1459 if (ipoib_cm_get(neigh))
1460 ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
1461 ipoib_dbg(ipoib_priv(dev),
1462 "neigh free for %06x %pI6\n",
1463 IPOIB_QPN(neigh->daddr),
1466 if (atomic_dec_and_test(&priv->ntbl.entries)) {
1467 if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
1468 complete(&priv->ntbl.flushed);
1472 static void ipoib_neigh_reclaim(struct rcu_head *rp)
1474 /* Called as a result of removal from hash table */
1475 struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
1476 /* note TX context may hold another ref */
1477 ipoib_neigh_put(neigh);
1480 void ipoib_neigh_free(struct ipoib_neigh *neigh)
1482 struct net_device *dev = neigh->dev;
1483 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1484 struct ipoib_neigh_table *ntbl = &priv->ntbl;
1485 struct ipoib_neigh_hash *htbl;
1486 struct ipoib_neigh __rcu **np;
1487 struct ipoib_neigh *n;
1490 htbl = rcu_dereference_protected(ntbl->htbl,
1491 lockdep_is_held(&priv->lock));
1495 hash_val = ipoib_addr_hash(htbl, neigh->daddr);
1496 np = &htbl->buckets[hash_val];
1497 for (n = rcu_dereference_protected(*np,
1498 lockdep_is_held(&priv->lock));
1500 n = rcu_dereference_protected(*np,
1501 lockdep_is_held(&priv->lock))) {
1504 rcu_assign_pointer(*np,
1505 rcu_dereference_protected(neigh->hnext,
1506 lockdep_is_held(&priv->lock)));
1507 /* remove from parent list */
1508 list_del_init(&neigh->list);
1509 call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1517 static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
1519 struct ipoib_neigh_table *ntbl = &priv->ntbl;
1520 struct ipoib_neigh_hash *htbl;
1521 struct ipoib_neigh __rcu **buckets;
1524 clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1526 htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
1529 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1530 size = roundup_pow_of_two(arp_tbl.gc_thresh3);
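/* A power-of-two size lets ipoib_addr_hash() reduce the hash with a simple
 * mask (htbl->mask = size - 1) rather than a modulo operation.
 */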
1531 buckets = kcalloc(size, sizeof(*buckets), GFP_KERNEL);
1537 htbl->mask = (size - 1);
1538 htbl->buckets = buckets;
1539 RCU_INIT_POINTER(ntbl->htbl, htbl);
1541 atomic_set(&ntbl->entries, 0);
1543 /* start garbage collection */
1544 clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1545 queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1546 arp_tbl.gc_interval);
1551 static void neigh_hash_free_rcu(struct rcu_head *head)
1553 struct ipoib_neigh_hash *htbl = container_of(head,
1554 struct ipoib_neigh_hash,
1556 struct ipoib_neigh __rcu **buckets = htbl->buckets;
1557 struct ipoib_neigh_table *ntbl = htbl->ntbl;
1561 complete(&ntbl->deleted);
1564 void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
1566 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1567 struct ipoib_neigh_table *ntbl = &priv->ntbl;
1568 struct ipoib_neigh_hash *htbl;
1569 unsigned long flags;
1572 /* remove all neigh connected to a given path or mcast */
1573 spin_lock_irqsave(&priv->lock, flags);
1575 htbl = rcu_dereference_protected(ntbl->htbl,
1576 lockdep_is_held(&priv->lock));
1581 for (i = 0; i < htbl->size; i++) {
1582 struct ipoib_neigh *neigh;
1583 struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1585 while ((neigh = rcu_dereference_protected(*np,
1586 lockdep_is_held(&priv->lock))) != NULL) {
/* delete neighs belonging to this parent */
1588 if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
1589 rcu_assign_pointer(*np,
1590 rcu_dereference_protected(neigh->hnext,
1591 lockdep_is_held(&priv->lock)));
1592 /* remove from parent list */
1593 list_del_init(&neigh->list);
1594 call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1602 spin_unlock_irqrestore(&priv->lock, flags);
1605 static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
1607 struct ipoib_neigh_table *ntbl = &priv->ntbl;
1608 struct ipoib_neigh_hash *htbl;
1609 unsigned long flags;
1610 int i, wait_flushed = 0;
1612 init_completion(&priv->ntbl.flushed);
1613 set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1615 spin_lock_irqsave(&priv->lock, flags);
1617 htbl = rcu_dereference_protected(ntbl->htbl,
1618 lockdep_is_held(&priv->lock));
1622 wait_flushed = atomic_read(&priv->ntbl.entries);
1626 for (i = 0; i < htbl->size; i++) {
1627 struct ipoib_neigh *neigh;
1628 struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1630 while ((neigh = rcu_dereference_protected(*np,
1631 lockdep_is_held(&priv->lock))) != NULL) {
1632 rcu_assign_pointer(*np,
1633 rcu_dereference_protected(neigh->hnext,
1634 lockdep_is_held(&priv->lock)));
1635 /* remove from path/mc list */
1636 list_del_init(&neigh->list);
1637 call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1642 rcu_assign_pointer(ntbl->htbl, NULL);
1643 call_rcu(&htbl->rcu, neigh_hash_free_rcu);
1646 spin_unlock_irqrestore(&priv->lock, flags);
1648 wait_for_completion(&priv->ntbl.flushed);
1651 static void ipoib_neigh_hash_uninit(struct net_device *dev)
1653 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1656 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
1657 init_completion(&priv->ntbl.deleted);
/* Stop the GC; if we are called on an init failure we still need to cancel the work */
1660 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1662 cancel_delayed_work(&priv->neigh_reap_task);
1664 ipoib_flush_neighs(priv);
1666 wait_for_completion(&priv->ntbl.deleted);
1669 static void ipoib_napi_add(struct net_device *dev)
1671 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1673 netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
1674 netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
1677 static void ipoib_napi_del(struct net_device *dev)
1679 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1681 netif_napi_del(&priv->recv_napi);
1682 netif_napi_del(&priv->send_napi);
1685 static void ipoib_dev_uninit_default(struct net_device *dev)
1687 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1689 ipoib_transport_dev_cleanup(dev);
1691 ipoib_napi_del(dev);
1693 ipoib_cm_dev_cleanup(dev);
1695 kfree(priv->rx_ring);
1696 vfree(priv->tx_ring);
1698 priv->rx_ring = NULL;
1699 priv->tx_ring = NULL;
1702 static int ipoib_dev_init_default(struct net_device *dev)
1704 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1706 ipoib_napi_add(dev);
1708 /* Allocate RX/TX "rings" to hold queued skbs */
1709 priv->rx_ring = kcalloc(ipoib_recvq_size,
1710 sizeof(*priv->rx_ring),
1715 priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
1716 sizeof(*priv->tx_ring)));
1717 if (!priv->tx_ring) {
1718 pr_warn("%s: failed to allocate TX ring (%d entries)\n",
1719 priv->ca->name, ipoib_sendq_size);
1720 goto out_rx_ring_cleanup;
1723 /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
1725 if (ipoib_transport_dev_init(dev, priv->ca)) {
1726 pr_warn("%s: ipoib_transport_dev_init failed\n",
1728 goto out_tx_ring_cleanup;
/* after the QP is created, set the device address */
1732 priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
1733 priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff;
1734 priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;
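/* dev_addr[1..3] now hold the 24-bit QP number; dev_addr[0] carries the flag
 * bits and dev_addr[4..19] the port GID, which is filled in separately from
 * priv->local_gid.
 */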
1738 out_tx_ring_cleanup:
1739 vfree(priv->tx_ring);
1741 out_rx_ring_cleanup:
1742 kfree(priv->rx_ring);
1745 ipoib_napi_del(dev);
1749 static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
1752 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1754 if (!priv->rn_ops->ndo_do_ioctl)
1757 return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
1760 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
1762 struct ipoib_dev_priv *priv = ipoib_priv(dev);
/*
 * The various IPoIB tasks assume they will never race against
 * themselves, so always use a single-threaded workqueue.
 */
1773 priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
1775 pr_warn("%s: failed to allocate device WQ\n", dev->name);
/* create the PD, which is used for both the control path and the data path */
1780 priv->pd = ib_alloc_pd(priv->ca, 0);
1781 if (IS_ERR(priv->pd)) {
1782 pr_warn("%s: failed to allocate PD\n", ca->name);
1786 ret = priv->rn_ops->ndo_init(dev);
1788 pr_warn("%s failed to init HW resource\n", dev->name);
1792 if (ipoib_neigh_hash_init(priv) < 0) {
1793 pr_warn("%s failed to init neigh hash\n", dev->name);
1794 goto out_dev_uninit;
1797 if (dev->flags & IFF_UP) {
1798 if (ipoib_ib_dev_open(dev)) {
1799 pr_warn("%s failed to open device\n", dev->name);
1801 goto out_dev_uninit;
1808 ipoib_ib_dev_cleanup(dev);
1812 ib_dealloc_pd(priv->pd);
1818 destroy_workqueue(priv->wq);
1826 void ipoib_dev_cleanup(struct net_device *dev)
1828 struct ipoib_dev_priv *priv = ipoib_priv(dev), *cpriv, *tcpriv;
1833 /* Delete any child interfaces first */
1834 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
1835 /* Stop GC on child */
1836 set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
1837 cancel_delayed_work(&cpriv->neigh_reap_task);
1838 unregister_netdevice_queue(cpriv->dev, &head);
1840 unregister_netdevice_many(&head);
1842 ipoib_neigh_hash_uninit(dev);
1844 ipoib_ib_dev_cleanup(dev);
/* no more work items on priv->wq */
1848 flush_workqueue(priv->wq);
1849 destroy_workqueue(priv->wq);
1854 static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
1856 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1858 return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
1861 static int ipoib_get_vf_config(struct net_device *dev, int vf,
1862 struct ifla_vf_info *ivf)
1864 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1867 err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
1876 static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
1878 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1880 if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
1883 return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
1886 static int ipoib_get_vf_stats(struct net_device *dev, int vf,
1887 struct ifla_vf_stats *vf_stats)
1889 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1891 return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
1894 static const struct header_ops ipoib_header_ops = {
1895 .create = ipoib_hard_header,
1898 static const struct net_device_ops ipoib_netdev_ops_pf = {
1899 .ndo_uninit = ipoib_uninit,
1900 .ndo_open = ipoib_open,
1901 .ndo_stop = ipoib_stop,
1902 .ndo_change_mtu = ipoib_change_mtu,
1903 .ndo_fix_features = ipoib_fix_features,
1904 .ndo_start_xmit = ipoib_start_xmit,
1905 .ndo_tx_timeout = ipoib_timeout,
1906 .ndo_set_rx_mode = ipoib_set_mcast_list,
1907 .ndo_get_iflink = ipoib_get_iflink,
1908 .ndo_set_vf_link_state = ipoib_set_vf_link_state,
1909 .ndo_get_vf_config = ipoib_get_vf_config,
1910 .ndo_get_vf_stats = ipoib_get_vf_stats,
1911 .ndo_set_vf_guid = ipoib_set_vf_guid,
1912 .ndo_set_mac_address = ipoib_set_mac,
1913 .ndo_get_stats64 = ipoib_get_stats,
1914 .ndo_do_ioctl = ipoib_ioctl,
1917 static const struct net_device_ops ipoib_netdev_ops_vf = {
1918 .ndo_uninit = ipoib_uninit,
1919 .ndo_open = ipoib_open,
1920 .ndo_stop = ipoib_stop,
1921 .ndo_change_mtu = ipoib_change_mtu,
1922 .ndo_fix_features = ipoib_fix_features,
1923 .ndo_start_xmit = ipoib_start_xmit,
1924 .ndo_tx_timeout = ipoib_timeout,
1925 .ndo_set_rx_mode = ipoib_set_mcast_list,
1926 .ndo_get_iflink = ipoib_get_iflink,
1927 .ndo_get_stats64 = ipoib_get_stats,
1928 .ndo_do_ioctl = ipoib_ioctl,
1931 void ipoib_setup_common(struct net_device *dev)
1933 dev->header_ops = &ipoib_header_ops;
1935 ipoib_set_ethtool_ops(dev);
1937 dev->watchdog_timeo = HZ;
1939 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1941 dev->hard_header_len = IPOIB_HARD_LEN;
1942 dev->addr_len = INFINIBAND_ALEN;
1943 dev->type = ARPHRD_INFINIBAND;
1944 dev->tx_queue_len = ipoib_sendq_size * 2;
1945 dev->features = (NETIF_F_VLAN_CHALLENGED |
1947 netif_keep_dst(dev);
1949 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
1952 static void ipoib_build_priv(struct net_device *dev)
1954 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1957 spin_lock_init(&priv->lock);
1958 init_rwsem(&priv->vlan_rwsem);
1959 mutex_init(&priv->mcast_mutex);
1960 mutex_init(&priv->sysfs_mutex);
1962 INIT_LIST_HEAD(&priv->path_list);
1963 INIT_LIST_HEAD(&priv->child_intfs);
1964 INIT_LIST_HEAD(&priv->dead_ahs);
1965 INIT_LIST_HEAD(&priv->multicast_list);
1967 INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
1968 INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
1969 INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
1970 INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
1971 INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
1972 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
1973 INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
1974 INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
1977 static const struct net_device_ops ipoib_netdev_default_pf = {
1978 .ndo_init = ipoib_dev_init_default,
1979 .ndo_uninit = ipoib_dev_uninit_default,
1980 .ndo_open = ipoib_ib_dev_open_default,
1981 .ndo_stop = ipoib_ib_dev_stop_default,
1984 static struct net_device
1985 *ipoib_create_netdev_default(struct ib_device *hca,
1987 unsigned char name_assign_type,
1988 void (*setup)(struct net_device *))
1990 struct net_device *dev;
1991 struct rdma_netdev *rn;
1993 dev = alloc_netdev((int)sizeof(struct rdma_netdev),
1995 name_assign_type, setup);
1999 rn = netdev_priv(dev);
2001 rn->send = ipoib_send;
2002 rn->attach_mcast = ipoib_mcast_attach;
2003 rn->detach_mcast = ipoib_mcast_detach;
2004 rn->free_rdma_netdev = free_netdev;
2007 dev->netdev_ops = &ipoib_netdev_default_pf;
2012 static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port,
2015 struct net_device *dev;
2017 if (hca->alloc_rdma_netdev) {
2018 dev = hca->alloc_rdma_netdev(hca, port,
2019 RDMA_NETDEV_IPOIB, name,
2021 ipoib_setup_common);
2022 if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP)
2026 if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP)
2027 dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN,
2028 ipoib_setup_common);
2033 struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
2036 struct net_device *dev;
2037 struct ipoib_dev_priv *priv;
2038 struct rdma_netdev *rn;
2040 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2044 dev = ipoib_get_netdev(hca, port, name);
2048 priv->rn_ops = dev->netdev_ops;
2050 /* fixme : should be after the query_cap */
2051 if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
2052 dev->netdev_ops = &ipoib_netdev_ops_vf;
2054 dev->netdev_ops = &ipoib_netdev_ops_pf;
2056 rn = netdev_priv(dev);
2057 rn->clnt_priv = priv;
2058 ipoib_build_priv(dev);
2066 static ssize_t show_pkey(struct device *dev,
2067 struct device_attribute *attr, char *buf)
2069 struct net_device *ndev = to_net_dev(dev);
2070 struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2072 return sprintf(buf, "0x%04x\n", priv->pkey);
2074 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2076 static ssize_t show_umcast(struct device *dev,
2077 struct device_attribute *attr, char *buf)
2079 struct net_device *ndev = to_net_dev(dev);
2080 struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2082 return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
2085 void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
2087 struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2089 if (umcast_val > 0) {
2090 set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
2091 ipoib_warn(priv, "ignoring multicast groups joined directly "
2094 clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
2097 static ssize_t set_umcast(struct device *dev,
2098 struct device_attribute *attr,
2099 const char *buf, size_t count)
2101 unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
2103 ipoib_set_umcast(to_net_dev(dev), umcast_val);
2107 static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
2109 int ipoib_add_umcast_attr(struct net_device *dev)
2111 return device_create_file(&dev->dev, &dev_attr_umcast);
2114 static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
2116 struct ipoib_dev_priv *child_priv;
2117 struct net_device *netdev = priv->dev;
2119 netif_addr_lock_bh(netdev);
2121 memcpy(&priv->local_gid.global.interface_id,
2122 &gid->global.interface_id,
2123 sizeof(gid->global.interface_id));
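/* Only the interface ID (the lower eight bytes of the GID) is replaced;
 * the subnet prefix half of priv->local_gid is left untouched.
 */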
2124 memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
2125 clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
2127 netif_addr_unlock_bh(netdev);
2129 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
2130 down_read(&priv->vlan_rwsem);
2131 list_for_each_entry(child_priv, &priv->child_intfs, list)
2132 set_base_guid(child_priv, gid);
2133 up_read(&priv->vlan_rwsem);
2137 static int ipoib_check_lladdr(struct net_device *dev,
2138 struct sockaddr_storage *ss)
2140 union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
2143 netif_addr_lock_bh(dev);
/* Make sure the QPN, reserved bits and subnet prefix match the current
 * lladdr; this also makes sure the lladdr is unicast.
 */
2148 if (memcmp(dev->dev_addr, ss->__data,
2149 4 + sizeof(gid->global.subnet_prefix)) ||
2150 gid->global.interface_id == 0)
2153 netif_addr_unlock_bh(dev);
2158 static int ipoib_set_mac(struct net_device *dev, void *addr)
2160 struct ipoib_dev_priv *priv = ipoib_priv(dev);
2161 struct sockaddr_storage *ss = addr;
2164 if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
2167 ret = ipoib_check_lladdr(dev, ss);
2171 set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
2173 queue_work(ipoib_workqueue, &priv->flush_light);
2178 static ssize_t create_child(struct device *dev,
2179 struct device_attribute *attr,
2180 const char *buf, size_t count)
2185 if (sscanf(buf, "%i", &pkey) != 1)
2188 if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
2192 * Set the full membership bit, so that we join the right
2193 * broadcast group, etc.
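 *
 * The full-membership bit is bit 15 of the P_Key, so a limited P_Key such
 * as 0x0001 is joined as 0x8001; compare the explicit
 * "priv->pkey |= 0x8000" in ipoib_add_port() below.
 */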
2197 ret = ipoib_vlan_add(to_net_dev(dev), pkey);
2199 return ret ? ret : count;
2201 static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
2203 static ssize_t delete_child(struct device *dev,
2204 struct device_attribute *attr,
2205 const char *buf, size_t count)
2210 if (sscanf(buf, "%i", &pkey) != 1)
2213 if (pkey < 0 || pkey > 0xffff)
2216 ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
2218 return ret ? ret : count;
2221 static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
2223 int ipoib_add_pkey_attr(struct net_device *dev)
2225 return device_create_file(&dev->dev, &dev_attr_pkey);
2228 void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
2230 priv->hca_caps = hca->attrs.device_cap_flags;
2232 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
2233 priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
2235 if (priv->hca_caps & IB_DEVICE_UD_TSO)
2236 priv->dev->hw_features |= NETIF_F_TSO;
2238 priv->dev->features |= priv->dev->hw_features;
2242 static struct net_device *ipoib_add_port(const char *format,
2243 struct ib_device *hca, u8 port)
2245 struct ipoib_dev_priv *priv;
2246 struct ib_port_attr attr;
2247 struct rdma_netdev *rn;
2248 int result = -ENOMEM;
2250 priv = ipoib_intf_alloc(hca, port, format);
2252 pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
2253 goto alloc_mem_failed;
2256 SET_NETDEV_DEV(priv->dev, hca->dev.parent);
2257 priv->dev->dev_id = port - 1;
2259 result = ib_query_port(hca, port, &attr);
2261 pr_warn("%s: ib_query_port %d failed\n", hca->name, port);
2262 goto device_init_failed;
2265 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
2267 /* MTU will be reset when mcast join happens */
2268 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
2269 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
2270 priv->dev->max_mtu = IPOIB_CM_MTU;
2272 priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
2274 result = ib_query_pkey(hca, port, 0, &priv->pkey);
2276 pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
2277 hca->name, port, result);
2278 goto device_init_failed;
2281 ipoib_set_dev_features(priv, hca);
2284 * Set the full membership bit, so that we join the right
2285 * broadcast group, etc.
2287 priv->pkey |= 0x8000;
2289 priv->dev->broadcast[8] = priv->pkey >> 8;
2290 priv->dev->broadcast[9] = priv->pkey & 0xff;
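/* Bytes 8 and 9 of the 20-byte broadcast address sit inside the broadcast
 * MGID (see ipv4_bcast_addr above), so the broadcast group joined is
 * specific to this P_Key.
 */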
2292 result = rdma_query_gid(hca, port, 0, &priv->local_gid);
2294 pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
2295 hca->name, port, result);
2296 goto device_init_failed;
2299 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
2300 sizeof(union ib_gid));
2301 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
2303 result = ipoib_dev_init(priv->dev, hca, port);
2305 pr_warn("%s: failed to initialize port %d (ret = %d)\n",
2306 hca->name, port, result);
2307 goto device_init_failed;
2310 INIT_IB_EVENT_HANDLER(&priv->event_handler,
2311 priv->ca, ipoib_event);
2312 ib_register_event_handler(&priv->event_handler);
/* call the event handler to ensure the pkey is in sync */
2315 queue_work(ipoib_workqueue, &priv->flush_heavy);
2317 result = register_netdev(priv->dev);
2319 pr_warn("%s: couldn't register ipoib port %d; error %d\n",
2320 hca->name, port, result);
2321 goto register_failed;
2325 if (ipoib_cm_add_mode_attr(priv->dev))
2327 if (ipoib_add_pkey_attr(priv->dev))
2329 if (ipoib_add_umcast_attr(priv->dev))
2331 if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
2333 if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
2339 unregister_netdev(priv->dev);
2342 ib_unregister_event_handler(&priv->event_handler);
2343 flush_workqueue(ipoib_workqueue);
2344 /* Stop GC if started before flush */
2345 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
2346 cancel_delayed_work(&priv->neigh_reap_task);
2347 flush_workqueue(priv->wq);
2348 ipoib_dev_cleanup(priv->dev);
2351 rn = netdev_priv(priv->dev);
2352 rn->free_rdma_netdev(priv->dev);
2356 return ERR_PTR(result);
2359 static void ipoib_add_one(struct ib_device *device)
2361 struct list_head *dev_list;
2362 struct net_device *dev;
2363 struct ipoib_dev_priv *priv;
2367 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
2371 INIT_LIST_HEAD(dev_list);
2373 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
2374 if (!rdma_protocol_ib(device, p))
2376 dev = ipoib_add_port("ib%d", device, p);
2378 priv = ipoib_priv(dev);
2379 list_add_tail(&priv->list, dev_list);
2389 ib_set_client_data(device, &ipoib_client, dev_list);
2392 static void ipoib_remove_one(struct ib_device *device, void *client_data)
2394 struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
2395 struct list_head *dev_list = client_data;
2400 list_for_each_entry_safe(priv, tmp, dev_list, list) {
2401 struct rdma_netdev *parent_rn = netdev_priv(priv->dev);
2403 ib_unregister_event_handler(&priv->event_handler);
2404 flush_workqueue(ipoib_workqueue);
2406 /* mark interface in the middle of destruction */
2407 set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
2410 dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
2414 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
2415 cancel_delayed_work(&priv->neigh_reap_task);
2416 flush_workqueue(priv->wq);
2418 /* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
2419 mutex_lock(&priv->sysfs_mutex);
2420 unregister_netdev(priv->dev);
2421 mutex_unlock(&priv->sysfs_mutex);
2423 parent_rn->free_rdma_netdev(priv->dev);
2425 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
2426 struct rdma_netdev *child_rn;
2428 child_rn = netdev_priv(cpriv->dev);
2429 child_rn->free_rdma_netdev(cpriv->dev);
2439 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2440 static struct notifier_block ipoib_netdev_notifier = {
2441 .notifier_call = ipoib_netdev_event,
2445 static int __init ipoib_init_module(void)
2449 ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
2450 ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
2451 ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);
2453 ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
2454 ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
2455 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
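/* Example: a requested recv_queue_size of 100 is rounded up to 128 and then
 * clamped into [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE]; the send queue
 * is additionally kept at least 2 * MAX_SEND_CQE deep.
 */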
2456 #ifdef CONFIG_INFINIBAND_IPOIB_CM
2457 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
2458 ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
2462 * When copying small received packets, we only copy from the
2463 * linear data part of the SKB, so we rely on this condition.
2465 BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
2467 ret = ipoib_register_debugfs();
2472 * We create a global workqueue here that is used for all flush
2473 * operations. However, if you attempt to flush a workqueue
2474 * from a task on that same workqueue, it deadlocks the system.
2475 * We want to be able to flush the tasks associated with a
2476 * specific net device, so we also create a workqueue for each
2477 * netdevice. We queue up the tasks for that device only on
2478 * its private workqueue, and we only queue up flush events
2479 * on our global flush workqueue. This avoids the deadlocks.
2481 ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
2483 if (!ipoib_workqueue) {
2488 ib_sa_register_client(&ipoib_sa_client);
2490 ret = ib_register_client(&ipoib_client);
2494 ret = ipoib_netlink_init();
2498 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2499 register_netdevice_notifier(&ipoib_netdev_notifier);
2504 ib_unregister_client(&ipoib_client);
2507 ib_sa_unregister_client(&ipoib_sa_client);
2508 destroy_workqueue(ipoib_workqueue);
2511 ipoib_unregister_debugfs();
2516 static void __exit ipoib_cleanup_module(void)
2518 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2519 unregister_netdevice_notifier(&ipoib_netdev_notifier);
2521 ipoib_netlink_fini();
2522 ib_unregister_client(&ipoib_client);
2523 ib_sa_unregister_client(&ipoib_sa_client);
2524 ipoib_unregister_debugfs();
2525 destroy_workqueue(ipoib_workqueue);
2528 module_init(ipoib_init_module);
2529 module_exit(ipoib_cleanup_module);