/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>       /* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
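/* Example usage (hypothetical values): modprobe ib_ipoib send_queue_size=128 recv_queue_size=256 */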

static int lro;
module_param(lro, bool, 0444);
MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");

static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
module_param(lro_max_aggr, int, 0644);
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
                "(default = 64)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
        struct net_device *dev;
        struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
        0x00, 0xff, 0xff, 0xff,
        0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
        .name   = "ipoib",
        .add    = ipoib_add_one,
        .remove = ipoib_remove_one
};

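/*
 * Bring the interface administratively up: set ADMIN_UP, and once the
 * P_Key is available, enable NAPI, open and bring up the IB device,
 * and propagate IFF_UP to any child (VLAN) interfaces.
 */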
int ipoib_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "bringing up interface\n");

        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

        napi_enable(&priv->napi);

        if (ipoib_ib_dev_open(dev)) {
                napi_disable(&priv->napi);
                return -EINVAL;
        }

        if (ipoib_ib_dev_up(dev)) {
                ipoib_ib_dev_stop(dev, 1);
                napi_disable(&priv->napi);
                return -EINVAL;
        }

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring up any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (flags & IFF_UP)
                                continue;

                        dev_change_flags(cpriv->dev, flags | IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        netif_start_queue(dev);

        return 0;
}

static int ipoib_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "stopping interface\n");

        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
        napi_disable(&priv->napi);

        netif_stop_queue(dev);

        ipoib_ib_dev_down(dev, 0);
        ipoib_ib_dev_stop(dev, 0);

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring down any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (!(flags & IFF_UP))
                                continue;

                        dev_change_flags(cpriv->dev, flags & ~IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        return 0;
}

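/*
 * In connected mode the MTU is limited only by the CM allowance; in
 * datagram mode it is capped by the IB link MTU, and the effective
 * value is the smaller of the admin setting and the multicast MTU.
 */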
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* dev->mtu > 2K ==> connected mode */
        if (ipoib_cm_admin_enabled(dev)) {
                if (new_mtu > ipoib_cm_max_mtu(dev))
                        return -EINVAL;

                if (new_mtu > priv->mcast_mtu)
                        ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
                                   priv->mcast_mtu);

                dev->mtu = new_mtu;
                return 0;
        }

        if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
                return -EINVAL;

        priv->admin_mtu = new_mtu;

        dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

        return 0;
}

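/*
 * Cached path records live in an rb-tree keyed by destination GID
 * (plus priv->path_list for iteration); both are protected by
 * priv->lock.
 */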
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
        struct ipoib_path *path;
        int ret;

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                ret = memcmp(gid, path->pathrec.dgid.raw,
                             sizeof (union ib_gid));

                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return path;
        }

        return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->path_tree.rb_node;
        struct rb_node *pn = NULL;
        struct ipoib_path *tpath;
        int ret;

        while (*n) {
                pn = *n;
                tpath = rb_entry(pn, struct ipoib_path, rb_node);

                ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&path->rb_node, pn, n);
        rb_insert_color(&path->rb_node, &priv->path_tree);

        list_add_tail(&path->list, &priv->path_list);

        return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = __skb_dequeue(&path->queue)))
                dev_kfree_skb_irq(skb);

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that path->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);

                ipoib_neigh_free(dev, neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (path->ah)
                ipoib_put_ah(path->ah);

        kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
        struct ipoib_path_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->path.pathrec.dgid.raw, 0, 16);

        if (ipoib_path_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_path *path;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->path_tree);

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->path = *path;
                        ret = 0;
                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
                          struct ipoib_path *path)
{
        *path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;

        spin_lock_irq(&priv->lock);

        list_for_each_entry_safe(path, tp, &priv->path_list, list) {
                ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
                        be16_to_cpu(path->pathrec.dlid),
                        path->pathrec.dgid.raw);
                path->valid = 0;
        }

        spin_unlock_irq(&priv->lock);
}

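/*
 * Flush the whole path cache.  Note the unlock/wait/relock dance:
 * path_rec_completion() takes priv->lock, so both locks must be
 * dropped before waiting for an outstanding query to complete.
 */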
void ipoib_flush_paths(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;
        LIST_HEAD(remove_list);
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        list_splice_init(&priv->path_list, &remove_list);

        list_for_each_entry(path, &remove_list, list)
                rb_erase(&path->rb_node, &priv->path_tree);

        list_for_each_entry_safe(path, tp, &remove_list, list) {
                if (path->query)
                        ib_sa_cancel_query(path->query_id, path->query);
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);
                wait_for_completion(&path->done);
                path_free(dev, path);
                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

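/*
 * SA path record query callback.  On success, construct an address
 * handle from the returned record, point every waiting neighbour at
 * it, and requeue any packets buffered while the lookup was in
 * flight.
 */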
static void path_rec_completion(int status,
                                struct ib_sa_path_rec *pathrec,
                                void *path_ptr)
{
        struct ipoib_path *path = path_ptr;
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
        struct ipoib_ah *old_ah = NULL;
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
        unsigned long flags;

        if (!status)
                ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
                          be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
        else
                ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
                          status, path->pathrec.dgid.raw);

        skb_queue_head_init(&skqueue);

        if (!status) {
                struct ib_ah_attr av;

                if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
                        ah = ipoib_create_ah(dev, priv->pd, &av);
        }

        spin_lock_irqsave(&priv->lock, flags);

        if (ah) {
                path->pathrec = *pathrec;

                old_ah   = path->ah;
                path->ah = ah;

                ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
                          ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

                while ((skb = __skb_dequeue(&path->queue)))
                        __skb_queue_tail(&skqueue, skb);

                list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                        if (neigh->ah) {
                                WARN_ON(neigh->ah != old_ah);
                                /*
                                 * Dropping the ah reference inside
                                 * priv->lock is safe here, because we
                                 * will hold one more reference from
                                 * the original value of path->ah (ie
                                 * old_ah).
                                 */
                                ipoib_put_ah(neigh->ah);
                        }
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;
                        memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
                               sizeof(union ib_gid));

                        if (ipoib_cm_enabled(dev, neigh->neighbour)) {
                                if (!ipoib_cm_get(neigh))
                                        ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
                                                                               path,
                                                                               neigh));
                                if (!ipoib_cm_get(neigh)) {
                                        list_del(&neigh->list);
                                        if (neigh->ah)
                                                ipoib_put_ah(neigh->ah);
                                        ipoib_neigh_free(dev, neigh);
                                        continue;
                                }
                        }

                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
                path->valid = 1;
        }

        path->query = NULL;
        complete(&path->done);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (old_ah)
                ipoib_put_ah(old_ah);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        if (!priv->broadcast)
                return NULL;

        path = kzalloc(sizeof *path, GFP_ATOMIC);
        if (!path)
                return NULL;

        path->dev = dev;

        skb_queue_head_init(&path->queue);

        INIT_LIST_HEAD(&path->neigh_list);

        memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
        path->pathrec.sgid          = priv->local_gid;
        path->pathrec.pkey          = cpu_to_be16(priv->pkey);
        path->pathrec.numb_path     = 1;
        path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

        return path;
}

static int path_rec_start(struct net_device *dev,
                          struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "Start path record lookup for %pI6\n",
                  path->pathrec.dgid.raw);

        init_completion(&path->done);

        path->query_id =
                ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
                                   &path->pathrec,
                                   IB_SA_PATH_REC_DGID          |
                                   IB_SA_PATH_REC_SGID          |
                                   IB_SA_PATH_REC_NUMB_PATH     |
                                   IB_SA_PATH_REC_TRAFFIC_CLASS |
                                   IB_SA_PATH_REC_PKEY,
                                   1000, GFP_ATOMIC,
                                   path_rec_completion,
                                   path, &path->query);
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
                path->query = NULL;
                complete(&path->done);
                return path->query_id;
        }

        return 0;
}

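/*
 * An IPoIB hardware address is 20 bytes: 4 bytes of flags/QPN
 * followed by the 16-byte port GID, so "ha + 4" below is the
 * destination GID used as the path cache key.
 */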
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;
        unsigned long flags;

        neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
        if (!neigh) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                return;
        }

        spin_lock_irqsave(&priv->lock, flags);

        path = __path_find(dev, skb->dst->neighbour->ha + 4);
        if (!path) {
                path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
                if (!path)
                        goto err_path;

                __path_add(dev, path);
        }

        list_add_tail(&neigh->list, &path->neigh_list);

        if (path->ah) {
                kref_get(&path->ah->ref);
                neigh->ah = path->ah;
                memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
                       sizeof(union ib_gid));

                if (ipoib_cm_enabled(dev, neigh->neighbour)) {
                        if (!ipoib_cm_get(neigh))
                                ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
                        if (!ipoib_cm_get(neigh)) {
                                list_del(&neigh->list);
                                if (neigh->ah)
                                        ipoib_put_ah(neigh->ah);
                                ipoib_neigh_free(dev, neigh);
                                goto err_drop;
                        }
                        if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
                                __skb_queue_tail(&neigh->queue, skb);
                        else {
                                ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
                                           skb_queue_len(&neigh->queue));
                                goto err_drop;
                        }
                } else
                        ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
        } else {
                neigh->ah  = NULL;

                if (!path->query && path_rec_start(dev, path))
                        goto err_list;

                __skb_queue_tail(&neigh->queue, skb);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        return;

err_list:
        list_del(&neigh->list);

err_path:
        ipoib_neigh_free(dev, neigh);
err_drop:
        ++dev->stats.tx_dropped;
        dev_kfree_skb_any(skb);

        spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

        /* Look up path record for unicasts */
        if (skb->dst->neighbour->ha[4] != 0xff) {
                neigh_add_path(skb, dev);
                return;
        }

        /* Add in the P_Key for multicasts */
        skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
        skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
        ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                             struct ipoib_pseudoheader *phdr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        path = __path_find(dev, phdr->hwaddr + 4);
        if (!path || !path->valid) {
                if (!path)
                        path = path_rec_create(dev, phdr->hwaddr + 4);
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
                        __skb_queue_tail(&path->queue, skb);

                        if (!path->query && path_rec_start(dev, path)) {
                                spin_unlock_irqrestore(&priv->lock, flags);
                                path_free(dev, path);
                                return;
                        } else
                                __path_add(dev, path);
                } else {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                spin_unlock_irqrestore(&priv->lock, flags);
                return;
        }

        if (path->ah) {
                ipoib_dbg(priv, "Send unicast ARP to %04x\n",
                          be16_to_cpu(path->pathrec.dlid));

                ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                /* put pseudoheader back on for next time */
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
}

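/*
 * Transmit entry point.  With a neighbour attached we send (or queue)
 * directly; without one, ipoib_hard_header() pushed a pseudoheader
 * carrying the destination address, which is popped here to choose
 * between the multicast and unicast-ARP paths.
 */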
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
        unsigned long flags;

        if (likely(skb->dst && skb->dst->neighbour)) {
                if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
                        ipoib_path_lookup(skb, dev);
                        return NETDEV_TX_OK;
                }

                neigh = *to_ipoib_neigh(skb->dst->neighbour);

                if (neigh->ah)
                        if (unlikely((memcmp(&neigh->dgid.raw,
                                            skb->dst->neighbour->ha + 4,
                                            sizeof(union ib_gid))) ||
                                         (neigh->dev != dev))) {
                                spin_lock_irqsave(&priv->lock, flags);
                                /*
                                 * It's safe to call ipoib_put_ah() inside
                                 * priv->lock here, because we know that
                                 * path->ah will always hold one more reference,
                                 * so ipoib_put_ah() will never do more than
                                 * decrement the ref count.
                                 */
                                ipoib_put_ah(neigh->ah);
                                list_del(&neigh->list);
                                ipoib_neigh_free(dev, neigh);
                                spin_unlock_irqrestore(&priv->lock, flags);
                                ipoib_path_lookup(skb, dev);
                                return NETDEV_TX_OK;
                        }

                if (ipoib_cm_get(neigh)) {
                        if (ipoib_cm_up(neigh)) {
                                ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
                                return NETDEV_TX_OK;
                        }
                } else if (neigh->ah) {
                        ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
                        return NETDEV_TX_OK;
                }

                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        spin_lock_irqsave(&priv->lock, flags);
                        __skb_queue_tail(&neigh->queue, skb);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
        } else {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb->data;
                skb_pull(skb, sizeof *phdr);

                if (phdr->hwaddr[4] == 0xff) {
                        /* Add in the P_Key for multicast */
                        phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
                        phdr->hwaddr[9] = priv->pkey & 0xff;

                        ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
                } else {
                        /* unicast GID -- should be ARP or RARP reply */

                        if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
                            (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
                                ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
                                           skb->dst ? "neigh" : "dst",
                                           be16_to_cpup((__be16 *) skb->data),
                                           IPOIB_QPN(phdr->hwaddr),
                                           phdr->hwaddr + 4);
                                dev_kfree_skb_any(skb);
                                ++dev->stats.tx_dropped;
                                return NETDEV_TX_OK;
                        }

                        unicast_arp_send(skb, dev, phdr);
                }
        }

        return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
                   jiffies_to_msecs(jiffies - dev->trans_start));
        ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
                   netif_queue_stopped(dev),
                   priv->tx_head, priv->tx_tail);
        /* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
                             struct net_device *dev,
                             unsigned short type,
                             const void *daddr, const void *saddr, unsigned len)
{
        struct ipoib_header *header;

        header = (struct ipoib_header *) skb_push(skb, sizeof *header);

        header->proto = htons(type);
        header->reserved = 0;

        /*
         * If we don't have a neighbour structure, stuff the
         * destination address onto the front of the skb so we can
         * figure out where to send the packet later.
         */
        if ((!skb->dst || !skb->dst->neighbour) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
                memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
        }

        return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
                ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
                return;
        }

        queue_work(ipoib_workqueue, &priv->restart_task);
}

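/*
 * Destructor hook invoked by the core neighbour code when a
 * neighbour entry is torn down; release our per-neighbour state and
 * the AH reference it holds.
 */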
static void ipoib_neigh_cleanup(struct neighbour *n)
{
        struct ipoib_neigh *neigh;
        struct ipoib_dev_priv *priv = netdev_priv(n->dev);
        unsigned long flags;
        struct ipoib_ah *ah = NULL;

        neigh = *to_ipoib_neigh(n);
        if (neigh)
                priv = netdev_priv(neigh->dev);
        else
                return;
        ipoib_dbg(priv,
                  "neigh_cleanup for %06x %pI6\n",
                  IPOIB_QPN(n->ha),
                  n->ha + 4);

        spin_lock_irqsave(&priv->lock, flags);

        if (neigh->ah)
                ah = neigh->ah;
        list_del(&neigh->list);
        ipoib_neigh_free(n->dev, neigh);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ah)
                ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
                                      struct net_device *dev)
{
        struct ipoib_neigh *neigh;

        neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
        if (!neigh)
                return NULL;

        neigh->neighbour = neighbour;
        neigh->dev = dev;
        *to_ipoib_neigh(neighbour) = neigh;
        skb_queue_head_init(&neigh->queue);
        ipoib_cm_set(neigh, NULL);

        return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
        struct sk_buff *skb;
        *to_ipoib_neigh(neigh->neighbour) = NULL;
        while ((skb = __skb_dequeue(&neigh->queue))) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }
        if (ipoib_cm_get(neigh))
                ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
        kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
        parms->neigh_cleanup = ipoib_neigh_cleanup;

        return 0;
}

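/*
 * The RX ring is small enough for kzalloc(); the TX ring can be much
 * larger, so it is vmalloc'ed to avoid high-order allocations.
 */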
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Allocate RX/TX "rings" to hold queued skbs */
        priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, ipoib_recvq_size);
                goto out;
        }

        priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, ipoib_sendq_size);
                goto out_rx_ring_cleanup;
        }
        memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

        /* priv->tx_head, tx_tail & tx_outstanding are already 0 */

        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;

        return 0;

out_tx_ring_cleanup:
        vfree(priv->tx_ring);

out_rx_ring_cleanup:
        kfree(priv->rx_ring);

out:
        return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

        ipoib_delete_debug_files(dev);

        /* Delete any child interfaces first */
        list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
                unregister_netdev(cpriv->dev);
                ipoib_dev_cleanup(cpriv->dev);
                free_netdev(cpriv->dev);
        }

        ipoib_ib_dev_cleanup(dev);

        kfree(priv->rx_ring);
        vfree(priv->tx_ring);

        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
        .create = ipoib_hard_header,
};

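/*
 * LRO callback: locate the IP and TCP headers of a received skb and
 * report whether the packet is eligible for aggregation.
 */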
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
                       void **tcph, u64 *hdr_flags, void *priv)
{
        unsigned int ip_len;
        struct iphdr *iph;

        if (unlikely(skb->protocol != htons(ETH_P_IP)))
                return -1;

        /*
         * In the future we may add an else clause that verifies the
         * checksum and allows devices which do not calculate checksum
         * to use LRO.
         */
        if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
                return -1;

        /* Check for non-TCP packet */
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        if (iph->protocol != IPPROTO_TCP)
                return -1;

        ip_len = ip_hdrlen(skb);
        skb_set_transport_header(skb, ip_len);
        *tcph = tcp_hdr(skb);

        /* check if IP header and TCP header are complete */
        if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
                return -1;

        *hdr_flags = LRO_IPV4 | LRO_TCP;
        *iphdr = iph;

        return 0;
}

static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
{
        priv->lro.lro_mgr.max_aggr       = lro_max_aggr;
        priv->lro.lro_mgr.max_desc       = IPOIB_MAX_LRO_DESCRIPTORS;
        priv->lro.lro_mgr.lro_arr        = priv->lro.lro_desc;
        priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
        priv->lro.lro_mgr.features       = LRO_F_NAPI;
        priv->lro.lro_mgr.dev            = priv->dev;
        priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
}

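/*
 * One-time net_device initialization passed to alloc_netdev().  This
 * code predates netdev_ops, so methods are assigned directly on the
 * device structure.
 */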
static void ipoib_setup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        dev->open                = ipoib_open;
        dev->stop                = ipoib_stop;
        dev->change_mtu          = ipoib_change_mtu;
        dev->hard_start_xmit     = ipoib_start_xmit;
        dev->tx_timeout          = ipoib_timeout;
        dev->header_ops          = &ipoib_header_ops;
        dev->set_multicast_list  = ipoib_set_mcast_list;
        dev->neigh_setup         = ipoib_neigh_setup_dev;

        ipoib_set_ethtool_ops(dev);

        netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

        dev->watchdog_timeo      = HZ;

        dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;

        /*
         * We add in INFINIBAND_ALEN to allow for the destination
         * address "pseudoheader" for skbs without neighbour struct.
         */
        dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
        dev->addr_len            = INFINIBAND_ALEN;
        dev->type                = ARPHRD_INFINIBAND;
        dev->tx_queue_len        = ipoib_sendq_size * 2;
        dev->features            = (NETIF_F_VLAN_CHALLENGED     |
                                    NETIF_F_HIGHDMA);

        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

        netif_carrier_off(dev);

        priv->dev = dev;

        ipoib_lro_setup(priv);

        spin_lock_init(&priv->lock);

        mutex_init(&priv->vlan_mutex);

        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);

        INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
        INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
        INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
        INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
        INIT_WORK(&priv->flush_normal,  ipoib_ib_dev_flush_normal);
        INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
        INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
        struct net_device *dev;

        dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
                           ipoib_setup);
        if (!dev)
                return NULL;

        return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

        return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

        return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

static ssize_t set_umcast(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
        unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

        if (umcast_val > 0) {
                set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
                ipoib_warn(priv, "ignoring multicast groups joined directly "
                                "by userspace\n");
        } else
                clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

        return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        pkey |= 0x8000;

        ret = ipoib_vlan_add(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
        struct ib_device_attr *device_attr;
        int result = -ENOMEM;

        device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
        if (!device_attr) {
                printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
                       hca->name, sizeof *device_attr);
                return result;
        }

        result = ib_query_device(hca, device_attr);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
                       hca->name, result);
                kfree(device_attr);
                return result;
        }
        priv->hca_caps = device_attr->device_cap_flags;

        kfree(device_attr);

        if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
                set_bit(IPOIB_FLAG_CSUM, &priv->flags);
                priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
        }

        if (lro)
                priv->dev->features |= NETIF_F_LRO;

        if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
                priv->dev->features |= NETIF_F_TSO;

        return 0;
}

static struct net_device *ipoib_add_port(const char *format,
                                         struct ib_device *hca, u8 port)
{
        struct ipoib_dev_priv *priv;
        struct ib_port_attr attr;
        int result = -ENOMEM;

        priv = ipoib_intf_alloc(format);
        if (!priv)
                goto alloc_mem_failed;

        SET_NETDEV_DEV(priv->dev, hca->dma_device);

        if (!ib_query_port(hca, port, &attr))
                priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
        else {
                printk(KERN_WARNING "%s: ib_query_port %d failed\n",
                       hca->name, port);
                goto device_init_failed;
        }

        /* MTU will be reset when mcast join happens */
        priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
        priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        if (ipoib_set_dev_features(priv, hca))
                goto device_init_failed;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        priv->pkey |= 0x8000;

        priv->dev->broadcast[8] = priv->pkey >> 8;
        priv->dev->broadcast[9] = priv->pkey & 0xff;

        result = ib_query_gid(hca, port, 0, &priv->local_gid);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        } else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

        result = ipoib_dev_init(priv->dev, hca, port);
        if (result < 0) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        INIT_IB_EVENT_HANDLER(&priv->event_handler,
                              priv->ca, ipoib_event);
        result = ib_register_event_handler(&priv->event_handler);
        if (result < 0) {
                printk(KERN_WARNING "%s: ib_register_event_handler failed for "
                       "port %d (ret = %d)\n",
                       hca->name, port, result);
                goto event_failed;
        }

        result = register_netdev(priv->dev);
        if (result) {
                printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
                       hca->name, port, result);
                goto register_failed;
        }

        ipoib_create_debug_files(priv->dev);

        if (ipoib_cm_add_mode_attr(priv->dev))
                goto sysfs_failed;
        if (ipoib_add_pkey_attr(priv->dev))
                goto sysfs_failed;
        if (ipoib_add_umcast_attr(priv->dev))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
                goto sysfs_failed;

        return priv->dev;

sysfs_failed:
        ipoib_delete_debug_files(priv->dev);
        unregister_netdev(priv->dev);

register_failed:
        ib_unregister_event_handler(&priv->event_handler);
        flush_workqueue(ipoib_workqueue);

event_failed:
        ipoib_dev_cleanup(priv->dev);

device_init_failed:
        free_netdev(priv->dev);

alloc_mem_failed:
        return ERR_PTR(result);
}

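/*
 * ib_client add callback: create one "ib%d" interface per physical
 * port (switches expose only port 0) and remember them in a
 * per-device list for later removal.
 */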
static void ipoib_add_one(struct ib_device *device)
{
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int s, e, p;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
                return;

        INIT_LIST_HEAD(dev_list);

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
                        list_add_tail(&priv->list, dev_list);
                }
        }

        ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
        struct ipoib_dev_priv *priv, *tmp;
        struct list_head *dev_list;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev_list = ib_get_client_data(device, &ipoib_client);

        list_for_each_entry_safe(priv, tmp, dev_list, list) {
                ib_unregister_event_handler(&priv->event_handler);

                rtnl_lock();
                dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
                rtnl_unlock();

                flush_workqueue(ipoib_workqueue);

                unregister_netdev(priv->dev);
                ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }

        kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
        int ret;

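        /*
         * Ring sizes must be powers of two so producer/consumer
         * indices can wrap with a simple mask; clamp them to the
         * supported range as well.
         */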
        ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
        ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

        ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
        ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
                                                     IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

        /*
         * When copying small received packets, we only copy from the
         * linear data part of the SKB, so we rely on this condition.
         */
        BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

        ret = ipoib_register_debugfs();
        if (ret)
                return ret;

        /*
         * We create our own workqueue mainly because we want to be
         * able to flush it when devices are being removed.  We can't
         * use schedule_work()/flush_scheduled_work() because both
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
         */
        ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
        }

        ib_sa_register_client(&ipoib_sa_client);

        ret = ib_register_client(&ipoib_client);
        if (ret)
                goto err_sa;

        return 0;

err_sa:
        ib_sa_unregister_client(&ipoib_sa_client);
        destroy_workqueue(ipoib_workqueue);

err_fs:
        ipoib_unregister_debugfs();

        return ret;
}

static void __exit ipoib_cleanup_module(void)
{
        ib_unregister_client(&ipoib_client);
        ib_sa_unregister_client(&ipoib_sa_client);
        ipoib_unregister_debugfs();
        destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);