/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
                 "Max number of connected-mode QPs per interface "
                 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
                 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

#define IPOIB_CM_RX_RESERVE     (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)

static struct ib_qp_attr ipoib_cm_err_attr = {
        .qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
        .opcode = IB_WR_SEND,
};
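
/*
 * The drain WR is posted on a QP that has already been moved to the
 * error state, so it never goes on the wire: it completes immediately
 * as a flush error, and its distinctive wr_id tells the RX completion
 * handler that every receive posted before it has now been flushed
 * (see ipoib_cm_start_rx_drain() and ipoib_cm_handle_rx_wc() below).
 */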

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
                                  u64 mapping[IPOIB_CM_RX_SG])
{
        int i;

        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        for (i = 0; i < frags; ++i)
                ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}
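
/*
 * Layout of the rx mapping[] array used throughout this file:
 * mapping[0] covers the linear head buffer (IPOIB_CM_HEAD_SIZE bytes)
 * and mapping[1..frags] cover the per-page fragments, which is why
 * the unmap above touches frags + 1 entries in total.
 */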

static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;

        priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        for (i = 0; i < priv->cm.num_frags; ++i)
                priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

        ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
                ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
                                      priv->cm.srq_ring[id].mapping);
                dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
                priv->cm.srq_ring[id].skb = NULL;
        }

        return ret;
}
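
/*
 * Receive wr_ids encode the ring index together with the IPOIB_OP_CM
 * and IPOIB_OP_RECV flag bits; the completion handler recovers the
 * index by masking the flags back out, as ipoib_cm_handle_rx_wc()
 * does below:
 *
 *      wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
 */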

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
                                        struct ipoib_cm_rx *rx,
                                        struct ib_recv_wr *wr,
                                        struct ib_sge *sge, int id)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;

        wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        for (i = 0; i < IPOIB_CM_RX_SG; ++i)
                sge[i].addr = rx->rx_ring[id].mapping[i];

        ret = ib_post_recv(rx->qp, wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
                ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
                                      rx->rx_ring[id].mapping);
                dev_kfree_skb_any(rx->rx_ring[id].skb);
                rx->rx_ring[id].skb = NULL;
        }

        return ret;
}

static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
                                             struct ipoib_cm_rx_buf *rx_ring,
                                             int id, int frags,
                                             u64 mapping[IPOIB_CM_RX_SG],
                                             gfp_t gfp)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct sk_buff *skb;
        int i;

        skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
        if (unlikely(!skb))
                return NULL;

        /*
         * IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, IPOIB_CM_RX_RESERVE);

        mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        for (i = 0; i < frags; i++) {
                struct page *page = alloc_page(gfp);

                if (!page)
                        goto partial_error;
                skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

                mapping[i + 1] = ib_dma_map_page(priv->ca, page,
                                                 0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
                        goto partial_error;
        }

        rx_ring[id].skb = skb;
        return skb;

partial_error:

        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        for (; i > 0; --i)
                ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

        dev_kfree_skb_any(skb);
        return NULL;
}
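
/*
 * Note that rx_ring[id].skb is only assigned once the whole buffer
 * (head mapping plus all page fragments) has been set up; on the
 * partial_error path everything mapped so far is unwound and the ring
 * slot is left untouched, so the caller can keep using the old skb.
 */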

static void ipoib_cm_free_rx_ring(struct net_device *dev,
                                  struct ipoib_cm_rx_buf *rx_ring)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (rx_ring[i].skb) {
                        ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
                                              rx_ring[i].mapping);
                        dev_kfree_skb_any(rx_ring[i].skb);
                }

        vfree(rx_ring);
}

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
        struct ib_send_wr *bad_wr;
        struct ipoib_cm_rx *p;

        /* We only reserved 1 extra slot in CQ for drain WRs, so
         * make sure we have at most 1 outstanding WR. */
        if (list_empty(&priv->cm.rx_flush_list) ||
            !list_empty(&priv->cm.rx_drain_list))
                return;

        /*
         * QPs on the flush list are in the error state.  This way, a
         * "flush error" WC will be immediately generated for each WR
         * we post.
         */
        p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
        ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
        if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
                ipoib_warn(priv, "failed to post drain wr\n");

        list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
        struct ipoib_cm_rx *p = ctx;
        struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
        unsigned long flags;

        if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
                return;

        spin_lock_irqsave(&priv->lock, flags);
        list_move(&p->list, &priv->cm.rx_flush_list);
        p->state = IPOIB_CM_RX_FLUSH;
        ipoib_cm_start_rx_drain(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
                                           struct ipoib_cm_rx *p)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_init_attr attr = {
                .event_handler = ipoib_cm_rx_event_handler,
                .send_cq = priv->recv_cq, /* For drain WR */
                .recv_cq = priv->recv_cq,
                .srq = priv->cm.srq,
                .cap.max_send_wr = 1, /* For drain WR */
                .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type = IB_QPT_RC,
                .qp_context = p,
        };

        if (!ipoib_cm_has_srq(dev)) {
                attr.cap.max_recv_wr  = ipoib_recvq_size;
                attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
        }

        return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
                                 struct ib_cm_id *cm_id, struct ib_qp *qp,
                                 unsigned psn)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
                return ret;
        }
        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }
        qp_attr.rq_psn = psn;
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        /*
         * Current Mellanox HCA firmware won't generate completions
         * with error for drain WRs unless the QP has been moved to
         * RTS first. This work-around leaves a window where a QP has
         * moved to error asynchronously, but this will eventually get
         * fixed in firmware, so let's not error out if modify QP
         * fails.
         */
        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return 0;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return 0;
        }

        return 0;
}

static void ipoib_cm_init_rx_wr(struct net_device *dev,
                                struct ib_recv_wr *wr,
                                struct ib_sge *sge)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int i;

        for (i = 0; i < priv->cm.num_frags; ++i)
                sge[i].lkey = priv->pd->local_dma_lkey;

        sge[0].length = IPOIB_CM_HEAD_SIZE;
        for (i = 1; i < priv->cm.num_frags; ++i)
                sge[i].length = PAGE_SIZE;

        wr->next    = NULL;
        wr->sg_list = sge;
        wr->num_sge = priv->cm.num_frags;
}

static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
                                   struct ipoib_cm_rx *rx)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct {
                struct ib_recv_wr wr;
                struct ib_sge sge[IPOIB_CM_RX_SG];
        } *t;
        int ret;
        int i;

        rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
        if (!rx->rx_ring)
                return -ENOMEM;

        t = kmalloc(sizeof *t, GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
                goto err_free_1;
        }

        ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

        spin_lock_irq(&priv->lock);

        if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
                spin_unlock_irq(&priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
                ret = -EINVAL;
                goto err_free;
        } else
                ++priv->cm.nonsrq_conn_qp;

        spin_unlock_irq(&priv->lock);

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
                                           rx->rx_ring[i].mapping,
                                           GFP_KERNEL)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        ret = -ENOMEM;
                        goto err_count;
                }
                ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
                if (ret) {
                        ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
                                   "failed for buf %d\n", i);
                        ret = -EIO;
                        goto err_count;
                }
        }

        rx->recv_count = ipoib_recvq_size;

        kfree(t);

        return 0;

err_count:
        spin_lock_irq(&priv->lock);
        --priv->cm.nonsrq_conn_qp;
        spin_unlock_irq(&priv->lock);

err_free:
        kfree(t);

err_free_1:
        ipoib_cm_free_rx_ring(dev, rx->rx_ring);

        return ret;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
                             struct ib_qp *qp, struct ib_cm_req_event_param *req,
                             unsigned psn)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_data data = {};
        struct ib_cm_rep_param rep = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

        rep.private_data = &data;
        rep.private_data_len = sizeof data;
        rep.flow_control = 0;
        rep.rnr_retry_count = req->rnr_retry_count;
        rep.srq = ipoib_cm_has_srq(dev);
        rep.qp_num = qp->qp_num;
        rep.starting_psn = psn;
        return ib_send_cm_rep(cm_id, &rep);
}
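
/*
 * Both REQ and REP carry a struct ipoib_cm_data blob as CM private
 * data, advertising the sender's datagram QPN and its receive buffer
 * size (both in network byte order); the peer reads the mtu back out
 * of this blob in ipoib_cm_rep_handler() below to size its transmits.
 */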

static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct net_device *dev = cm_id->context;
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_rx *p;
        unsigned psn;
        int ret;

        ipoib_dbg(priv, "REQ arrived\n");
        p = kzalloc(sizeof *p, GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        p->dev = dev;
        p->id = cm_id;
        cm_id->context = p;
        p->state = IPOIB_CM_RX_LIVE;
        p->jiffies = jiffies;
        INIT_LIST_HEAD(&p->list);

        p->qp = ipoib_cm_create_rx_qp(dev, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                goto err_qp;
        }

        psn = prandom_u32() & 0xffffff;
        ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
        if (ret)
                goto err_modify;

        if (!ipoib_cm_has_srq(dev)) {
                ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
                if (ret)
                        goto err_modify;
        }

        spin_lock_irq(&priv->lock);
        queue_delayed_work(priv->wq,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        /* Add this entry to passive ids list head, but do not re-add it
         * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
        p->jiffies = jiffies;
        if (p->state == IPOIB_CM_RX_LIVE)
                list_move(&p->list, &priv->cm.passive_ids);
        spin_unlock_irq(&priv->lock);

        ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
        if (ret) {
                ipoib_warn(priv, "failed to send REP: %d\n", ret);
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
        }
        return 0;

err_modify:
        ib_destroy_qp(p->qp);
err_qp:
        kfree(p);
        return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_rx *p;
        struct ipoib_dev_priv *priv;

        switch (event->event) {
        case IB_CM_REQ_RECEIVED:
                return ipoib_cm_req_handler(cm_id, event);
        case IB_CM_DREQ_RECEIVED:
                ib_send_cm_drep(cm_id, NULL, 0);
                /* Fall through */
        case IB_CM_REJ_RECEIVED:
                p = cm_id->context;
                priv = ipoib_priv(p->dev);
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
                /* Fall through */
        default:
                return 0;
        }
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
                          unsigned int length, struct sk_buff *toskb)
{
        int i, num_frags;
        unsigned int size;

        /* put header into skb */
        size = min(length, hdr_space);
        skb->tail += size;
        skb->len += size;
        length -= size;

        num_frags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < num_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                if (length == 0) {
                        /* don't need this page */
                        skb_fill_page_desc(toskb, i, skb_frag_page(frag),
                                           0, PAGE_SIZE);
                        --skb_shinfo(skb)->nr_frags;
                } else {
                        size = min(length, (unsigned) PAGE_SIZE);

                        skb_frag_size_set(frag, size);
                        skb->data_len += size;
                        skb->truesize += size;
                        skb->len += size;
                        length -= size;
                }
        }
}
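
/*
 * Pages the received data did not reach are not freed: they are
 * donated to toskb, the freshly allocated replacement rx skb, whose
 * DMA mappings for those slots are (if I read ipoib_cm_handle_rx_wc()
 * right) still valid from the original allocation, saving both a page
 * allocation and a remap on the refill path.
 */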

void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_rx_buf *rx_ring;
        unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
        struct sk_buff *skb, *newskb;
        struct ipoib_cm_rx *p;
        unsigned long flags;
        u64 mapping[IPOIB_CM_RX_SG];
        int frags;
        int has_srq;
        struct sk_buff *small_skb;

        ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
                        spin_lock_irqsave(&priv->lock, flags);
                        list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
                        ipoib_cm_start_rx_drain(priv);
                        queue_work(priv->wq, &priv->cm.rx_reap_task);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else
                        ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
                                   wr_id, ipoib_recvq_size);
                return;
        }

        p = wc->qp->qp_context;

        has_srq = ipoib_cm_has_srq(dev);
        rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

        skb = rx_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                ipoib_dbg(priv,
                          "cm recv error (status=%d, wrid=%d vend_err %#x)\n",
                          wc->status, wr_id, wc->vendor_err);
                ++dev->stats.rx_dropped;
                if (has_srq)
                        goto repost;
                else {
                        if (!--p->recv_count) {
                                spin_lock_irqsave(&priv->lock, flags);
                                list_move(&p->list, &priv->cm.rx_reap_list);
                                spin_unlock_irqrestore(&priv->lock, flags);
                                queue_work(priv->wq, &priv->cm.rx_reap_task);
                        }
                        return;
                }
        }

        if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
                if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
                        spin_lock_irqsave(&priv->lock, flags);
                        p->jiffies = jiffies;
                        /* Move this entry to list head, but do not re-add it
                         * if it has been moved out of list. */
                        if (p->state == IPOIB_CM_RX_LIVE)
                                list_move(&p->list, &priv->cm.passive_ids);
                        spin_unlock_irqrestore(&priv->lock, flags);
                }
        }

        if (wc->byte_len < IPOIB_CM_COPYBREAK) {
                int dlen = wc->byte_len;

                small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
                if (small_skb) {
                        skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
                        ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
                                                   dlen, DMA_FROM_DEVICE);
                        skb_copy_from_linear_data(skb, small_skb->data, dlen);
                        ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
                                                      dlen, DMA_FROM_DEVICE);
                        skb_put(small_skb, dlen);
                        skb = small_skb;
                        goto copied;
                }
        }

        frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
                                              (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

        newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
                                       mapping, GFP_ATOMIC);
        if (unlikely(!newskb)) {
                /*
                 * If we can't allocate a new RX buffer, dump
                 * this packet and reuse the old buffer.
                 */
                ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
                ++dev->stats.rx_dropped;
                goto repost;
        }

        ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
        memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_add_pseudo_hdr(skb);

        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;

        skb->dev = dev;
        /* XXX get correct PACKET_ type here */
        skb->pkt_type = PACKET_HOST;
        netif_receive_skb(skb);

repost:
        if (has_srq) {
                if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
                        ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
                                   "for buf %d\n", wr_id);
        } else {
                if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
                                                          &priv->cm.rx_wr,
                                                          priv->cm.rx_sge,
                                                          wr_id))) {
                        --p->recv_count;
                        ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
                                   "for buf %d\n", wr_id);
                }
        }
}

static inline int post_send(struct ipoib_dev_priv *priv,
                            struct ipoib_cm_tx *tx,
                            unsigned int wr_id,
                            struct ipoib_tx_buf *tx_req)
{
        struct ib_send_wr *bad_wr;

        ipoib_build_sge(priv, tx_req);

        priv->tx_wr.wr.wr_id    = wr_id | IPOIB_OP_CM;

        return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
}
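
/*
 * priv->tx_wr is shared by every connection on the device; this
 * should be safe because, as far as I can tell, post_send() is only
 * reached from ipoib_cm_send() in the xmit path with the netdev tx
 * lock held, so posts are serialized.
 */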

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int rc;
        unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);

        if (unlikely(skb->len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, tx->mtu);
                ++dev->stats.tx_dropped;
                ++dev->stats.tx_errors;
                ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
                return;
        }
        if (skb_shinfo(skb)->nr_frags > usable_sge) {
                if (skb_linearize(skb) < 0) {
                        ipoib_warn(priv, "skb could not be linearized\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
                /* Does skb_linearize return ok without reducing nr_frags? */
                if (skb_shinfo(skb)->nr_frags > usable_sge) {
                        ipoib_warn(priv, "too many frags after skb linearize\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
        }
        ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
                       tx->tx_head, skb->len, tx->qp->qp_num);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;

        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
                ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                          tx->qp->qp_num);
                netif_stop_queue(dev);
        }

        skb_orphan(skb);
        skb_dst_drop(skb);

        if (netif_queue_stopped(dev))
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
                                     IB_CQ_REPORT_MISSED_EVENTS)) {
                        ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
                        napi_schedule(&priv->send_napi);
                }

        rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
        if (unlikely(rc)) {
                ipoib_warn(priv, "IPoIB/CM:post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);

                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } else {
                netif_trans_update(dev);
                ++tx->tx_head;
                ++priv->tx_head;
        }
}

void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_tx *tx = wc->qp->qp_context;
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
        struct ipoib_tx_buf *tx_req;
        unsigned long flags;

        ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &tx->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv, tx_req);

        /* FIXME: is this right? Shouldn't we only increment on success? */
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        netif_tx_lock(dev);

        ++tx->tx_tail;
        ++priv->tx_tail;

        if (unlikely(netif_queue_stopped(dev) &&
                     (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
                     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
                netif_wake_queue(dev);

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_neigh *neigh;

                /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
                 * so don't make waves.
                 */
                if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
                    wc->status == IB_WC_RETRY_EXC_ERR)
                        ipoib_dbg(priv,
                                  "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
                                   __func__, wc->status, wr_id, wc->vendor_err);
                else
                        ipoib_warn(priv,
                                    "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
                                   __func__, wc->status, wr_id, wc->vendor_err);

                spin_lock_irqsave(&priv->lock, flags);
                neigh = tx->neigh;

                if (neigh) {
                        neigh->cm = NULL;
                        ipoib_neigh_free(neigh);

                        tx->neigh = NULL;
                }

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(priv->wq, &priv->cm.reap_task);
                }

                clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

                spin_unlock_irqrestore(&priv->lock, flags);
        }

        netif_tx_unlock(dev);
}

int ipoib_cm_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int ret;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                return 0;

        priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
        if (IS_ERR(priv->cm.id)) {
                printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
                ret = PTR_ERR(priv->cm.id);
                goto err_cm;
        }

        ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
                           0);
        if (ret) {
                printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
                       IPOIB_CM_IETF_ID | priv->qp->qp_num);
                goto err_listen;
        }

        return 0;

err_listen:
        ib_destroy_cm_id(priv->cm.id);
err_cm:
        priv->cm.id = NULL;
        return ret;
}
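
/*
 * The service ID we listen on is the IPoIB CM IETF prefix OR'd with
 * our own UD QPN, presumably so that a peer which learned our QPN
 * from address resolution can derive the service ID to connect to
 * (the connected mode spec, RFC 4755, describes the encoding).
 */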

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_rx *rx, *n;
        LIST_HEAD(list);

        spin_lock_irq(&priv->lock);
        list_splice_init(&priv->cm.rx_reap_list, &list);
        spin_unlock_irq(&priv->lock);

        list_for_each_entry_safe(rx, n, &list, list) {
                ib_destroy_cm_id(rx->id);
                ib_destroy_qp(rx->qp);
                if (!ipoib_cm_has_srq(dev)) {
                        ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
                        spin_lock_irq(&priv->lock);
                        --priv->cm.nonsrq_conn_qp;
                        spin_unlock_irq(&priv->lock);
                }
                kfree(rx);
        }
}

void ipoib_cm_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_rx *p;
        unsigned long begin;
        int ret;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
                return;

        ib_destroy_cm_id(priv->cm.id);
        priv->cm.id = NULL;

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                if (ret)
                        ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);
        }

        /* Wait for all RX to be drained */
        begin = jiffies;

        while (!list_empty(&priv->cm.rx_error_list) ||
               !list_empty(&priv->cm.rx_flush_list) ||
               !list_empty(&priv->cm.rx_drain_list)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "RX drain timing out\n");

                        /*
                         * assume the HW is wedged and just free up everything.
                         */
                        list_splice_init(&priv->cm.rx_flush_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_error_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_drain_list,
                                         &priv->cm.rx_reap_list);
                        break;
                }
                spin_unlock_irq(&priv->lock);
                usleep_range(1000, 2000);
                ipoib_drain_cq(dev);
                spin_lock_irq(&priv->lock);
        }

        spin_unlock_irq(&priv->lock);

        ipoib_cm_free_rx_reap_list(dev);

        cancel_delayed_work(&priv->cm.stale_task);
}

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct ipoib_cm_tx *p = cm_id->context;
        struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
        struct ipoib_cm_data *data = event->private_data;
        struct sk_buff_head skqueue;
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        struct sk_buff *skb;

        p->mtu = be32_to_cpu(data->mtu);

        if (p->mtu <= IPOIB_ENCAP_LEN) {
                ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
                           p->mtu, IPOIB_ENCAP_LEN);
                return -EINVAL;
        }

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }

        qp_attr.rq_psn = 0 /* FIXME */;
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return ret;
        }

        skb_queue_head_init(&skqueue);

        spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->neigh)
                while ((skb = __skb_dequeue(&p->neigh->queue)))
                        __skb_queue_tail(&skqueue, skb);
        spin_unlock_irq(&priv->lock);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = p->dev;
                ret = dev_queue_xmit(skb);
                if (ret)
                        ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
                                   __func__, ret);
        }

        ret = ib_send_cm_rtu(cm_id, NULL, 0);
        if (ret) {
                ipoib_warn(priv, "failed to send RTU: %d\n", ret);
                return ret;
        }
        return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_init_attr attr = {
                .send_cq                = priv->send_cq,
                .recv_cq                = priv->recv_cq,
                .srq                    = priv->cm.srq,
                .cap.max_send_wr        = ipoib_sendq_size,
                .cap.max_send_sge       = 1,
                .sq_sig_type            = IB_SIGNAL_ALL_WR,
                .qp_type                = IB_QPT_RC,
                .qp_context             = tx,
                .create_flags           = 0
        };
        struct ib_qp *tx_qp;

        if (dev->features & NETIF_F_SG)
                attr.cap.max_send_sge =
                        min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);

        tx_qp = ib_create_qp(priv->pd, &attr);
        tx->max_send_sge = attr.cap.max_send_sge;
        return tx_qp;
}

static int ipoib_cm_send_req(struct net_device *dev,
                             struct ib_cm_id *id, struct ib_qp *qp,
                             u32 qpn,
                             struct sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_data data = {};
        struct ib_cm_req_param req = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

        req.primary_path                = pathrec;
        req.alternate_path              = NULL;
        req.service_id                  = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
        req.qp_num                      = qp->qp_num;
        req.qp_type                     = qp->qp_type;
        req.private_data                = &data;
        req.private_data_len            = sizeof data;
        req.flow_control                = 0;

        req.starting_psn                = 0; /* FIXME */

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req.responder_resources         = 4;
        req.remote_cm_response_timeout  = 20;
        req.local_cm_response_timeout   = 20;
        req.retry_count                 = 0; /* RFC draft warns against retries */
        req.rnr_retry_count             = 0; /* RFC draft warns against retries */
        req.max_cm_retries              = 15;
        req.srq                         = ipoib_cm_has_srq(dev);
        return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
                                  struct ib_cm_id *cm_id, struct ib_qp *qp)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
        if (ret) {
                ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
        qp_attr.port_num = priv->port;
        qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
                return ret;
        }
        return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
                            struct sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
        unsigned int noio_flag;
        int ret;

        noio_flag = memalloc_noio_save();
        p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
        if (!p->tx_ring) {
                ret = -ENOMEM;
                goto err_tx;
        }
        memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

        p->qp = ipoib_cm_create_tx_qp(p->dev, p);
        memalloc_noio_restore(noio_flag);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
                goto err_qp;
        }

        p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
        if (IS_ERR(p->id)) {
                ret = PTR_ERR(p->id);
                ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
                goto err_id;
        }

        ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
                goto err_modify_send;
        }

        ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
        if (ret) {
                ipoib_warn(priv, "failed to send cm req: %d\n", ret);
                goto err_modify_send;
        }

        ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
                  p->qp->qp_num, pathrec->dgid.raw, qpn);

        return 0;

err_modify_send:
        ib_destroy_cm_id(p->id);
err_id:
        p->id = NULL;
        ib_destroy_qp(p->qp);
err_qp:
        p->qp = NULL;
        vfree(p->tx_ring);
err_tx:
        return ret;
}
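
/*
 * The tx ring and QP above are allocated under memalloc_noio_save(),
 * so any memory reclaim they trigger cannot recurse into I/O;
 * presumably this avoids deadlocking against traffic that would need
 * this very interface to make progress.
 */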

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
        struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
        struct ipoib_tx_buf *tx_req;
        unsigned long begin;

        ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
                  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

        if (p->id)
                ib_destroy_cm_id(p->id);

        if (p->tx_ring) {
                /* Wait for all sends to complete */
                begin = jiffies;
                while ((int) p->tx_tail - (int) p->tx_head < 0) {
                        if (time_after(jiffies, begin + 5 * HZ)) {
                                ipoib_warn(priv, "timing out; %d sends not completed\n",
                                           p->tx_head - p->tx_tail);
                                goto timeout;
                        }

                        usleep_range(1000, 2000);
                }
        }

timeout:

        while ((int) p->tx_tail - (int) p->tx_head < 0) {
                tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(tx_req->skb);
                netif_tx_lock_bh(p->dev);
                ++p->tx_tail;
                ++priv->tx_tail;
                if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
                    netif_queue_stopped(p->dev) &&
                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                        netif_wake_queue(p->dev);
                netif_tx_unlock_bh(p->dev);
        }

        if (p->qp)
                ib_destroy_qp(p->qp);

        vfree(p->tx_ring);
        kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_tx *tx = cm_id->context;
        struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
        unsigned long flags;
        int ret;

        switch (event->event) {
        case IB_CM_DREQ_RECEIVED:
                ipoib_dbg(priv, "DREQ received.\n");
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        case IB_CM_REP_RECEIVED:
                ipoib_dbg(priv, "REP received.\n");
                ret = ipoib_cm_rep_handler(cm_id, event);
                if (ret)
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                break;
        case IB_CM_REQ_ERROR:
        case IB_CM_REJ_RECEIVED:
        case IB_CM_TIMEWAIT_EXIT:
                ipoib_dbg(priv, "CM error %d.\n", event->event);
                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
                neigh = tx->neigh;

                if (neigh) {
                        neigh->cm = NULL;
                        ipoib_neigh_free(neigh);

                        tx->neigh = NULL;
                }

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(priv->wq, &priv->cm.reap_task);
                }

                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);
                break;
        default:
                break;
        }

        return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
                                       struct ipoib_neigh *neigh)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_tx *tx;

        tx = kzalloc(sizeof *tx, GFP_ATOMIC);
        if (!tx)
                return NULL;

        neigh->cm = tx;
        tx->neigh = neigh;
        tx->path = path;
        tx->dev = dev;
        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
        queue_work(priv->wq, &priv->cm.start_task);
        return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
        unsigned long flags;

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                spin_lock_irqsave(&priv->lock, flags);
                list_move(&tx->list, &priv->cm.reap_list);
                queue_work(priv->wq, &priv->cm.reap_task);
                ipoib_dbg(priv, "Reap connection for gid %pI6\n",
                          tx->neigh->daddr + 4);
                tx->neigh = NULL;
                spin_unlock_irqrestore(&priv->lock, flags);
        }
}

#define QPN_AND_OPTIONS_OFFSET  4

static void ipoib_cm_tx_start(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.start_task);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
        struct ipoib_cm_tx *p;
        unsigned long flags;
        struct ipoib_path *path;
        int ret;

        struct sa_path_rec pathrec;
        u32 qpn;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        while (!list_empty(&priv->cm.start_list)) {
                p = list_entry(priv->cm.start_list.next, typeof(*p), list);
                list_del_init(&p->list);
                neigh = p->neigh;

                qpn = IPOIB_QPN(neigh->daddr);
                /*
                 * As long as the search is done under these two locks,
                 * the path's existence indicates it is still valid.
                 */
                path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
                if (!path) {
                        pr_info("%s ignore not valid path %pI6\n",
                                __func__,
                                neigh->daddr + QPN_AND_OPTIONS_OFFSET);
                        goto free_neigh;
                }
                memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);

                ret = ipoib_cm_tx_init(p, qpn, &pathrec);

                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);

                if (ret) {
free_neigh:
                        neigh = p->neigh;
                        if (neigh) {
                                neigh->cm = NULL;
                                ipoib_neigh_free(neigh);
                        }
                        list_del(&p->list);
                        kfree(p);
                }
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.reap_task);
        struct net_device *dev = priv->dev;
        struct ipoib_cm_tx *p;
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        while (!list_empty(&priv->cm.reap_list)) {
                p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
                list_del_init(&p->list);
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);
                ipoib_cm_tx_destroy(p);
                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.skb_task);
        struct net_device *dev = priv->dev;
        struct sk_buff *skb;
        unsigned long flags;
        unsigned mtu = priv->mcast_mtu;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);

                if (skb->protocol == htons(ETH_P_IP))
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
                dev_kfree_skb_any(skb);

                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}
1451
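/*
 * Called on the TX path when an skb exceeds the connected-mode MTU:
 * update the route's PMTU and queue the skb for ipoib_cm_skb_reap(),
 * which sends the ICMP error outside of the hot path.  The work item
 * is only queued when the queue transitions from empty to non-empty.
 */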
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
                           unsigned int mtu)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int e = skb_queue_empty(&priv->cm.skb_queue);

        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

        skb_queue_tail(&priv->cm.skb_queue, skb);
        if (e)
                queue_work(priv->wq, &priv->cm.skb_task);
}

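/* Work item wrapper around ipoib_cm_free_rx_reap_list(). */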
static void ipoib_cm_rx_reap(struct work_struct *work)
{
        ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
                                                cm.rx_reap_task)->dev);
}

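/*
 * Move passive connections that have been idle for longer than
 * IPOIB_CM_RX_TIMEOUT onto the error list and transition their QPs
 * to the error state, which eventually feeds them to the RX reaping
 * path.  Reschedules itself while passive connections remain.
 */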
static void ipoib_cm_stale_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.stale_task.work);
        struct ipoib_cm_rx *p;
        int ret;

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                /*
                 * List is sorted by LRU, start from tail,
                 * stop when we see a recently used entry.
                 */
                p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
                if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
                        break;
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                if (ret)
                        ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);
        }

        if (!list_empty(&priv->cm.passive_ids))
                queue_delayed_work(priv->wq,
                                   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        spin_unlock_irq(&priv->lock);
}

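/*
 * sysfs "mode" attribute: reads report "connected" or "datagram",
 * writes switch the interface between the two transports via
 * ipoib_set_mode().
 */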
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
                         char *buf)
{
        struct net_device *dev = to_net_dev(d);
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
                return sprintf(buf, "connected\n");
        else
                return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
                        const char *buf, size_t count)
{
        struct net_device *dev = to_net_dev(d);
        int ret;
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
                return -EPERM;

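        /*
         * Take both locks with trylock so that this sysfs write can
         * never deadlock against a lock holder that is waiting on
         * sysfs; on contention, restart the syscall instead of
         * blocking.
         */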
        if (!mutex_trylock(&priv->sysfs_mutex))
                return restart_syscall();

        if (!rtnl_trylock()) {
                mutex_unlock(&priv->sysfs_mutex);
                return restart_syscall();
        }

        ret = ipoib_set_mode(dev, buf);

        /*
         * ipoib_set_mode() is expected to return with the rtnl lock
         * still held, unless it returned -EBUSY, in which case it has
         * already dropped the lock and we must not unlock it again.
         */
        if (ret != -EBUSY)
                rtnl_unlock();
        mutex_unlock(&priv->sysfs_mutex);

        return (!ret || ret == -EBUSY) ? count : ret;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_mode);
}

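/*
 * Try to create the shared receive queue used by connected mode.
 * SRQs are optional: if the HCA does not support them or creation
 * fails, priv->cm.srq is left NULL and the driver falls back to a
 * receive ring per connection.
 */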
static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_srq_init_attr srq_init_attr = {
                .srq_type = IB_SRQT_BASIC,
                .attr = {
                        .max_wr  = ipoib_recvq_size,
                        .max_sge = max_sge
                }
        };

        priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
        if (IS_ERR(priv->cm.srq)) {
                if (PTR_ERR(priv->cm.srq) != -ENOSYS)
                        printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
                               priv->ca->name, PTR_ERR(priv->cm.srq));
                priv->cm.srq = NULL;
                return;
        }

        priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof(*priv->cm.srq_ring));
        if (!priv->cm.srq_ring) {
                ib_destroy_srq(priv->cm.srq);
                priv->cm.srq = NULL;
        }
}

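/*
 * Per-device connected-mode initialisation: set up the work items
 * and lists, size the receive path according to SRQ availability,
 * and, when an SRQ exists, allocate and post the shared receive
 * ring.
 */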
int ipoib_cm_dev_init(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int max_srq_sge, i;

        INIT_LIST_HEAD(&priv->cm.passive_ids);
        INIT_LIST_HEAD(&priv->cm.reap_list);
        INIT_LIST_HEAD(&priv->cm.start_list);
        INIT_LIST_HEAD(&priv->cm.rx_error_list);
        INIT_LIST_HEAD(&priv->cm.rx_flush_list);
        INIT_LIST_HEAD(&priv->cm.rx_drain_list);
        INIT_LIST_HEAD(&priv->cm.rx_reap_list);
        INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
        INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
        INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
        INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
        INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

        skb_queue_head_init(&priv->cm.skb_queue);

        ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);

        max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
        ipoib_cm_create_srq(dev, max_srq_sge);
        if (ipoib_cm_has_srq(dev)) {
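                /*
                 * Size the CM MTU to the full scatter/gather
                 * capacity, minus 16 bytes of headroom.
                 */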
                priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
                priv->cm.num_frags  = max_srq_sge;
                ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
                          priv->cm.max_cm_mtu, priv->cm.num_frags);
        } else {
                priv->cm.max_cm_mtu = IPOIB_CM_MTU;
                priv->cm.num_frags  = IPOIB_CM_RX_SG;
        }

        ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

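        /*
         * With an SRQ the receive ring is shared by every connection,
         * so its buffers can be allocated and posted once, up front.
         */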
        if (ipoib_cm_has_srq(dev)) {
                for (i = 0; i < ipoib_recvq_size; ++i) {
                        if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
                                                   priv->cm.num_frags - 1,
                                                   priv->cm.srq_ring[i].mapping,
                                                   GFP_KERNEL)) {
                                ipoib_warn(priv, "failed to allocate receive buffer %d\n",
                                           i);
                                ipoib_cm_dev_cleanup(dev);
                                return -ENOMEM;
                        }

                        if (ipoib_cm_post_receive_srq(dev, i)) {
                                ipoib_warn(priv, "ipoib_cm_post_receive_srq failed for buf %d\n",
                                           i);
                                ipoib_cm_dev_cleanup(dev);
                                return -EIO;
                        }
                }
        }

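        /*
         * Advertise connected-mode (RC) support in the flags byte of
         * the hardware address.
         */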
        priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
        return 0;
}

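/* Undo ipoib_cm_dev_init(): destroy the SRQ and free its receive ring. */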
void ipoib_cm_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int ret;

        if (!priv->cm.srq)
                return;

        ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

        ret = ib_destroy_srq(priv->cm.srq);
        if (ret)
                ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

        priv->cm.srq = NULL;
        if (!priv->cm.srq_ring)
                return;

        ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
        priv->cm.srq_ring = NULL;
}