/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

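/* For the first wqe being retried, skip over the npsn packets that the
 * completer has already acknowledged: advance the DMA state (and the iova
 * for RDMA writes) so retransmission resumes at the first unacked packet.
 */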
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}

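/* Rewind the requester to the first unacknowledged PSN and reset every
 * unfinished wqe on the send queue so it can be retransmitted.
 */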
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;

	wqe = queue_head(qp->sq.queue);
	npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;

	qp->req.wqe_index	= consumer_index(qp->sq.queue);
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = consumer_index(qp->sq.queue);
		wqe_index != producer_index(qp->sq.queue);
		wqe_index = next_index(qp->sq.queue, wqe_index)) {
		wqe = addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK)
				retry_first_write_send(qp, wqe, mask, npsn);

			if (mask & WR_READ_MASK)
				wqe->iova += npsn * qp->mtu;
		}

		wqe->state = wqe_state_posted;
	}
}

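/* The RNR NAK back-off period has expired; kick the requester task. */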
void rnr_nak_timer(unsigned long data)
{
	struct rxe_qp *qp = (struct rxe_qp *)data;

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}

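/* Return the next send wqe to work on, or NULL if the send queue is
 * empty, the QP is draining/drained, or a fenced wqe must wait for
 * earlier work requests to complete.  Raises IB_EVENT_SQ_DRAINED once
 * the drain completes.
 */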
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
	unsigned long flags;

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((qp->req.wqe_index !=
				consumer_index(qp->sq.queue)) ||
				(wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (qp->req.wqe_index == producer_index(qp->sq.queue))
		return NULL;

	wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
		     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

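/* Choose the next RC packet opcode for a work request, picking
 * FIRST/MIDDLE/LAST/ONLY from the previously sent opcode and from
 * whether the remaining payload fits in one MTU.
 */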
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;
	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

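/* Same as next_opcode_rc() but for the UC opcode space. */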
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

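/* Select the next packet opcode for this wqe based on the QP type. */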
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

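/* Reserve one entry of the initiator's outstanding read/atomic budget
 * for this wqe; returns -EAGAIN if the budget is exhausted.
 */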
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

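/* Path MTU for connected QPs, otherwise the port MTU cap. */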
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

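/* Allocate the request skb and build its headers: the BTH plus whichever
 * of RETH, IMMDT, IETH, ATMETH and DETH the opcode requires.
 */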
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct rxe_port		*port = &rxe->port;
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
	struct rxe_av		*av;
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u16			pkey;
	u32			qp_num;
	int			ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, rxe, port_num and mask are initialized in ifc
	 * layer
	 */
	pkt->opcode	= opcode;
	pkt->qp		= qp;
	pkt->psn	= qp->req.psn;
	pkt->mask	= rxe_opcode[opcode].mask;
	pkt->paylen	= paylen;
	pkt->offset	= 0;
	pkt->wqe	= wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = (qp_type(qp) == IB_QPT_GSI) ?
		 port->pkey_tbl[ibwr->wr.ud.pkey_index] :
		 port->pkey_tbl[qp->attr.pkey_index];

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.length);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

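/* Copy the payload (inline data or data from the sg list) into the packet
 * and append the inverted CRC32 as the ICRC trailer.
 */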
static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
		       int paylen)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 crc = 0;
	u32 *p;
	int err;

	err = rxe_prepare(rxe, pkt, skb, &crc);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			crc = crc32_le(crc, tmp, paylen);

			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(rxe, qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mem_obj,
					&crc);
			if (err)
				return err;
		}
	}
	p = payload_addr(pkt) + paylen + bth_pad(pkt);

	*p = ~crc;

	return 0;
}

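/* On the last packet of an RC message the wqe waits for an ack (pending);
 * packets before the last leave the wqe in the processing state.
 */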
static void update_wqe_state(struct rxe_qp *qp,
		struct rxe_send_wqe *wqe,
		struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

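/* On the first packet of a message record the PSN range it will use, then
 * advance the requester PSN; a read request advances it past the PSNs
 * reserved for the expected response.
 */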
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

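/* Snapshot the wqe state and the requester PSN so rollback_state() can
 * restore them if the transmit fails.
 */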
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state     = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn  = wqe->last_psn;
	*rollback_psn           = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state     = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn  = rollback_wqe->last_psn;
	qp->req.psn    = rollback_psn;
}

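/* Commit requester state after a successful transmit: remember the opcode,
 * advance to the next wqe at the end of a message and arm the retransmit
 * timer if it is not already running.
 */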
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

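/* Main requester task: pull wqes from the send queue, generate request
 * packets and hand them to rxe_xmit_packet(), handling retries, flow
 * control and local register/invalidate work requests along the way.
 * Returns -EAGAIN when there is nothing more to do.
 */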
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;

	rxe_add_ref(qp);

next_wqe:
	if (unlikely(!qp->valid))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_ERROR)) {
		rxe_drain_req_pkts(qp, true);
		goto exit;
	}

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = consumer_index(qp->sq.queue);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_REG_MASK) {
		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
			struct rxe_mem *rmr;

			rmr = rxe_pool_get_index(&rxe->mr_pool,
						 wqe->wr.ex.invalidate_rkey >> 8);
			if (!rmr) {
				pr_err("No mr for key %#x\n",
				       wqe->wr.ex.invalidate_rkey);
				wqe->state = wqe_state_error;
				wqe->status = IB_WC_MW_BIND_ERR;
				goto exit;
			}
			rmr->state = RXE_MEM_STATE_FREE;
			rxe_drop_ref(rmr);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

			rmr->state = RXE_MEM_STATE_VALID;
			rmr->access = wqe->wr.wr.reg.access;
			rmr->lkey = wqe->wr.wr.reg.key;
			rmr->rkey = wqe->wr.wr.reg.key;
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else {
			goto exit;
		}
		qp->req.wqe_index = next_index(qp->sq.queue,
						qp->req.wqe_index);
		goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		     qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			rxe_drop_ref(qp);
			return 0;
		}
		payload = mtu;
	}

	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		goto err;
	}

	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
		goto err;
	}

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			kfree_skb(skb);
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		goto err;
	}

	update_state(qp, wqe, &pkt, payload);

	goto next_wqe;

err:
	kfree_skb(skb);
	wqe->status = IB_WC_LOC_PROT_ERR;
	wqe->state = wqe_state_error;
	__rxe_do_task(&qp->comp.task);

exit:
	rxe_drop_ref(qp);
	return -EAGAIN;
}