/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
        RESPST_NONE,
        RESPST_GET_REQ,
        RESPST_CHK_PSN,
        RESPST_CHK_OP_SEQ,
        RESPST_CHK_OP_VALID,
        RESPST_CHK_RESOURCE,
        RESPST_CHK_LENGTH,
        RESPST_CHK_RKEY,
        RESPST_EXECUTE,
        RESPST_READ_REPLY,
        RESPST_COMPLETE,
        RESPST_ACKNOWLEDGE,
        RESPST_CLEANUP,
        RESPST_DUPLICATE_REQUEST,
        RESPST_ERR_MALFORMED_WQE,
        RESPST_ERR_UNSUPPORTED_OPCODE,
        RESPST_ERR_MISALIGNED_ATOMIC,
        RESPST_ERR_PSN_OUT_OF_SEQ,
        RESPST_ERR_MISSING_OPCODE_FIRST,
        RESPST_ERR_MISSING_OPCODE_LAST_C,
        RESPST_ERR_MISSING_OPCODE_LAST_D1E,
        RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
        RESPST_ERR_RNR,
        RESPST_ERR_RKEY_VIOLATION,
        RESPST_ERR_LENGTH,
        RESPST_ERR_CQ_OVERFLOW,
        RESPST_ERROR,
        RESPST_RESET,
        RESPST_DONE,
        RESPST_EXIT,
};

static char *resp_state_name[] = {
        [RESPST_NONE]                           = "NONE",
        [RESPST_GET_REQ]                        = "GET_REQ",
        [RESPST_CHK_PSN]                        = "CHK_PSN",
        [RESPST_CHK_OP_SEQ]                     = "CHK_OP_SEQ",
        [RESPST_CHK_OP_VALID]                   = "CHK_OP_VALID",
        [RESPST_CHK_RESOURCE]                   = "CHK_RESOURCE",
        [RESPST_CHK_LENGTH]                     = "CHK_LENGTH",
        [RESPST_CHK_RKEY]                       = "CHK_RKEY",
        [RESPST_EXECUTE]                        = "EXECUTE",
        [RESPST_READ_REPLY]                     = "READ_REPLY",
        [RESPST_COMPLETE]                       = "COMPLETE",
        [RESPST_ACKNOWLEDGE]                    = "ACKNOWLEDGE",
        [RESPST_CLEANUP]                        = "CLEANUP",
        [RESPST_DUPLICATE_REQUEST]              = "DUPLICATE_REQUEST",
        [RESPST_ERR_MALFORMED_WQE]              = "ERR_MALFORMED_WQE",
        [RESPST_ERR_UNSUPPORTED_OPCODE]         = "ERR_UNSUPPORTED_OPCODE",
        [RESPST_ERR_MISALIGNED_ATOMIC]          = "ERR_MISALIGNED_ATOMIC",
        [RESPST_ERR_PSN_OUT_OF_SEQ]             = "ERR_PSN_OUT_OF_SEQ",
        [RESPST_ERR_MISSING_OPCODE_FIRST]       = "ERR_MISSING_OPCODE_FIRST",
        [RESPST_ERR_MISSING_OPCODE_LAST_C]      = "ERR_MISSING_OPCODE_LAST_C",
        [RESPST_ERR_MISSING_OPCODE_LAST_D1E]    = "ERR_MISSING_OPCODE_LAST_D1E",
        [RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]      = "ERR_TOO_MANY_RDMA_ATM_REQ",
        [RESPST_ERR_RNR]                        = "ERR_RNR",
        [RESPST_ERR_RKEY_VIOLATION]             = "ERR_RKEY_VIOLATION",
        [RESPST_ERR_LENGTH]                     = "ERR_LENGTH",
        [RESPST_ERR_CQ_OVERFLOW]                = "ERR_CQ_OVERFLOW",
        [RESPST_ERROR]                          = "ERROR",
        [RESPST_RESET]                          = "RESET",
        [RESPST_DONE]                           = "DONE",
        [RESPST_EXIT]                           = "EXIT",
};
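
/*
 * The responder is a state machine: rxe_responder() below starts in
 * RESPST_GET_REQ (or RESPST_RESET) and steps through the RESPST_* states
 * above until a handler returns RESPST_DONE or RESPST_EXIT.  The normal
 * path for a valid request is roughly
 *
 *      GET_REQ -> CHK_PSN -> CHK_OP_SEQ -> CHK_OP_VALID -> CHK_RESOURCE
 *              -> CHK_LENGTH -> CHK_RKEY -> EXECUTE
 *              -> COMPLETE/READ_REPLY -> ACKNOWLEDGE -> CLEANUP
 *
 * with the ERR_* states branching off to NAK and/or complete in error.
 */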

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
                        struct sk_buff *skb)
{
        int must_sched;
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

        skb_queue_tail(&qp->req_pkts, skb);

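        /* run the responder task in the tasklet when the reply may be
         * long-running (an RDMA read) or when packets are already backed
         * up; otherwise run it inline in the caller's context
         */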
        must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
                        (skb_queue_len(&qp->req_pkts) > 1);

        rxe_run_task(&qp->resp.task, must_sched);
}

static inline enum resp_states get_req(struct rxe_qp *qp,
                                       struct rxe_pkt_info **pkt_p)
{
        struct sk_buff *skb;

        if (qp->resp.state == QP_STATE_ERROR) {
                skb = skb_dequeue(&qp->req_pkts);
                if (skb) {
                        /* drain request packet queue */
                        rxe_drop_ref(qp);
                        kfree_skb(skb);
                        return RESPST_GET_REQ;
                }

                /* go drain recv wr queue */
                return RESPST_CHK_RESOURCE;
        }

        skb = skb_peek(&qp->req_pkts);
        if (!skb)
                return RESPST_EXIT;

        *pkt_p = SKB_TO_PKT(skb);

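        /* a non-NULL qp->resp.res means a multi-packet read reply is
         * still in progress; jump straight back into it
         */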
        return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

static enum resp_states check_psn(struct rxe_qp *qp,
                                  struct rxe_pkt_info *pkt)
{
        int diff = psn_compare(pkt->psn, qp->resp.psn);
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (diff > 0) {
                        if (qp->resp.sent_psn_nak)
                                return RESPST_CLEANUP;

                        qp->resp.sent_psn_nak = 1;
                        rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
                        return RESPST_ERR_PSN_OUT_OF_SEQ;

                } else if (diff < 0) {
                        rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
                        return RESPST_DUPLICATE_REQUEST;
                }

                if (qp->resp.sent_psn_nak)
                        qp->resp.sent_psn_nak = 0;

                break;

        case IB_QPT_UC:
                if (qp->resp.drop_msg || diff != 0) {
                        if (pkt->mask & RXE_START_MASK) {
                                qp->resp.drop_msg = 0;
                                return RESPST_CHK_OP_SEQ;
                        }

                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }
                break;
        default:
                break;
        }

        return RESPST_CHK_OP_SEQ;
}
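
/*
 * check_psn() depends on psn_compare() doing wrap-aware serial-number
 * arithmetic on 24-bit PSNs.  A minimal sketch of that comparison (the
 * actual helper lives elsewhere in the driver headers):
 *
 *      static inline int psn_compare_sketch(u32 psn_a, u32 psn_b)
 *      {
 *              // shift the 24-bit difference into the sign bit
 *              s32 diff = (psn_a - psn_b) << 8;
 *
 *              return diff;    // > 0: a after b, < 0: a before b, 0: equal
 *      }
 */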

static enum resp_states check_op_seq(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_RC_SEND_FIRST:
                case IB_OPCODE_RC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                case IB_OPCODE_RC_RDMA_WRITE_FIRST:
                case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_ERR_MISSING_OPCODE_FIRST;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        case IB_QPT_UC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_UC_SEND_FIRST:
                case IB_OPCODE_UC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                case IB_OPCODE_UC_RDMA_WRITE_FIRST:
                case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                qp->resp.drop_msg = 1;
                                return RESPST_CLEANUP;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        default:
                return RESPST_CHK_OP_VALID;
        }
}

static enum resp_states check_op_valid(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (((pkt->mask & RXE_READ_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
                    ((pkt->mask & RXE_WRITE_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
                    ((pkt->mask & RXE_ATOMIC_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
                        return RESPST_ERR_UNSUPPORTED_OPCODE;
                }

                break;

        case IB_QPT_UC:
                if ((pkt->mask & RXE_WRITE_MASK) &&
                    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }

                break;

        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }

        return RESPST_CHK_RESOURCE;
}

static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
        struct rxe_srq *srq = qp->srq;
        struct rxe_queue *q = srq->rq.queue;
        struct rxe_recv_wqe *wqe;
        struct ib_event ev;

        if (srq->error)
                return RESPST_ERR_RNR;

        spin_lock_bh(&srq->rq.consumer_lock);

        wqe = queue_head(q);
        if (!wqe) {
                spin_unlock_bh(&srq->rq.consumer_lock);
                return RESPST_ERR_RNR;
        }

        /* note kernel and user space recv wqes have same size */
        memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

        qp->resp.wqe = &qp->resp.srq_wqe.wqe;
        advance_consumer(q);

        if (srq->limit && srq->ibsrq.event_handler &&
            (queue_count(q) < srq->limit)) {
                srq->limit = 0;
                goto event;
        }

        spin_unlock_bh(&srq->rq.consumer_lock);
        return RESPST_CHK_LENGTH;

event:
        spin_unlock_bh(&srq->rq.consumer_lock);
        ev.device = qp->ibqp.device;
        ev.element.srq = qp->ibqp.srq;
        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
        srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
        return RESPST_CHK_LENGTH;
}

static enum resp_states check_resource(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        struct rxe_srq *srq = qp->srq;

        if (qp->resp.state == QP_STATE_ERROR) {
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_WR_FLUSH_ERR;
                        return RESPST_COMPLETE;
                } else if (!srq) {
                        qp->resp.wqe = queue_head(qp->rq.queue);
                        if (qp->resp.wqe) {
                                qp->resp.status = IB_WC_WR_FLUSH_ERR;
                                return RESPST_COMPLETE;
                        } else {
                                return RESPST_EXIT;
                        }
                } else {
                        return RESPST_EXIT;
                }
        }

        if (pkt->mask & RXE_READ_OR_ATOMIC) {
                /* it is the requester's job not to send
                 * too many read/atomic ops; we just
                 * recycle the responder resource queue
                 */
                if (likely(qp->attr.max_dest_rd_atomic > 0))
                        return RESPST_CHK_LENGTH;
                else
                        return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
        }

        if (pkt->mask & RXE_RWR_MASK) {
                if (srq)
                        return get_srq_wqe(qp);

                qp->resp.wqe = queue_head(qp->rq.queue);
                return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
        }

        return RESPST_CHK_LENGTH;
}

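/*
 * check_length() is currently a placeholder: every QP type falls through
 * to RESPST_CHK_RKEY, and the real payload-length validation is done
 * against the MR in check_rkey() below.
 */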
static enum resp_states check_length(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                return RESPST_CHK_RKEY;

        case IB_QPT_UC:
                return RESPST_CHK_RKEY;

        default:
                return RESPST_CHK_RKEY;
        }
}

static enum resp_states check_rkey(struct rxe_qp *qp,
                                   struct rxe_pkt_info *pkt)
{
        struct rxe_mem *mem = NULL;
        u64 va;
        u32 rkey;
        u32 resid;
        u32 pktlen;
        int mtu = qp->mtu;
        enum resp_states state;
        int access;

        if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
                if (pkt->mask & RXE_RETH_MASK) {
                        qp->resp.va = reth_va(pkt);
                        qp->resp.rkey = reth_rkey(pkt);
                        qp->resp.resid = reth_len(pkt);
                }
                access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
                                                     : IB_ACCESS_REMOTE_WRITE;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                qp->resp.va = atmeth_va(pkt);
                qp->resp.rkey = atmeth_rkey(pkt);
                qp->resp.resid = sizeof(u64);
                access = IB_ACCESS_REMOTE_ATOMIC;
        } else {
                return RESPST_EXECUTE;
        }

        /* A zero-byte op is not required to set an addr or rkey. */
        if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
            (pkt->mask & RXE_RETH_MASK) &&
            reth_len(pkt) == 0) {
                return RESPST_EXECUTE;
        }

        va      = qp->resp.va;
        rkey    = qp->resp.rkey;
        resid   = qp->resp.resid;
        pktlen  = payload_size(pkt);

        mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
        if (!mem) {
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err;
        }

        if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err;
        }

        if (mem_check_range(mem, va, resid)) {
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err;
        }

        if (pkt->mask & RXE_WRITE_MASK)  {
                if (resid > mtu) {
                        if (pktlen != mtu || bth_pad(pkt)) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                } else {
                        if (pktlen != resid) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
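                        /* the BTH pad count must round resid up to a
                         * 4-byte boundary, e.g. resid = 5 ->
                         * pad = (-5) & 0x3 = 3
                         */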
                        if ((bth_pad(pkt) != (0x3 & (-resid)))) {
                                /* This error class may not be exactly
                                 * right, but nothing else fits.
                                 */
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                }
        }

        WARN_ON_ONCE(qp->resp.mr);

        qp->resp.mr = mem;
        return RESPST_EXECUTE;

err:
        if (mem)
                rxe_drop_ref(mem);
        return state;
}

static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
                                     int data_len)
{
        int err;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
                        data_addr, data_len, to_mem_obj, NULL);
        if (unlikely(err))
                return (err == -ENOSPC) ? RESPST_ERR_LENGTH
                                        : RESPST_ERR_MALFORMED_WQE;

        return RESPST_NONE;
}

static enum resp_states write_data_in(struct rxe_qp *qp,
                                      struct rxe_pkt_info *pkt)
{
        enum resp_states rc = RESPST_NONE;
        int     err;
        int data_len = payload_size(pkt);

        err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
                           data_len, to_mem_obj, NULL);
        if (err) {
                rc = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        qp->resp.va += data_len;
        qp->resp.resid -= data_len;

out:
        return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);
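/* note this is a single driver-global lock: every RDMA atomic handled by
 * this responder, on any QP or MR, serializes here; simple, at the cost
 * of cross-QP contention
 */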

static enum resp_states process_atomic(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        u64 iova = atmeth_va(pkt);
        u64 *vaddr;
        enum resp_states ret;
        struct rxe_mem *mr = qp->resp.mr;

        if (mr->state != RXE_MEM_STATE_VALID) {
                ret = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

        /* check vaddr is 8 bytes aligned. */
        if (!vaddr || (uintptr_t)vaddr & 7) {
                ret = RESPST_ERR_MISALIGNED_ATOMIC;
                goto out;
        }

        spin_lock_bh(&atomic_ops_lock);

        qp->resp.atomic_orig = *vaddr;

        if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
            pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
                if (*vaddr == atmeth_comp(pkt))
                        *vaddr = atmeth_swap_add(pkt);
        } else {
                *vaddr += atmeth_swap_add(pkt);
        }

        spin_unlock_bh(&atomic_ops_lock);

        ret = RESPST_NONE;
out:
        return ret;
}

static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt,
                                          struct rxe_pkt_info *ack,
                                          int opcode,
                                          int payload,
                                          u32 psn,
                                          u8 syndrome,
                                          u32 *crcp)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct sk_buff *skb;
        u32 crc = 0;
        u32 *p;
        int paylen;
        int pad;
        int err;

        /*
         * allocate the packet: round the payload up to a 4-byte boundary
         * and add room for the transport headers and the 4-byte ICRC
         * trailer
         */
        pad = (-payload) & 0x3;
        paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

        skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
        if (!skb)
                return NULL;

        ack->qp = qp;
        ack->opcode = opcode;
        ack->mask = rxe_opcode[opcode].mask;
        ack->offset = pkt->offset;
        ack->paylen = paylen;

        /* fill in bth using the request packet headers */
        memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

        bth_set_opcode(ack, opcode);
        bth_set_qpn(ack, qp->attr.dest_qp_num);
        bth_set_pad(ack, pad);
        bth_set_se(ack, 0);
        bth_set_psn(ack, psn);
        bth_set_ack(ack, 0);
        ack->psn = psn;

        if (ack->mask & RXE_AETH_MASK) {
                aeth_set_syn(ack, syndrome);
                aeth_set_msn(ack, qp->resp.msn);
        }

        if (ack->mask & RXE_ATMACK_MASK)
                atmack_set_orig(ack, qp->resp.atomic_orig);

        err = rxe_prepare(rxe, ack, skb, &crc);
        if (err) {
                kfree_skb(skb);
                return NULL;
        }

        if (crcp) {
                /* CRC computation will be continued by the caller */
                *crcp = crc;
        } else {
                p = payload_addr(ack) + payload + bth_pad(ack);
                *p = ~crc;
        }

        return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA
 * read request being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
                                   struct rxe_pkt_info *req_pkt)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        int mtu = qp->mtu;
        enum resp_states state;
        int payload;
        int opcode;
        int err;
        struct resp_res *res = qp->resp.res;
        u32 icrc;
        u32 *p;

        if (!res) {
                /* This is the first time we process that request. Get a
                 * resource
                 */
                res = &qp->resp.resources[qp->resp.res_head];

                free_rd_atomic_resource(qp, res);
                rxe_advance_resp_resource(qp);

                res->type               = RXE_READ_MASK;

                res->read.va            = qp->resp.va;
                res->read.va_org        = qp->resp.va;

                res->first_psn          = req_pkt->psn;

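                /* a read of L bytes produces ceil(L / mtu) response
                 * packets, so the last PSN is
                 * first_psn + ceil(L / mtu) - 1 (mod 2^24)
                 */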
                if (reth_len(req_pkt)) {
                        res->last_psn   = (req_pkt->psn +
                                           (reth_len(req_pkt) + mtu - 1) /
                                           mtu - 1) & BTH_PSN_MASK;
                } else {
                        res->last_psn   = res->first_psn;
                }
                res->cur_psn            = req_pkt->psn;

                res->read.resid         = qp->resp.resid;
                res->read.length        = qp->resp.resid;
                res->read.rkey          = qp->resp.rkey;

                /* note res inherits the reference to mr from qp */
                res->read.mr            = qp->resp.mr;
                qp->resp.mr             = NULL;

                qp->resp.res            = res;
                res->state              = rdatm_res_state_new;
        }

        if (res->state == rdatm_res_state_new) {
                if (res->read.resid <= mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
        } else {
                if (res->read.resid > mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
        }

        res->state = rdatm_res_state_next;

        payload = min_t(int, res->read.resid, mtu);

        skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
                                 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
        if (!skb)
                return RESPST_ERR_RNR;

        err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
                           payload, from_mem_obj, &icrc);
        if (err)
                pr_err("Failed copying memory\n");

        p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
        *p = ~icrc;

        err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
        if (err) {
                pr_err("Failed sending RDMA reply.\n");
                return RESPST_ERR_RNR;
        }

        res->read.va += payload;
        res->read.resid -= payload;
        res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

        if (res->read.resid > 0) {
                state = RESPST_DONE;
        } else {
                qp->resp.res = NULL;
                qp->resp.opcode = -1;
                if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
                        qp->resp.psn = res->cur_psn;
                state = RESPST_CLEANUP;
        }

        return state;
}

static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
                                   struct rxe_pkt_info *pkt)
{
        struct sk_buff *skb = PKT_TO_SKB(pkt);

        memset(hdr, 0, sizeof(*hdr));
        if (skb->protocol == htons(ETH_P_IP))
                memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
        else if (skb->protocol == htons(ETH_P_IPV6))
                memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
        enum resp_states err;

        if (pkt->mask & RXE_SEND_MASK) {
                if (qp_type(qp) == IB_QPT_UD ||
                    qp_type(qp) == IB_QPT_SMI ||
                    qp_type(qp) == IB_QPT_GSI) {
                        union rdma_network_hdr hdr;

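                        /* UD-style receives get a 40-byte network header
                         * (the GRH, or the equivalent IPv4/IPv6 header)
                         * placed at the front of the receive buffer
                         */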
                        build_rdma_network_hdr(&hdr, pkt);

                        err = send_data_in(qp, &hdr, sizeof(hdr));
                        if (err)
                                return err;
                }
                err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
                if (err)
                        return err;
        } else if (pkt->mask & RXE_WRITE_MASK) {
                err = write_data_in(qp, pkt);
                if (err)
                        return err;
        } else if (pkt->mask & RXE_READ_MASK) {
                /* For RDMA Read we can increment the msn now. See C9-148. */
                qp->resp.msn++;
                return RESPST_READ_REPLY;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                err = process_atomic(qp, pkt);
                if (err)
                        return err;
        } else {
                /* Unreachable */
                WARN_ON_ONCE(1);
        }

        /* next expected psn, read handles this separately */
        qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

        qp->resp.opcode = pkt->opcode;
        qp->resp.status = IB_WC_SUCCESS;

        if (pkt->mask & RXE_COMP_MASK) {
                /* We successfully processed this new request. */
                qp->resp.msn++;
                return RESPST_COMPLETE;
        } else if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

static enum resp_states do_complete(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        struct rxe_cqe cqe;
        struct ib_wc *wc = &cqe.ibwc;
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        struct rxe_recv_wqe *wqe = qp->resp.wqe;

        if (unlikely(!wqe))
                return RESPST_CLEANUP;

        memset(&cqe, 0, sizeof(cqe));

        wc->wr_id               = wqe->wr_id;
        wc->status              = qp->resp.status;
        wc->qp                  = &qp->ibqp;

        /* fields after status are not required for errors */
        if (wc->status == IB_WC_SUCCESS) {
                wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
                wc->vendor_err = 0;
                wc->byte_len = wqe->dma.length - wqe->dma.resid;

                /* fields after byte_len are different between kernel and user
                 * space
                 */
                if (qp->rcq->is_user) {
                        uwc->wc_flags = IB_WC_GRH;

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_IMM;
                                uwc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                uwc->ex.invalidate_rkey = ieth_rkey(pkt);
                        }

                        uwc->qp_num             = qp->ibqp.qp_num;

                        if (pkt->mask & RXE_DETH_MASK)
                                uwc->src_qp = deth_sqp(pkt);

                        uwc->port_num           = qp->attr.port_num;
                } else {
                        struct sk_buff *skb = PKT_TO_SKB(pkt);

                        wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
                        if (skb->protocol == htons(ETH_P_IP))
                                wc->network_hdr_type = RDMA_NETWORK_IPV4;
                        else
                                wc->network_hdr_type = RDMA_NETWORK_IPV6;

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                wc->wc_flags |= IB_WC_WITH_IMM;
                                wc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
                                struct rxe_mem *rmr;

                                wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                wc->ex.invalidate_rkey = ieth_rkey(pkt);

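                                /* the MR's pool index is kept in the
                                 * upper 24 bits of the rkey; the low
                                 * byte is the key portion
                                 */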
                                rmr = rxe_pool_get_index(&rxe->mr_pool,
                                                         wc->ex.invalidate_rkey >> 8);
                                if (unlikely(!rmr)) {
                                        pr_err("Bad rkey %#x invalidation\n",
                                               wc->ex.invalidate_rkey);
                                        return RESPST_ERROR;
                                }
                                rmr->state = RXE_MEM_STATE_FREE;
                                rxe_drop_ref(rmr);
                        }

                        wc->qp                  = &qp->ibqp;

                        if (pkt->mask & RXE_DETH_MASK)
                                wc->src_qp = deth_sqp(pkt);

                        wc->port_num            = qp->attr.port_num;
                }
        }

        /* have copy for srq and reference for !srq */
        if (!qp->srq)
                advance_consumer(qp->rq.queue);

        qp->resp.wqe = NULL;

        if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
                return RESPST_ERR_CQ_OVERFLOW;

        if (qp->resp.state == QP_STATE_ERROR)
                return RESPST_CHK_RESOURCE;

        if (!pkt)
                return RESPST_DONE;
        else if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                    u8 syndrome, u32 psn)
{
        int err = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
                                 0, psn, syndrome, NULL);
        if (!skb) {
                err = -ENOMEM;
                goto err1;
        }

        err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
        if (err)
                pr_err_ratelimited("Failed sending ack\n");

err1:
        return err;
}

static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                           u8 syndrome)
{
        int rc = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct resp_res *res;

        skb = prepare_ack_packet(qp, pkt, &ack_pkt,
                                 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
                                 syndrome, NULL);
        if (!skb) {
                rc = -ENOMEM;
                goto out;
        }

        rxe_add_ref(qp);

        res = &qp->resp.resources[qp->resp.res_head];
        free_rd_atomic_resource(qp, res);
        rxe_advance_resp_resource(qp);

        memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
        memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
               sizeof(skb->cb) - sizeof(ack_pkt));
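
        /* hold an extra reference on the ack skb so a duplicate atomic
         * request can be answered by retransmitting the saved response
         * (see duplicate_request())
         */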
        refcount_inc(&skb->users);
        res->type = RXE_ATOMIC_MASK;
        res->atomic.skb = skb;
        res->first_psn = ack_pkt.psn;
        res->last_psn  = ack_pkt.psn;
        res->cur_psn   = ack_pkt.psn;

        rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
        if (rc) {
                pr_err_ratelimited("Failed sending ack\n");
                rxe_drop_ref(qp);
        }
out:
        return rc;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        if (qp_type(qp) != IB_QPT_RC)
                return RESPST_CLEANUP;

        if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
                send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
        else if (pkt->mask & RXE_ATOMIC_MASK)
                send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
        else if (bth_ack(pkt))
                send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

        return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
                                struct rxe_pkt_info *pkt)
{
        struct sk_buff *skb;

        if (pkt) {
                skb = skb_dequeue(&qp->req_pkts);
                rxe_drop_ref(qp);
                kfree_skb(skb);
        }

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
        int i;

        for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                struct resp_res *res = &qp->resp.resources[i];

                if (res->type == 0)
                        continue;

                if (psn_compare(psn, res->first_psn) >= 0 &&
                    psn_compare(psn, res->last_psn) <= 0) {
                        return res;
                }
        }

        return NULL;
}

static enum resp_states duplicate_request(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt)
{
        enum resp_states rc;
        u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;

        if (pkt->mask & RXE_SEND_MASK ||
            pkt->mask & RXE_WRITE_MASK) {
                /* SEND or WRITE. Ack again and clean up. C9-105. */
                if (bth_ack(pkt))
                        send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
                rc = RESPST_CLEANUP;
                goto out;
        } else if (pkt->mask & RXE_READ_MASK) {
                struct resp_res *res;

                res = find_resource(qp, pkt->psn);
                if (!res) {
                        /* Resource not found. Class D error.  Drop the
                         * request.
                         */
                        rc = RESPST_CLEANUP;
                        goto out;
                } else {
                        /* Ensure this new request is the same as the previous
                         * one or a subset of it.
                         */
                        u64 iova = reth_va(pkt);
                        u32 resid = reth_len(pkt);

                        if (iova < res->read.va_org ||
                            resid > res->read.length ||
                            (iova + resid) > (res->read.va_org +
                                              res->read.length)) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        if (reth_rkey(pkt) != res->read.rkey) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        res->cur_psn = pkt->psn;
                        res->state = (pkt->psn == res->first_psn) ?
                                        rdatm_res_state_new :
                                        rdatm_res_state_replay;

                        /* Reset the resource, except length. */
                        res->read.va_org = iova;
                        res->read.va = iova;
                        res->read.resid = resid;

                        /* Replay the RDMA read reply. */
                        qp->resp.res = res;
                        rc = RESPST_READ_REPLY;
                        goto out;
                }
        } else {
                struct resp_res *res;

                /* Find the operation in our list of responder resources. */
                res = find_resource(qp, pkt->psn);
                if (res) {
                        struct sk_buff *skb_copy;

                        skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
                        if (skb_copy) {
                                rxe_add_ref(qp); /* for the new SKB */
                        } else {
                                pr_warn("Couldn't clone atomic resp\n");
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        /* Resend the result. */
                        rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
                                             pkt, skb_copy);
                        if (rc) {
                                pr_err("Failed resending result. This flow is not handled - skb ignored\n");
                                rxe_drop_ref(qp);
                                rc = RESPST_CLEANUP;
                                goto out;
                        }
                }

                /* Resource not found (Class D error), or the saved result
                 * was resent above. Drop the request either way.
                 */
                rc = RESPST_CLEANUP;
                goto out;
        }
out:
        return rc;
}

/* Process a Class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
                              enum ib_wc_status status)
{
        qp->resp.aeth_syndrome  = syndrome;
        qp->resp.status         = status;

        /* indicate that we should go through the ERROR state */
        qp->resp.goto_error     = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
        /* UC */
        if (qp->srq) {
                /* Class E */
                qp->resp.drop_msg = 1;
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                        return RESPST_COMPLETE;
                } else {
                        return RESPST_CLEANUP;
                }
        } else {
                /* Class D1. This packet may be the start of a
                 * new message and could be valid. The previous
                 * message is invalid and ignored. Reset the
                 * recv wr to its original state.
                 */
                if (qp->resp.wqe) {
                        qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
                        qp->resp.wqe->dma.cur_sge = 0;
                        qp->resp.wqe->dma.sge_offset = 0;
                        qp->resp.opcode = -1;
                }

                if (qp->resp.mr) {
                        rxe_drop_ref(qp->resp.mr);
                        qp->resp.mr = NULL;
                }

                return RESPST_CLEANUP;
        }
}

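/* drop all queued request packets; when notify is clear (the reset case)
 * the receive queue is silently drained as well, otherwise the recv wqes
 * are left in place so they can be flushed to the CQ
 */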
static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&qp->req_pkts))) {
                rxe_drop_ref(qp);
                kfree_skb(skb);
        }

        if (notify)
                return;

        while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
                advance_consumer(qp->rq.queue);
}

int rxe_responder(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        enum resp_states state;
        struct rxe_pkt_info *pkt = NULL;
        int ret = 0;

        rxe_add_ref(qp);

        qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

        if (!qp->valid) {
                ret = -EINVAL;
                goto done;
        }

        switch (qp->resp.state) {
        case QP_STATE_RESET:
                state = RESPST_RESET;
                break;

        default:
                state = RESPST_GET_REQ;
                break;
        }

        while (1) {
                pr_debug("qp#%d state = %s\n", qp_num(qp),
                         resp_state_name[state]);
                switch (state) {
                case RESPST_GET_REQ:
                        state = get_req(qp, &pkt);
                        break;
                case RESPST_CHK_PSN:
                        state = check_psn(qp, pkt);
                        break;
                case RESPST_CHK_OP_SEQ:
                        state = check_op_seq(qp, pkt);
                        break;
                case RESPST_CHK_OP_VALID:
                        state = check_op_valid(qp, pkt);
                        break;
                case RESPST_CHK_RESOURCE:
                        state = check_resource(qp, pkt);
                        break;
                case RESPST_CHK_LENGTH:
                        state = check_length(qp, pkt);
                        break;
                case RESPST_CHK_RKEY:
                        state = check_rkey(qp, pkt);
                        break;
                case RESPST_EXECUTE:
                        state = execute(qp, pkt);
                        break;
                case RESPST_COMPLETE:
                        state = do_complete(qp, pkt);
                        break;
                case RESPST_READ_REPLY:
                        state = read_reply(qp, pkt);
                        break;
                case RESPST_ACKNOWLEDGE:
                        state = acknowledge(qp, pkt);
                        break;
                case RESPST_CLEANUP:
                        state = cleanup(qp, pkt);
                        break;
                case RESPST_DUPLICATE_REQUEST:
                        state = duplicate_request(qp, pkt);
                        break;
                case RESPST_ERR_PSN_OUT_OF_SEQ:
                        /* RC only - Class B. Drop packet. */
                        send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
                case RESPST_ERR_MISSING_OPCODE_FIRST:
                case RESPST_ERR_MISSING_OPCODE_LAST_C:
                case RESPST_ERR_UNSUPPORTED_OPCODE:
                case RESPST_ERR_MISALIGNED_ATOMIC:
                        /* RC only - Class C. */
                        do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                          IB_WC_REM_INV_REQ_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
                        state = do_class_d1e_error(qp);
                        break;
                case RESPST_ERR_RNR:
                        if (qp_type(qp) == IB_QPT_RC) {
                                rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
                                /* RC - class B */
                                send_ack(qp, pkt, AETH_RNR_NAK |
                                         (~AETH_TYPE_MASK &
                                         qp->attr.min_rnr_timer),
                                         pkt->psn);
                        } else {
                                /* UD/UC - class D */
                                qp->resp.drop_msg = 1;
                        }
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_RKEY_VIOLATION:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
                                                  IB_WC_REM_ACCESS_ERR);
                                state = RESPST_COMPLETE;
                        } else {
                                qp->resp.drop_msg = 1;
                                if (qp->srq) {
                                        /* UC/SRQ Class D */
                                        qp->resp.status = IB_WC_REM_ACCESS_ERR;
                                        state = RESPST_COMPLETE;
                                } else {
                                        /* UC/non-SRQ Class E. */
                                        state = RESPST_CLEANUP;
                                }
                        }
                        break;

                case RESPST_ERR_LENGTH:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                                  IB_WC_REM_INV_REQ_ERR);
                                state = RESPST_COMPLETE;
                        } else if (qp->srq) {
                                /* UC/UD - class E */
                                qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                                state = RESPST_COMPLETE;
                        } else {
                                /* UC/UD - class D */
                                qp->resp.drop_msg = 1;
                                state = RESPST_CLEANUP;
                        }
                        break;

                case RESPST_ERR_MALFORMED_WQE:
                        /* All, Class A. */
                        do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
                                          IB_WC_LOC_QP_OP_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_CQ_OVERFLOW:
                        /* All - Class G */
                        state = RESPST_ERROR;
                        break;

                case RESPST_DONE:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto done;

                case RESPST_EXIT:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto exit;

                case RESPST_RESET:
                        rxe_drain_req_pkts(qp, false);
                        qp->resp.wqe = NULL;
                        goto exit;

                case RESPST_ERROR:
                        qp->resp.goto_error = 0;
                        pr_warn("qp#%d moved to error state\n", qp_num(qp));
                        rxe_qp_error(qp);
                        goto exit;

                default:
                        WARN_ON_ONCE(1);
                }
        }

exit:
        ret = -EAGAIN;
done:
        rxe_drop_ref(qp);
        return ret;
}