2 * Copyright(c) 2015 - 2018 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 #include <rdma/rdma_vt.h>
50 #include <rdma/rdmavt_qp.h>
55 #include "verbs_txreq.h"
58 struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
59 u8 *prev_ack, bool *scheduled)
60 __must_hold(&qp->s_lock)
62 struct rvt_ack_entry *e = NULL;
66 for (i = qp->r_head_ack_queue; ; i = p) {
67 if (i == qp->s_tail_ack_queue)
72 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
73 if (p == qp->r_head_ack_queue) {
77 e = &qp->s_ack_queue[p];
82 if (cmp_psn(psn, e->psn) >= 0) {
83 if (p == qp->s_tail_ack_queue &&
84 cmp_psn(psn, e->lpsn) <= 0)
99 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
100 * @dev: the device for this QP
101 * @qp: a pointer to the QP
102 * @ohdr: a pointer to the IB header being constructed
103 * @ps: the xmit packet state
105 * Return 1 if constructed; otherwise, return 0.
106 * Note that we are on the responder's side of the QP context.
107 * Note the QP s_lock must be held.
109 static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
110 struct ib_other_headers *ohdr,
111 struct hfi1_pkt_state *ps)
113 struct rvt_ack_entry *e;
116 u32 bth0 = 0, bth2 = 0;
117 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
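/*
 * bth0/bth1/bth2 are the three 32-bit words of the IB BTH: bth0 carries
 * the opcode in bits 31:24 plus the solicited-event, MigReq and pad bits
 * and the P_Key; bth1 carries the destination QPN in its low 24 bits
 * (with HFI1-specific extension/BECN bits above it); bth2 carries the
 * 24-bit PSN with the AckReq bit (IB_BTH_REQ_ACK) in bit 31.
 */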
120 struct hfi1_qp_priv *qpriv = qp->priv;
123 u8 next = qp->s_tail_ack_queue;
124 struct tid_rdma_request *req;
126 trace_hfi1_rsp_make_rc_ack(qp, 0);
127 lockdep_assert_held(&qp->s_lock);
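/*
 * Responder bookkeeping used below: qp->s_ack_queue[] holds one entry
 * per outstanding RDMA READ, atomic or TID RDMA request.  The receive
 * path adds entries at r_head_ack_queue, s_tail_ack_queue is the entry
 * currently being responded to, and s_acked_ack_queue trails behind so
 * that TID RDMA responses can still be replayed if the requester asks
 * for them again.
 */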
128 /* Don't send an ACK if we aren't supposed to. */
129 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
132 if (qpriv->hdr_type == HFI1_PKT_TYPE_9B)
133 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
136 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
139 switch (qp->s_ack_state) {
140 case OP(RDMA_READ_RESPONSE_LAST):
141 case OP(RDMA_READ_RESPONSE_ONLY):
142 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
143 release_rdma_sge_mr(e);
145 case OP(ATOMIC_ACKNOWLEDGE):
147 * We can increment the tail pointer now that the last
148 * response has been sent instead of only being
151 if (++next > rvt_size_atomic(&dev->rdi))
154 * Only advance the s_acked_ack_queue pointer if there
155 * have been no TID RDMA requests.
157 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
158 if (e->opcode != TID_OP(WRITE_REQ) &&
159 qp->s_acked_ack_queue == qp->s_tail_ack_queue)
160 qp->s_acked_ack_queue = next;
161 qp->s_tail_ack_queue = next;
162 trace_hfi1_rsp_make_rc_ack(qp, e->psn);
165 case OP(ACKNOWLEDGE):
166 /* Check for no next entry in the queue. */
167 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
168 if (qp->s_flags & RVT_S_ACK_PENDING)
173 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
174 /* Check for tid write fence */
175 if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) ||
176 hfi1_tid_rdma_ack_interlock(qp, e)) {
177 iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB);
180 if (e->opcode == OP(RDMA_READ_REQUEST)) {
182 * If a RDMA read response is being resent and
183 * we haven't seen the duplicate request yet,
184 * then stop sending the remaining responses the
185 * responder has seen until the requester re-sends it.
187 len = e->rdma_sge.sge_length;
188 if (len && !e->rdma_sge.mr) {
189 if (qp->s_acked_ack_queue ==
190 qp->s_tail_ack_queue)
191 qp->s_acked_ack_queue =
192 qp->r_head_ack_queue;
193 qp->s_tail_ack_queue = qp->r_head_ack_queue;
196 /* Copy SGE state in case we need to resend */
197 ps->s_txreq->mr = e->rdma_sge.mr;
199 rvt_get_mr(ps->s_txreq->mr);
200 qp->s_ack_rdma_sge.sge = e->rdma_sge;
201 qp->s_ack_rdma_sge.num_sge = 1;
202 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
205 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
207 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
210 ohdr->u.aeth = rvt_compute_aeth(qp);
212 qp->s_ack_rdma_psn = e->psn;
213 bth2 = mask_psn(qp->s_ack_rdma_psn++);
214 } else if (e->opcode == TID_OP(WRITE_REQ)) {
216 * If a TID RDMA WRITE RESP is being resent, we have to
217 * wait for the actual request. All requests that are to
218 * be resent will have their state set to
219 * TID_REQUEST_RESEND. When the new request arrives, the
220 * state will be changed to TID_REQUEST_RESEND_ACTIVE.
222 req = ack_to_tid_req(e);
223 if (req->state == TID_REQUEST_RESEND ||
224 req->state == TID_REQUEST_INIT_RESEND)
226 qp->s_ack_state = TID_OP(WRITE_RESP);
227 qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
229 } else if (e->opcode == TID_OP(READ_REQ)) {
231 * If a TID RDMA read response is being resent and
232 * we haven't seen the duplicate request yet,
233 * then stop sending the remaining responses the
234 * responder has seen until the requester re-sends it.
236 len = e->rdma_sge.sge_length;
237 if (len && !e->rdma_sge.mr) {
238 if (qp->s_acked_ack_queue ==
239 qp->s_tail_ack_queue)
240 qp->s_acked_ack_queue =
241 qp->r_head_ack_queue;
242 qp->s_tail_ack_queue = qp->r_head_ack_queue;
245 /* Copy SGE state in case we need to resend */
246 ps->s_txreq->mr = e->rdma_sge.mr;
248 rvt_get_mr(ps->s_txreq->mr);
249 qp->s_ack_rdma_sge.sge = e->rdma_sge;
250 qp->s_ack_rdma_sge.num_sge = 1;
251 qp->s_ack_state = TID_OP(READ_RESP);
254 /* COMPARE_SWAP or FETCH_ADD */
255 ps->s_txreq->ss = NULL;
257 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
258 ohdr->u.at.aeth = rvt_compute_aeth(qp);
259 ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
260 hwords += sizeof(ohdr->u.at) / sizeof(u32);
261 bth2 = mask_psn(e->psn);
264 trace_hfi1_tid_write_rsp_make_rc_ack(qp);
265 bth0 = qp->s_ack_state << 24;
268 case OP(RDMA_READ_RESPONSE_FIRST):
269 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
271 case OP(RDMA_READ_RESPONSE_MIDDLE):
272 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
273 ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
275 rvt_get_mr(ps->s_txreq->mr);
276 len = qp->s_ack_rdma_sge.sge.sge_length;
279 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
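/*
 * "middle" is set for packets between the first and last of a
 * multi-packet response.  With the SDMA_AHG capability the header for
 * such packets can be generated from the previous one by patching only
 * the changing fields (essentially the PSN) instead of rebuilding it;
 * hfi1_make_ruc_header() consumes this flag.
 */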
281 ohdr->u.aeth = rvt_compute_aeth(qp);
283 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
284 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
287 bth0 = qp->s_ack_state << 24;
288 bth2 = mask_psn(qp->s_ack_rdma_psn++);
291 case TID_OP(WRITE_RESP):
294 * 1. Check if RVT_S_ACK_PENDING is set. If yes,
296 * 2. Attempt to allocate TID resources.
297 * 3. Remove RVT_S_RESP_PENDING flags from s_flags
298 * 4. If resources not available:
299 * 4.1 Set RVT_S_WAIT_TID_SPACE
300 * 4.2 Queue QP on RCD TID queue
301 * 4.3 Put QP on iowait list.
302 * 4.4 Build IB RNR NAK with appropriate timeout value
303 * 4.5 Return an indication that progress was made.
304 * 5. If resources are available:
305 * 5.1 Program HW flow CSRs
306 * 5.2 Build TID RDMA WRITE RESP packet
307 * 5.3 If more resources needed, do 2.1 - 2.3.
308 * 5.4 Wake up next QP on RCD TID queue.
309 * 5.5 Return an indication that progress was made.
312 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
313 req = ack_to_tid_req(e);
316 * Send scheduled RNR NAK's. RNR NAK's need to be sent at
317 * segment boundaries, not at request boundaries. Don't change
318 * s_ack_state because we are still in the middle of a request
320 if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
321 qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
322 req->cur_seg == req->alloc_seg) {
323 qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
324 goto normal_no_state;
327 bth2 = mask_psn(qp->s_ack_rdma_psn);
328 hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
335 bth0 = qp->s_ack_state << 24;
336 qp->s_ack_rdma_psn++;
337 trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
339 if (req->cur_seg != req->total_segs)
343 /* Do not free e->rdma_sge until all data are received */
344 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
347 case TID_OP(READ_RESP):
349 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
350 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
351 delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
360 * Increment qp->s_tail_ack_queue through s_ack_state
363 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
366 case TID_OP(READ_REQ):
372 * Send a regular ACK.
373 * Set the s_ack_state so we wait until after sending
374 * the ACK before setting s_ack_state to ACKNOWLEDGE
377 qp->s_ack_state = OP(SEND_ONLY);
381 cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
383 IB_AETH_CREDIT_SHIFT));
385 ohdr->u.aeth = rvt_compute_aeth(qp);
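/*
 * The AETH is one 32-bit word: bits 31:24 hold the syndrome (an ACK
 * with a credit field, an RNR NAK with a timer code, or a NAK with an
 * error code) and bits 23:0 hold the MSN.  rvt_compute_aeth() builds
 * the ACK form including the current credit; the explicit cpu_to_be32()
 * above builds the NAK form from qp->s_nak_state.
 */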
388 bth0 = OP(ACKNOWLEDGE) << 24;
389 bth2 = mask_psn(qp->s_ack_psn);
390 qp->s_flags &= ~RVT_S_ACK_PENDING;
391 ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
392 ps->s_txreq->ss = NULL;
394 qp->s_rdma_ack_cnt++;
395 ps->s_txreq->sde = qpriv->s_sde;
396 ps->s_txreq->s_cur_size = len;
397 ps->s_txreq->hdr_dwords = hwords;
398 hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);
401 spin_unlock_irqrestore(&qp->s_lock, ps->flags);
402 spin_lock_irqsave(&qp->r_lock, ps->flags);
403 spin_lock(&qp->s_lock);
404 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
405 spin_unlock(&qp->s_lock);
406 spin_unlock_irqrestore(&qp->r_lock, ps->flags);
407 spin_lock_irqsave(&qp->s_lock, ps->flags);
409 qp->s_ack_state = OP(ACKNOWLEDGE);
411 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
415 qp->s_flags &= ~(RVT_S_RESP_PENDING
422 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
423 * @qp: a pointer to the QP
425 * Assumes s_lock is held.
427 * Return 1 if constructed; otherwise, return 0.
429 int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
431 struct hfi1_qp_priv *priv = qp->priv;
432 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
433 struct ib_other_headers *ohdr;
434 struct rvt_sge_state *ss = NULL;
435 struct rvt_swqe *wqe;
436 struct hfi1_swqe_priv *wpriv;
437 struct tid_rdma_request *req = NULL;
438 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
441 u32 bth0 = 0, bth2 = 0;
442 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
447 struct tid_rdma_flow *flow = NULL;
448 struct tid_rdma_params *remote;
450 trace_hfi1_sender_make_rc_req(qp);
451 lockdep_assert_held(&qp->s_lock);
452 ps->s_txreq = get_txreq(ps->dev, qp);
456 if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
457 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
459 if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
460 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
462 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
464 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
466 if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
467 (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
468 ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
470 ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
473 /* Sending responses has higher priority than sending requests. */
474 if ((qp->s_flags & RVT_S_RESP_PENDING) &&
475 make_rc_ack(dev, qp, ohdr, ps))
478 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
479 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
481 /* We are in the error state, flush the work request. */
482 if (qp->s_last == READ_ONCE(qp->s_head))
484 /* If DMAs are in progress, we can't flush immediately. */
485 if (iowait_sdma_pending(&priv->s_iowait)) {
486 qp->s_flags |= RVT_S_WAIT_DMA;
490 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
491 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
492 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
493 /* will get called again */
497 if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
500 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
501 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
502 qp->s_flags |= RVT_S_WAIT_PSN;
505 qp->s_sending_psn = qp->s_psn;
506 qp->s_sending_hpsn = qp->s_psn - 1;
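/*
 * s_sending_psn..s_sending_hpsn is the window of PSNs handed to the
 * send engine whose sends have not yet completed (reset_sending_psn()
 * advances s_sending_psn from hfi1_rc_send_complete()).  If the PSN we
 * are about to build is still inside that window we must wait
 * (RVT_S_WAIT_PSN); once the window has drained it is reset to empty
 * before new packets are constructed.
 */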
509 /* Send a request. */
510 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
512 switch (qp->s_state) {
514 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
517 * Resend an old request or start a new one.
519 * We keep track of the current SWQE so that
520 * we don't reset the "furthest progress" state
521 * if we need to back up.
524 if (qp->s_cur == qp->s_tail) {
525 /* Check if send work queue is empty. */
526 if (qp->s_tail == READ_ONCE(qp->s_head)) {
531 * If a fence is requested, wait for previous
532 * RDMA read and atomic operations to finish.
533 * However, there is no need to guard against
534 * TID RDMA READ after TID RDMA READ.
536 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
537 qp->s_num_rd_atomic &&
538 (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
539 priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
540 qp->s_flags |= RVT_S_WAIT_FENCE;
544 * Local operations are processed immediately
545 * after all prior requests have completed
547 if (wqe->wr.opcode == IB_WR_REG_MR ||
548 wqe->wr.opcode == IB_WR_LOCAL_INV) {
552 if (qp->s_last != qp->s_cur)
554 if (++qp->s_cur == qp->s_size)
556 if (++qp->s_tail == qp->s_size)
558 if (!(wqe->wr.send_flags &
559 RVT_SEND_COMPLETION_ONLY)) {
560 err = rvt_invalidate_rkey(
562 wqe->wr.ex.invalidate_rkey);
565 rvt_send_complete(qp, wqe,
566 err ? IB_WC_LOC_PROT_ERR
569 atomic_dec(&qp->local_ops_pending);
574 qp->s_psn = wqe->psn;
577 * Note that we have to be careful not to modify the
578 * original work request since we may need to resend
583 bth2 = mask_psn(qp->s_psn);
586 * Interlock between various IB requests and TID RDMA
589 if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) ||
590 hfi1_tid_rdma_wqe_interlock(qp, wqe))
593 switch (wqe->wr.opcode) {
595 case IB_WR_SEND_WITH_IMM:
596 case IB_WR_SEND_WITH_INV:
597 /* If no credit, return. */
598 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
599 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
600 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
604 qp->s_state = OP(SEND_FIRST);
608 if (wqe->wr.opcode == IB_WR_SEND) {
609 qp->s_state = OP(SEND_ONLY);
610 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
611 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
612 /* Immediate data comes after the BTH */
613 ohdr->u.imm_data = wqe->wr.ex.imm_data;
616 qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
617 /* Invalidate rkey comes after the BTH */
618 ohdr->u.ieth = cpu_to_be32(
619 wqe->wr.ex.invalidate_rkey);
622 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
623 bth0 |= IB_BTH_SOLICITED;
624 bth2 |= IB_BTH_REQ_ACK;
625 if (++qp->s_cur == qp->s_size)
629 case IB_WR_RDMA_WRITE:
630 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
632 goto no_flow_control;
633 case IB_WR_RDMA_WRITE_WITH_IMM:
634 /* If no credit, return. */
635 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
636 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
637 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
642 wqe->rdma_wr.remote_addr,
644 ohdr->u.rc.reth.rkey =
645 cpu_to_be32(wqe->rdma_wr.rkey);
646 ohdr->u.rc.reth.length = cpu_to_be32(len);
647 hwords += sizeof(struct ib_reth) / sizeof(u32);
649 qp->s_state = OP(RDMA_WRITE_FIRST);
653 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
654 qp->s_state = OP(RDMA_WRITE_ONLY);
657 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
658 /* Immediate data comes after RETH */
659 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
661 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
662 bth0 |= IB_BTH_SOLICITED;
664 bth2 |= IB_BTH_REQ_ACK;
665 if (++qp->s_cur == qp->s_size)
669 case IB_WR_TID_RDMA_WRITE:
672 * Limit the number of TID RDMA WRITE requests.
674 if (atomic_read(&priv->n_tid_requests) >=
675 HFI1_TID_RDMA_WRITE_CNT)
678 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
682 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr,
686 if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) {
687 priv->s_tid_cur = qp->s_cur;
688 if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) {
689 priv->s_tid_tail = qp->s_cur;
690 priv->s_state = TID_OP(WRITE_RESP);
692 } else if (priv->s_tid_cur == priv->s_tid_head) {
693 struct rvt_swqe *__w;
694 struct tid_rdma_request *__r;
696 __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
697 __r = wqe_to_tid_req(__w);
700 * The s_tid_cur pointer is advanced to s_cur if
701 * any of the following conditions about the WQE
702 * to which s_tid_cur currently points are satisfied:
704 * 1. The request is not a TID RDMA WRITE
706 * 2. The request is in the INACTIVE or
707 * COMPLETE states (TID RDMA READ requests
708 * stay at INACTIVE and TID RDMA WRITE
709 * transition to COMPLETE when done),
710 * 3. The request is in the ACTIVE or SYNC
711 * state and the number of completed
712 * segments is equal to the total segment
714 * (If ACTIVE, the request is waiting for
715 * ACKs. If SYNC, the request has not
716 * received any responses because it's
717 * waiting on a sync point.)
719 if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE ||
720 __r->state == TID_REQUEST_INACTIVE ||
721 __r->state == TID_REQUEST_COMPLETE ||
722 ((__r->state == TID_REQUEST_ACTIVE ||
723 __r->state == TID_REQUEST_SYNC) &&
724 __r->comp_seg == __r->total_segs)) {
725 if (priv->s_tid_tail ==
728 TID_OP(WRITE_DATA_LAST)) {
729 priv->s_tid_tail = qp->s_cur;
733 priv->s_tid_cur = qp->s_cur;
736 * A corner case: when the last TID RDMA WRITE
737 * request was completed, s_tid_head,
738 * s_tid_cur, and s_tid_tail all point to the
739 * same location. Other requests are posted and
740 * s_cur wraps around to the same location,
741 * where a new TID RDMA WRITE is posted. In
742 * this case, none of the indices need to be
743 * updated. However, priv->s_state still must be updated.
745 if (priv->s_tid_tail == qp->s_cur &&
746 priv->s_state == TID_OP(WRITE_DATA_LAST))
747 priv->s_state = TID_OP(WRITE_RESP);
749 req = wqe_to_tid_req(wqe);
751 priv->s_tid_head = qp->s_cur;
752 priv->pending_tid_w_resp += req->total_segs;
753 atomic_inc(&priv->n_tid_requests);
754 atomic_dec(&priv->n_requests);
756 req->state = TID_REQUEST_RESEND;
757 req->comp_seg = delta_psn(bth2, wqe->psn);
759 * Pull back any segments since we are going
760 * to re-receive them.
762 req->setup_head = req->clear_tail;
763 priv->pending_tid_w_resp +=
764 delta_psn(wqe->lpsn, bth2) + 1;
767 trace_hfi1_tid_write_sender_make_req(qp, newreq);
768 trace_hfi1_tid_req_make_req_write(qp, newreq,
772 if (++qp->s_cur == qp->s_size)
776 case IB_WR_RDMA_READ:
778 * Don't allow more operations to be started
779 * than the QP limits allow.
781 if (qp->s_num_rd_atomic >=
782 qp->s_max_rd_atomic) {
783 qp->s_flags |= RVT_S_WAIT_RDMAR;
786 qp->s_num_rd_atomic++;
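/*
 * s_num_rd_atomic counts outstanding RDMA READ, atomic and TID RDMA
 * READ segments and is bounded by qp->s_max_rd_atomic, the responder's
 * advertised limit.  When the limit is reached the QP sets
 * RVT_S_WAIT_RDMAR and stalls until do_rc_ack() drops the count and
 * reschedules the send engine.
 */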
787 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
790 wqe->rdma_wr.remote_addr,
792 ohdr->u.rc.reth.rkey =
793 cpu_to_be32(wqe->rdma_wr.rkey);
794 ohdr->u.rc.reth.length = cpu_to_be32(len);
795 qp->s_state = OP(RDMA_READ_REQUEST);
796 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
799 bth2 |= IB_BTH_REQ_ACK;
800 if (++qp->s_cur == qp->s_size)
804 case IB_WR_TID_RDMA_READ:
805 trace_hfi1_tid_read_sender_make_req(qp, newreq);
807 req = wqe_to_tid_req(wqe);
808 trace_hfi1_tid_req_make_req_read(qp, newreq,
812 delta = cmp_psn(qp->s_psn, wqe->psn);
815 * Don't allow more operations to be started
816 * than the QP limits allow. We could get here under
817 * three conditions; (1) It's a new request; (2) We are
818 * sending the second or later segment of a request,
819 * but the qp->s_state is set to OP(RDMA_READ_REQUEST)
820 * when the last segment of a previous request is
821 * received just before this; (3) We are re-sending a
824 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
825 qp->s_flags |= RVT_S_WAIT_RDMAR;
829 struct tid_rdma_flow *flow =
830 &req->flows[req->setup_head];
833 * Set up s_sge as it is needed for TID
834 * allocation. However, if the pages have been
835 * walked and mapped, skip it. An earlier try
836 * has failed to allocate the TID entries.
838 if (!flow->npagesets) {
839 qp->s_sge.sge = wqe->sg_list[0];
840 qp->s_sge.sg_list = wqe->sg_list + 1;
841 qp->s_sge.num_sge = wqe->wr.num_sge;
842 qp->s_sge.total_len = wqe->length;
843 qp->s_len = wqe->length;
845 req->clear_tail = req->setup_head;
846 req->flow_idx = req->setup_head;
847 req->state = TID_REQUEST_ACTIVE;
849 } else if (delta == 0) {
850 /* Re-send a request */
853 req->ack_pending = 0;
854 req->flow_idx = req->clear_tail;
855 req->state = TID_REQUEST_RESEND;
857 req->s_next_psn = qp->s_psn;
858 /* Read one segment at a time */
859 len = min_t(u32, req->seg_len,
860 wqe->length - req->seg_len * req->cur_seg);
861 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr,
865 /* Wait for TID space */
868 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
872 /* Check if this is the last segment */
873 if (req->cur_seg >= req->total_segs &&
874 ++qp->s_cur == qp->s_size)
878 case IB_WR_ATOMIC_CMP_AND_SWP:
879 case IB_WR_ATOMIC_FETCH_AND_ADD:
881 * Don't allow more operations to be started
882 * than the QP limits allow.
884 if (qp->s_num_rd_atomic >=
885 qp->s_max_rd_atomic) {
886 qp->s_flags |= RVT_S_WAIT_RDMAR;
889 qp->s_num_rd_atomic++;
893 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
895 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
896 wqe->wr.opcode == IB_WR_OPFN) {
897 qp->s_state = OP(COMPARE_SWAP);
898 put_ib_ateth_swap(wqe->atomic_wr.swap,
899 &ohdr->u.atomic_eth);
900 put_ib_ateth_compare(wqe->atomic_wr.compare_add,
901 &ohdr->u.atomic_eth);
903 qp->s_state = OP(FETCH_ADD);
904 put_ib_ateth_swap(wqe->atomic_wr.compare_add,
905 &ohdr->u.atomic_eth);
906 put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
908 put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
909 &ohdr->u.atomic_eth);
910 ohdr->u.atomic_eth.rkey = cpu_to_be32(
911 wqe->atomic_wr.rkey);
912 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
915 bth2 |= IB_BTH_REQ_ACK;
916 if (++qp->s_cur == qp->s_size)
923 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) {
924 qp->s_sge.sge = wqe->sg_list[0];
925 qp->s_sge.sg_list = wqe->sg_list + 1;
926 qp->s_sge.num_sge = wqe->wr.num_sge;
927 qp->s_sge.total_len = wqe->length;
928 qp->s_len = wqe->length;
932 if (qp->s_tail >= qp->s_size)
935 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
936 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
937 qp->s_psn = wqe->lpsn + 1;
938 else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
939 qp->s_psn = req->s_next_psn;
944 case OP(RDMA_READ_RESPONSE_FIRST):
946 * qp->s_state is normally set to the opcode of the
947 * last packet constructed for new requests and therefore
948 * is never set to RDMA read response.
949 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
950 * thread to indicate a SEND needs to be restarted from an
951 * earlier PSN without interfering with the sending thread.
954 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
957 qp->s_state = OP(SEND_MIDDLE);
959 case OP(SEND_MIDDLE):
960 bth2 = mask_psn(qp->s_psn++);
965 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
968 if (wqe->wr.opcode == IB_WR_SEND) {
969 qp->s_state = OP(SEND_LAST);
970 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
971 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
972 /* Immediate data comes after the BTH */
973 ohdr->u.imm_data = wqe->wr.ex.imm_data;
976 qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
977 /* invalidate data comes after the BTH */
978 ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
981 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
982 bth0 |= IB_BTH_SOLICITED;
983 bth2 |= IB_BTH_REQ_ACK;
985 if (qp->s_cur >= qp->s_size)
989 case OP(RDMA_READ_RESPONSE_LAST):
991 * qp->s_state is normally set to the opcode of the
992 * last packet constructed for new requests and therefore
993 * is never set to RDMA read response.
994 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
995 * thread to indicate a RDMA write needs to be restarted from
996 * an earlier PSN without interfering with the sending thread.
999 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
1001 case OP(RDMA_WRITE_FIRST):
1002 qp->s_state = OP(RDMA_WRITE_MIDDLE);
1004 case OP(RDMA_WRITE_MIDDLE):
1005 bth2 = mask_psn(qp->s_psn++);
1010 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
1013 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
1014 qp->s_state = OP(RDMA_WRITE_LAST);
1016 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
1017 /* Immediate data comes after the BTH */
1018 ohdr->u.imm_data = wqe->wr.ex.imm_data;
1020 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
1021 bth0 |= IB_BTH_SOLICITED;
1023 bth2 |= IB_BTH_REQ_ACK;
1025 if (qp->s_cur >= qp->s_size)
1029 case OP(RDMA_READ_RESPONSE_MIDDLE):
1031 * qp->s_state is normally set to the opcode of the
1032 * last packet constructed for new requests and therefore
1033 * is never set to RDMA read response.
1034 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
1035 * thread to indicate a RDMA read needs to be restarted from
1036 * an earlier PSN without interfering with the sending thread.
1039 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
1041 wqe->rdma_wr.remote_addr + len,
1043 ohdr->u.rc.reth.rkey =
1044 cpu_to_be32(wqe->rdma_wr.rkey);
1045 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
1046 qp->s_state = OP(RDMA_READ_REQUEST);
1047 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
1048 bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
1049 qp->s_psn = wqe->lpsn + 1;
1053 if (qp->s_cur == qp->s_size)
1057 case TID_OP(WRITE_RESP):
1059 * This value for s_state is used for restarting a TID RDMA
1060 * WRITE request. See comment in OP(RDMA_READ_RESPONSE_MIDDLE
1063 req = wqe_to_tid_req(wqe);
1064 req->state = TID_REQUEST_RESEND;
1066 remote = rcu_dereference(priv->tid_rdma.remote);
1067 req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
1068 len = wqe->length - (req->comp_seg * remote->max_len);
1071 bth2 = mask_psn(qp->s_psn);
1072 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1,
1074 qp->s_psn = wqe->lpsn + 1;
1076 qp->s_state = TID_OP(WRITE_REQ);
1077 priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
1078 priv->s_tid_cur = qp->s_cur;
1079 if (++qp->s_cur == qp->s_size)
1081 trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
1082 wqe->psn, wqe->lpsn, req);
1085 case TID_OP(READ_RESP):
1086 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
1088 /* This is used to restart a TID read request */
1089 req = wqe_to_tid_req(wqe);
1092 * Back down. The field qp->s_psn has been set to the psn with
1093 * which the request should be restarted. It's OK to use division
1094 * as this is on the retry path.
1096 req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
1099 * The following function needs to be redefined to return the
1100 * status to make sure that we find the flow. At the same
1101 * time, we can use the req->state change to check if the
1102 * call succeeds or not.
1104 req->state = TID_REQUEST_RESEND;
1105 hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
1106 if (req->state != TID_REQUEST_ACTIVE) {
1108 * Failed to find the flow. Release all allocated tid
1111 hfi1_kern_exp_rcv_clear_all(req);
1112 hfi1_kern_clear_hw_flow(priv->rcd, qp);
1114 hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR);
1117 req->state = TID_REQUEST_RESEND;
1118 len = min_t(u32, req->seg_len,
1119 wqe->length - req->seg_len * req->cur_seg);
1120 flow = &req->flows[req->flow_idx];
1122 req->s_next_psn = flow->flow_state.ib_lpsn + 1;
1123 delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1,
1126 /* Wait for TID space */
1131 /* Check if this is the last segment */
1132 if (req->cur_seg >= req->total_segs &&
1133 ++qp->s_cur == qp->s_size)
1135 qp->s_psn = req->s_next_psn;
1136 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
1137 wqe->psn, wqe->lpsn, req);
1139 case TID_OP(READ_REQ):
1140 req = wqe_to_tid_req(wqe);
1141 delta = cmp_psn(qp->s_psn, wqe->psn);
1143 * If the current WR is not TID RDMA READ, or this is the start
1144 * of a new request, we need to change the qp->s_state so that
1145 * the request can be set up properly.
1147 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 ||
1148 qp->s_cur == qp->s_tail) {
1149 qp->s_state = OP(RDMA_READ_REQUEST);
1150 if (delta == 0 || qp->s_cur == qp->s_tail)
1157 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
1158 qp->s_flags |= RVT_S_WAIT_RDMAR;
1163 /* Read one segment at a time */
1164 len = min_t(u32, req->seg_len,
1165 wqe->length - req->seg_len * req->cur_seg);
1166 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1,
1169 /* Wait for TID space */
1174 /* Check if this is the last segment */
1175 if (req->cur_seg >= req->total_segs &&
1176 ++qp->s_cur == qp->s_size)
1178 qp->s_psn = req->s_next_psn;
1179 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
1180 wqe->psn, wqe->lpsn, req);
1183 qp->s_sending_hpsn = bth2;
1184 delta = delta_psn(bth2, wqe->psn);
1185 if (delta && delta % HFI1_PSN_CREDIT == 0 &&
1186 wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
1187 bth2 |= IB_BTH_REQ_ACK;
1188 if (qp->s_flags & RVT_S_SEND_ONE) {
1189 qp->s_flags &= ~RVT_S_SEND_ONE;
1190 qp->s_flags |= RVT_S_WAIT_ACK;
1191 bth2 |= IB_BTH_REQ_ACK;
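/*
 * Ask for an explicit ACK every HFI1_PSN_CREDIT packets so the
 * requester sees periodic progress on long transfers (TID RDMA WRITE
 * carries its own acknowledgements).  RVT_S_SEND_ONE, set when a send
 * was aborted (see hfi1_rc_verbs_aborted()) or a retry is being
 * throttled, limits us to a single ACK-requesting packet and then
 * waits for that ACK before sending more.
 */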
1194 ps->s_txreq->hdr_dwords = hwords;
1195 ps->s_txreq->sde = priv->s_sde;
1196 ps->s_txreq->ss = ss;
1197 ps->s_txreq->s_cur_size = len;
1198 hfi1_make_ruc_header(
1201 bth0 | (qp->s_state << 24),
1209 hfi1_put_txreq(ps->s_txreq);
1214 hfi1_put_txreq(ps->s_txreq);
1218 qp->s_flags &= ~RVT_S_BUSY;
1220 * If we didn't get a txreq, the QP will be woken up later to try
1221 * again. Set the flags to indicate which work item to wake
1224 iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
1228 static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
1229 struct ib_other_headers *ohdr,
1232 if (qp->r_nak_state)
1233 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
1235 IB_AETH_CREDIT_SHIFT));
1237 ohdr->u.aeth = rvt_compute_aeth(qp);
1239 ohdr->bth[0] = cpu_to_be32(bth0);
1240 ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
1241 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
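/*
 * hfi1_queue_rc_ack - hand an RC ACK off to the send engine
 *
 * Rather than building the ACK inline, mark the QP with
 * RVT_S_ACK_PENDING | RVT_S_RESP_PENDING and schedule it; make_rc_ack()
 * will construct and send the ACK.  This path is used when a read or
 * atomic response is already pending or when no PIO buffer is
 * available.
 */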
1244 static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn)
1246 struct rvt_qp *qp = packet->qp;
1247 struct hfi1_ibport *ibp;
1248 unsigned long flags;
1250 spin_lock_irqsave(&qp->s_lock, flags);
1251 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
1253 ibp = rcd_to_iport(packet->rcd);
1254 this_cpu_inc(*ibp->rvp.rc_qacks);
1255 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
1256 qp->s_nak_state = qp->r_nak_state;
1257 qp->s_ack_psn = qp->r_ack_psn;
1259 qp->s_flags |= RVT_S_ECN;
1261 /* Schedule the send tasklet. */
1262 hfi1_schedule_send(qp);
1264 spin_unlock_irqrestore(&qp->s_lock, flags);
1267 static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
1268 struct hfi1_opa_header *opa_hdr,
1269 u8 sc5, bool is_fecn,
1270 u64 *pbc_flags, u32 *hwords,
1273 struct rvt_qp *qp = packet->qp;
1274 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1275 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1276 struct ib_header *hdr = &opa_hdr->ibh;
1277 struct ib_other_headers *ohdr;
1278 u16 lrh0 = HFI1_LRH_BTH;
1282 opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
1284 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
1287 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
1288 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
1289 rdma_ah_read_grh(&qp->remote_ah_attr),
1290 *hwords - 2, SIZE_OF_CRC);
1291 ohdr = &hdr->u.l.oth;
1292 lrh0 = HFI1_LRH_GRH;
1294 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
1295 *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
1297 /* read pkey_index w/o lock (it's atomic) */
1298 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
1300 lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
1301 (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
1304 hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
1305 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
1306 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));
1308 bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
1309 if (qp->s_mig_state == IB_MIG_MIGRATED)
1310 bth0 |= IB_BTH_MIG_REQ;
1311 bth1 = (!!is_fecn) << IB_BECN_SHIFT;
1313 * Inline ACKs go out without the use of the Verbs send engine, so
1314 * we need to set the STL Verbs Extended bit here
1316 bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT;
1317 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
1320 static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet,
1321 struct hfi1_opa_header *opa_hdr,
1322 u8 sc5, bool is_fecn,
1323 u64 *pbc_flags, u32 *hwords,
1326 struct rvt_qp *qp = packet->qp;
1327 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1328 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1329 struct hfi1_16b_header *hdr = &opa_hdr->opah;
1330 struct ib_other_headers *ohdr;
1333 bool becn = is_fecn;
1334 u8 l4 = OPA_16B_L4_IB_LOCAL;
1337 opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
1339 /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
1341 extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
1342 *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);
1344 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
1345 hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
1346 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
1347 rdma_ah_read_grh(&qp->remote_ah_attr),
1348 *hwords - 4, *nwords);
1349 ohdr = &hdr->u.l.oth;
1350 l4 = OPA_16B_L4_IB_GLOBAL;
1352 *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
1354 /* read pkey_index w/o lock (it's atomic) */
1355 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
1357 /* Convert dwords to flits */
1358 len = (*hwords + *nwords) >> 1;
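/*
 * The 16B LRH length field counts 8-byte flits, so the dword total
 * (header plus CRC/LT padding) is halved here.
 */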
1360 hfi1_make_16b_hdr(hdr, ppd->lid |
1361 (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
1362 ((1 << ppd->lmc) - 1)),
1363 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
1364 16B), len, pkey, becn, 0, l4, sc5);
1366 bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
1367 bth0 |= extra_bytes << 20;
1368 if (qp->s_mig_state == IB_MIG_MIGRATED)
1369 bth1 = OPA_BTH_MIG_REQ;
1370 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
1373 typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet,
1374 struct hfi1_opa_header *opa_hdr,
1375 u8 sc5, bool is_fecn,
1376 u64 *pbc_flags, u32 *hwords,
1379 /* We support only two types - 9B and 16B for now */
1380 static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
1381 [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
1382 [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
1386 * hfi1_send_rc_ack - Construct an ACK packet and send it
1387 * @packet: the received packet, including the QP to acknowledge
1389 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
1390 * Note that RDMA reads and atomics are handled in the
1391 * send side QP state and send engine.
1393 void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
1395 struct hfi1_ctxtdata *rcd = packet->rcd;
1396 struct rvt_qp *qp = packet->qp;
1397 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
1398 struct hfi1_qp_priv *priv = qp->priv;
1399 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1400 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
1401 u64 pbc, pbc_flags = 0;
1405 struct pio_buf *pbuf;
1406 struct hfi1_opa_header opa_hdr;
1408 /* clear the defer count */
1411 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
1412 if (qp->s_flags & RVT_S_RESP_PENDING) {
1413 hfi1_queue_rc_ack(packet, is_fecn);
1417 /* Ensure s_rdma_ack_cnt changes are committed */
1418 if (qp->s_rdma_ack_cnt) {
1419 hfi1_queue_rc_ack(packet, is_fecn);
1423 /* Don't try to send ACKs if the link isn't ACTIVE */
1424 if (driver_lstate(ppd) != IB_PORT_ACTIVE)
1427 /* Make the appropriate header */
1428 hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
1429 &pbc_flags, &hwords, &nwords);
1431 plen = 2 /* PBC */ + hwords + nwords;
1432 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
1433 sc_to_vlt(ppd->dd, sc5), plen);
1434 pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
1435 if (IS_ERR_OR_NULL(pbuf)) {
1437 * We have no room to send at the moment. Pass
1438 * responsibility for sending the ACK to the send engine
1439 * so that when enough buffer space becomes available,
1440 * the ACK is sent ahead of other outgoing packets.
1442 hfi1_queue_rc_ack(packet, is_fecn);
1445 trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
1446 &opa_hdr, ib_is_sc5(sc5));
1448 /* write the pbc and data */
1449 ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
1450 (priv->hdr_type == HFI1_PKT_TYPE_9B ?
1451 (void *)&opa_hdr.ibh :
1452 (void *)&opa_hdr.opah), hwords);
1457 * update_num_rd_atomic - update the qp->s_num_rd_atomic
1459 * @psn: the packet sequence number to restart at
1462 * This is called from reset_psn() to update qp->s_num_rd_atomic
1463 * for the current wqe.
1464 * Called at interrupt level with the QP s_lock held.
1466 static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
1467 struct rvt_swqe *wqe)
1469 u32 opcode = wqe->wr.opcode;
1471 if (opcode == IB_WR_RDMA_READ ||
1472 opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1473 opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1474 qp->s_num_rd_atomic++;
1475 } else if (opcode == IB_WR_TID_RDMA_READ) {
1476 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1477 struct hfi1_qp_priv *priv = qp->priv;
1479 if (cmp_psn(psn, wqe->lpsn) <= 0) {
1482 cur_seg = (psn - wqe->psn) / priv->pkts_ps;
1483 req->ack_pending = cur_seg - req->comp_seg;
1484 priv->pending_tid_r_segs += req->ack_pending;
1485 qp->s_num_rd_atomic += req->ack_pending;
1487 priv->pending_tid_r_segs += req->total_segs;
1488 qp->s_num_rd_atomic += req->total_segs;
1494 * reset_psn - reset the QP state to send starting from PSN
1496 * @psn: the packet sequence number to restart at
1498 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
1500 * Called at interrupt level with the QP s_lock held.
1502 static void reset_psn(struct rvt_qp *qp, u32 psn)
1504 u32 n = qp->s_acked;
1505 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
1507 struct hfi1_qp_priv *priv = qp->priv;
1509 lockdep_assert_held(&qp->s_lock);
1511 priv->pending_tid_r_segs = 0;
1512 priv->pending_tid_w_resp = 0;
1513 qp->s_num_rd_atomic = 0;
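/*
 * The outstanding read/atomic accounting is rebuilt from scratch:
 * update_num_rd_atomic() is invoked for every WQE between s_acked and
 * the restart point, since those RDMA READ, atomic and TID RDMA READ
 * requests remain outstanding across the restart.
 */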
1516 * If we are starting the request from the beginning,
1517 * let the normal send code handle initialization.
1519 if (cmp_psn(psn, wqe->psn) <= 0) {
1520 qp->s_state = OP(SEND_LAST);
1523 update_num_rd_atomic(qp, psn, wqe);
1525 /* Find the work request opcode corresponding to the given PSN. */
1529 if (++n == qp->s_size)
1531 if (n == qp->s_tail)
1533 wqe = rvt_get_swqe_ptr(qp, n);
1534 diff = cmp_psn(psn, wqe->psn);
1536 /* Point wqe back to the previous one */
1537 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1542 * If we are starting the request from the beginning,
1543 * let the normal send code handle initialization.
1546 qp->s_state = OP(SEND_LAST);
1550 update_num_rd_atomic(qp, psn, wqe);
1552 opcode = wqe->wr.opcode;
1555 * Set the state to restart in the middle of a request.
1556 * Don't change the s_sge, s_cur_sge, or s_cur_size.
1557 * See hfi1_make_rc_req().
1561 case IB_WR_SEND_WITH_IMM:
1562 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
1565 case IB_WR_RDMA_WRITE:
1566 case IB_WR_RDMA_WRITE_WITH_IMM:
1567 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
1570 case IB_WR_TID_RDMA_WRITE:
1571 qp->s_state = TID_OP(WRITE_RESP);
1574 case IB_WR_RDMA_READ:
1575 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
1578 case IB_WR_TID_RDMA_READ:
1579 qp->s_state = TID_OP(READ_RESP);
1584 * This case shouldn't happen since it's only
1587 qp->s_state = OP(SEND_LAST);
1590 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
1593 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
1594 * asynchronously before the send engine can get scheduled.
1595 * Doing it in hfi1_make_rc_req() is too late.
1597 if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
1598 (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
1599 qp->s_flags |= RVT_S_WAIT_PSN;
1600 qp->s_flags &= ~HFI1_S_AHG_VALID;
1601 trace_hfi1_sender_reset_psn(qp);
1605 * Back up the requester to resend the last un-ACKed request.
1606 * The QP r_lock and s_lock should be held and interrupts disabled.
1608 void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
1610 struct hfi1_qp_priv *priv = qp->priv;
1611 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1612 struct hfi1_ibport *ibp;
1614 lockdep_assert_held(&qp->r_lock);
1615 lockdep_assert_held(&qp->s_lock);
1616 trace_hfi1_sender_restart_rc(qp);
1617 if (qp->s_retry == 0) {
1618 if (qp->s_mig_state == IB_MIG_ARMED) {
1619 hfi1_migrate_qp(qp);
1620 qp->s_retry = qp->s_retry_cnt;
1621 } else if (qp->s_last == qp->s_acked) {
1623 * We need special handling for the OPFN request WQEs as
1624 * they are not allowed to generate real user errors
1626 if (wqe->wr.opcode == IB_WR_OPFN) {
1627 struct hfi1_ibport *ibp =
1628 to_iport(qp->ibqp.device, qp->port_num);
1630 * Call opfn_conn_reply() with capcode and
1631 * remaining data as 0 to close out the
1634 opfn_conn_reply(qp, priv->opfn.curr);
1635 wqe = do_rc_completion(qp, wqe, ibp);
1636 qp->s_flags &= ~RVT_S_WAIT_ACK;
1638 trace_hfi1_tid_write_sender_restart_rc(qp, 0);
1639 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
1640 struct tid_rdma_request *req;
1642 req = wqe_to_tid_req(wqe);
1643 hfi1_kern_exp_rcv_clear_all(req);
1644 hfi1_kern_clear_hw_flow(priv->rcd, qp);
1647 hfi1_trdma_send_complete(qp, wqe,
1648 IB_WC_RETRY_EXC_ERR);
1649 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1652 } else { /* need to handle delayed completion */
1659 ibp = to_iport(qp->ibqp.device, qp->port_num);
1660 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1661 wqe->wr.opcode == IB_WR_TID_RDMA_READ)
1662 ibp->rvp.n_rc_resends++;
1664 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
1666 qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
1667 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
1668 RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP);
1670 qp->s_flags |= RVT_S_SEND_ONE;
1675 * Set qp->s_sending_psn to the next PSN after the given one.
1676 * This would be psn+1 except when RDMA reads or TID RDMA ops
1679 static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
1681 struct rvt_swqe *wqe;
1684 lockdep_assert_held(&qp->s_lock);
1685 /* Find the work request corresponding to the given PSN. */
1687 wqe = rvt_get_swqe_ptr(qp, n);
1688 if (cmp_psn(psn, wqe->lpsn) <= 0) {
1689 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1690 wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
1691 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
1692 qp->s_sending_psn = wqe->lpsn + 1;
1694 qp->s_sending_psn = psn + 1;
1697 if (++n == qp->s_size)
1699 if (n == qp->s_tail)
1705 * hfi1_rc_verbs_aborted - handle abort status
1707 * @opah: the opa header
1709 * This code modifies both the ACK bit in BTH[2]
1710 * and the s_flags to go into send-one mode.
1712 * This serves to throttle the send engine to only
1713 * send a single packet in the likely case that
1714 * a link has gone down.
1716 void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1718 struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);
1719 u8 opcode = ib_bth_get_opcode(ohdr);
1722 /* ignore responses */
1723 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1724 opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
1725 opcode == TID_OP(READ_RESP) ||
1726 opcode == TID_OP(WRITE_RESP))
1729 psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK;
1730 ohdr->bth[2] = cpu_to_be32(psn);
1731 qp->s_flags |= RVT_S_SEND_ONE;
1735 * This should be called with the QP s_lock held and interrupts disabled.
1737 void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1739 struct ib_other_headers *ohdr;
1740 struct hfi1_qp_priv *priv = qp->priv;
1741 struct rvt_swqe *wqe;
1742 u32 opcode, head, tail;
1744 struct tid_rdma_request *req;
1746 lockdep_assert_held(&qp->s_lock);
1747 if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
1750 ohdr = hfi1_get_rc_ohdr(opah);
1751 opcode = ib_bth_get_opcode(ohdr);
1752 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1753 opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
1754 opcode == TID_OP(READ_RESP) ||
1755 opcode == TID_OP(WRITE_RESP)) {
1756 WARN_ON(!qp->s_rdma_ack_cnt);
1757 qp->s_rdma_ack_cnt--;
1761 psn = ib_bth_get_psn(ohdr);
1763 * Don't attempt to reset the sending PSN for packets in the
1764 * KDETH PSN space since the PSN does not match anything.
1766 if (opcode != TID_OP(WRITE_DATA) &&
1767 opcode != TID_OP(WRITE_DATA_LAST) &&
1768 opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC))
1769 reset_sending_psn(qp, psn);
1771 /* Handle TID RDMA WRITE packets differently */
1772 if (opcode >= TID_OP(WRITE_REQ) &&
1773 opcode <= TID_OP(WRITE_DATA_LAST)) {
1774 head = priv->s_tid_head;
1775 tail = priv->s_tid_cur;
1777 * s_tid_cur is set to s_tid_head in the case where
1778 * a new TID RDMA request is being started and all
1779 * previous ones have been completed.
1780 * Therefore, we need to do a secondary check in order
1781 * to properly determine whether we should start the
1784 wqe = rvt_get_swqe_ptr(qp, tail);
1785 req = wqe_to_tid_req(wqe);
1786 if (head == tail && req->comp_seg < req->total_segs) {
1788 tail = qp->s_size - 1;
1798 * Start timer after a packet requesting an ACK has been sent and
1799 * there are still requests that haven't been acked.
1801 if ((psn & IB_BTH_REQ_ACK) && tail != head &&
1802 opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) &&
1803 opcode != TID_OP(RESYNC) &&
1805 (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
1806 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
1807 if (opcode == TID_OP(READ_REQ))
1808 rvt_add_retry_timer_ext(qp, priv->timeout_shift);
1810 rvt_add_retry_timer(qp);
1813 /* Start TID RDMA ACK timer */
1814 if ((opcode == TID_OP(WRITE_DATA) ||
1815 opcode == TID_OP(WRITE_DATA_LAST) ||
1816 opcode == TID_OP(RESYNC)) &&
1817 (psn & IB_BTH_REQ_ACK) &&
1818 !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) &&
1819 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
1821 * The TID RDMA ACK packet could be received before this
1822 * function is called. Therefore, add the timer only if TID
1823 * RDMA ACK packets are actually pending.
1825 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1826 req = wqe_to_tid_req(wqe);
1827 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
1828 req->ack_seg < req->cur_seg)
1829 hfi1_add_tid_retry_timer(qp);
1832 while (qp->s_last != qp->s_acked) {
1833 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
1834 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1835 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1837 trdma_clean_swqe(qp, wqe);
1838 rvt_qp_wqe_unreserve(qp, wqe);
1839 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
1840 rvt_qp_complete_swqe(qp,
1842 ib_hfi1_wc_opcode[wqe->wr.opcode],
1846 * If we were waiting for sends to complete before re-sending,
1847 * and they are now complete, restart sending.
1849 trace_hfi1_sendcomplete(qp, psn);
1850 if (qp->s_flags & RVT_S_WAIT_PSN &&
1851 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1852 qp->s_flags &= ~RVT_S_WAIT_PSN;
1853 qp->s_sending_psn = qp->s_psn;
1854 qp->s_sending_hpsn = qp->s_psn - 1;
1855 hfi1_schedule_send(qp);
1859 static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
1861 qp->s_last_psn = psn;
1865 * Generate a SWQE completion.
1866 * This is similar to hfi1_send_complete but has to check to be sure
1867 * that the SGEs are not being referenced if the SWQE is being resent.
1869 struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
1870 struct rvt_swqe *wqe,
1871 struct hfi1_ibport *ibp)
1873 struct hfi1_qp_priv *priv = qp->priv;
1875 lockdep_assert_held(&qp->s_lock);
1877 * Don't decrement refcount and don't generate a
1878 * completion if the SWQE is being resent until the send
1881 trace_hfi1_rc_completion(qp, wqe->lpsn);
1882 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
1883 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1884 trdma_clean_swqe(qp, wqe);
1885 rvt_qp_wqe_unreserve(qp, wqe);
1886 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
1887 rvt_qp_complete_swqe(qp,
1889 ib_hfi1_wc_opcode[wqe->wr.opcode],
1892 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1894 this_cpu_inc(*ibp->rvp.rc_delayed_comp);
1896 * If send progress is not running, attempt to progress
1899 if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
1900 struct sdma_engine *engine;
1901 u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
1904 /* For now use sc to find engine */
1905 sc5 = ibp->sl_to_sc[sl];
1906 engine = qp_to_sdma_engine(qp, sc5);
1907 sdma_engine_progress_schedule(engine);
1911 qp->s_retry = qp->s_retry_cnt;
1913 * Don't update the last PSN if the request being completed is
1914 * a TID RDMA WRITE request.
1915 * Completion of TID RDMA WRITE requests is done by the
1916 * TID RDMA ACKs and as such could be for a request that has
1917 * already been ACKed as far as the IB state machine is concerned.
1920 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
1921 update_last_psn(qp, wqe->lpsn);
1924 * If we are completing a request which is in the process of
1925 * being resent, we can stop re-sending it since we know the
1926 * responder has already seen it.
1928 if (qp->s_acked == qp->s_cur) {
1929 if (++qp->s_cur >= qp->s_size)
1931 qp->s_acked = qp->s_cur;
1932 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1933 if (qp->s_acked != qp->s_tail) {
1934 qp->s_state = OP(SEND_LAST);
1935 qp->s_psn = wqe->psn;
1938 if (++qp->s_acked >= qp->s_size)
1940 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1942 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1944 if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) {
1945 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
1946 hfi1_schedule_send(qp);
1951 static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd)
1953 /* Retry this request. */
1954 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
1955 qp->r_flags |= RVT_R_RDMAR_SEQ;
1956 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
1957 if (list_empty(&qp->rspwait)) {
1958 qp->r_flags |= RVT_R_RSP_SEND;
1960 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1966 * update_qp_retry_state - Update qp retry state.
1968 * @psn: the packet sequence number of the TID RDMA WRITE RESP.
1969 * @spsn: The start psn for the given TID RDMA WRITE swqe.
1970 * @lpsn: The last psn for the given TID RDMA WRITE swqe.
1972 * This function is called to update the qp retry state upon
1973 * receiving a TID WRITE RESP after the qp is scheduled to retry
1976 static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
1979 struct hfi1_qp_priv *qpriv = qp->priv;
1981 qp->s_psn = psn + 1;
1983 * If this is the first TID RDMA WRITE RESP packet for the current
1984 * request, change the s_state so that the retry will be processed
1985 * correctly. Similarly, if this is the last TID RDMA WRITE RESP
1986 * packet, change the s_state and advance the s_cur.
1988 if (cmp_psn(psn, lpsn) >= 0) {
1989 qp->s_cur = qpriv->s_tid_cur + 1;
1990 if (qp->s_cur >= qp->s_size)
1992 qp->s_state = TID_OP(WRITE_REQ);
1993 } else if (!cmp_psn(psn, spsn)) {
1994 qp->s_cur = qpriv->s_tid_cur;
1995 qp->s_state = TID_OP(WRITE_RESP);
2000 * do_rc_ack - process an incoming RC ACK
2001 * @qp: the QP the ACK came in on
2002 * @psn: the packet sequence number of the ACK
2003 * @opcode: the opcode of the request that resulted in the ACK
2005 * This is called from rc_rcv_resp() to process an incoming RC ACK
2007 * May be called at interrupt level, with the QP s_lock held.
2008 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
2010 int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
2011 u64 val, struct hfi1_ctxtdata *rcd)
2013 struct hfi1_ibport *ibp;
2014 enum ib_wc_status status;
2015 struct hfi1_qp_priv *qpriv = qp->priv;
2016 struct rvt_swqe *wqe;
2020 struct rvt_dev_info *rdi;
2022 lockdep_assert_held(&qp->s_lock);
2024 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2025 * requests and implicitly NAK RDMA read and atomic requests issued
2026 * before the NAK'ed request. The MSN won't include the NAK'ed
2027 * request but will include the ACK'ed request(s).
2030 if (aeth >> IB_AETH_NAK_SHIFT)
2032 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2033 ibp = rcd_to_iport(rcd);
2036 * The MSN might be for a later WQE than the PSN indicates so
2037 * only complete WQEs that the PSN finishes.
2039 while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
2041 * RDMA_READ_RESPONSE_ONLY is a special case since
2042 * we want to generate completion events for everything
2043 * before the RDMA read, copy the data, then generate
2044 * the completion for the read.
2046 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
2047 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
2053 * If this request is a RDMA read or atomic, and the ACK is
2054 * for a later operation, this ACK NAKs the RDMA read or
2055 * atomic. In other words, only a RDMA_READ_LAST or ONLY
2056 * can ACK a RDMA read and likewise for atomic ops. Note
2057 * that the NAK case can only happen if relaxed ordering is
2058 * used and requests are sent after an RDMA read or atomic
2059 * is sent but before the response is received.
2061 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
2062 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
2063 (wqe->wr.opcode == IB_WR_TID_RDMA_READ &&
2064 (opcode != TID_OP(READ_RESP) || diff != 0)) ||
2065 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2066 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
2067 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) ||
2068 (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2069 (delta_psn(psn, qp->s_last_psn) != 1))) {
2070 set_restart_qp(qp, rcd);
2072 * No need to process the ACK/NAK since we are
2073 * restarting an earlier request.
2077 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2078 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2079 u64 *vaddr = wqe->sg_list[0].vaddr;
2082 if (wqe->wr.opcode == IB_WR_OPFN)
2083 opfn_conn_reply(qp, val);
2085 if (qp->s_num_rd_atomic &&
2086 (wqe->wr.opcode == IB_WR_RDMA_READ ||
2087 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2088 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
2089 qp->s_num_rd_atomic--;
2090 /* Restart sending task if fence is complete */
2091 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
2092 !qp->s_num_rd_atomic) {
2093 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
2095 hfi1_schedule_send(qp);
2096 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
2097 qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
2099 hfi1_schedule_send(qp);
2104 * TID RDMA WRITE requests will be completed by the TID RDMA
2105 * ACK packet handler (see tid_rdma.c).
2107 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
2110 wqe = do_rc_completion(qp, wqe, ibp);
2111 if (qp->s_acked == qp->s_tail)
2115 trace_hfi1_rc_ack_do(qp, aeth, psn, wqe);
2116 trace_hfi1_sender_do_rc_ack(qp);
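/*
 * Decode the AETH syndrome: the top three bits select ACK (0),
 * RNR NAK (1) or NAK (3), with value 2 reserved; the low five syndrome
 * bits (IB_AETH_CREDIT_MASK) carry the credit field, the RNR timer
 * code or the NAK error code respectively.
 */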
2117 switch (aeth >> IB_AETH_NAK_SHIFT) {
2119 this_cpu_inc(*ibp->rvp.rc_acks);
2120 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2121 if (wqe_to_tid_req(wqe)->ack_pending)
2122 rvt_mod_retry_timer_ext(qp,
2123 qpriv->timeout_shift);
2125 rvt_stop_rc_timers(qp);
2126 } else if (qp->s_acked != qp->s_tail) {
2127 struct rvt_swqe *__w = NULL;
2129 if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID)
2130 __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
2133 * Stop timers if we've received all of the TID RDMA
2134 * WRITE responses.
2136 if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2137 opcode == TID_OP(WRITE_RESP)) {
2139 * Normally, the loop above would correctly
2140 * process all WQEs from s_acked onward and
2141 * either complete them or check for correct
2143 * However, for TID RDMA, due to pipelining,
2144 * the response may not be for the request at
2146 * s_acked, so the above loop would just be
2146 * skipped. This does not allow for checking
2147 * the PSN sequencing. It has to be done
2150 if (cmp_psn(psn, qp->s_last_psn + 1)) {
2151 set_restart_qp(qp, rcd);
2155 * If the psn is being resent, stop the
2158 if (qp->s_cur != qp->s_tail &&
2159 cmp_psn(qp->s_psn, psn) <= 0)
2160 update_qp_retry_state(qp, psn, __w->psn, __w->lpsn);
2163 else if (--qpriv->pending_tid_w_resp)
2164 rvt_mod_retry_timer(qp);
2166 rvt_stop_rc_timers(qp);
2169 * We are expecting more ACKs so
2170 * mod the retry timer.
2172 rvt_mod_retry_timer(qp);
2174 * We can stop re-sending the earlier packets
2175 * and continue with the next packet the receiver wants.
2178 if (cmp_psn(qp->s_psn, psn) <= 0)
2179 reset_psn(qp, psn + 1);
2182 /* No more acks - kill all timers */
2183 rvt_stop_rc_timers(qp);
2184 if (cmp_psn(qp->s_psn, psn) <= 0) {
2185 qp->s_state = OP(SEND_LAST);
2186 qp->s_psn = psn + 1;
2189 if (qp->s_flags & RVT_S_WAIT_ACK) {
2190 qp->s_flags &= ~RVT_S_WAIT_ACK;
2191 hfi1_schedule_send(qp);
2193 rvt_get_credit(qp, aeth);
2194 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
2195 qp->s_retry = qp->s_retry_cnt;
2197 * If the current request is a TID RDMA WRITE request and the
2198 * response is not a TID RDMA WRITE RESP packet, s_last_psn
2199 * can't be advanced.
2201 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2202 opcode != TID_OP(WRITE_RESP) &&
2203 cmp_psn(psn, wqe->psn) >= 0)
2204 return 1;
2205 update_last_psn(qp, psn);
2208 case 1: /* RNR NAK */
2209 ibp->rvp.n_rnr_naks++;
2210 if (qp->s_acked == qp->s_tail)
2212 if (qp->s_flags & RVT_S_WAIT_RNR)
2214 rdi = ib_to_rvt(qp->ibqp.device);
2215 if (qp->s_rnr_retry == 0 &&
2216 !((rdi->post_parms[wqe->wr.opcode].flags &
2217 RVT_OPERATION_IGN_RNR_CNT) &&
2218 qp->s_rnr_retry_cnt == 0)) {
2219 status = IB_WC_RNR_RETRY_EXC_ERR;
2222 if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
2226 * The last valid PSN is the previous PSN. For a TID RDMA WRITE
2227 * request, s_last_psn should be incremented only when a TID
2228 * RDMA WRITE RESP is received to avoid skipping lost TID RDMA
2229 * WRITE RESP packets.
2231 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
2232 reset_psn(qp, qp->s_last_psn + 1);
2234 update_last_psn(qp, psn - 1);
2238 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
2239 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
2240 rvt_stop_rc_timers(qp);
2241 rvt_add_rnr_timer(qp, aeth);
2244 case 3: /* NAK */
2245 if (qp->s_acked == qp->s_tail)
2247 /* The last valid PSN is the previous PSN. */
2248 update_last_psn(qp, psn - 1);
2249 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
2250 IB_AETH_CREDIT_MASK) {
2251 case 0: /* PSN sequence error */
2252 ibp->rvp.n_seq_naks++;
2254 * Back up to the responder's expected PSN.
2255 * Note that we might get a NAK in the middle of an
2256 * RDMA READ response which terminates the RDMA READ.
2259 hfi1_restart_rc(qp, psn, 0);
2260 hfi1_schedule_send(qp);
2263 case 1: /* Invalid Request */
2264 status = IB_WC_REM_INV_REQ_ERR;
2265 ibp->rvp.n_other_naks++;
2268 case 2: /* Remote Access Error */
2269 status = IB_WC_REM_ACCESS_ERR;
2270 ibp->rvp.n_other_naks++;
2273 case 3: /* Remote Operation Error */
2274 status = IB_WC_REM_OP_ERR;
2275 ibp->rvp.n_other_naks++;
2277 if (qp->s_last == qp->s_acked) {
2278 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
2279 hfi1_kern_read_tid_flow_free(qp);
2281 hfi1_trdma_send_complete(qp, wqe, status);
2282 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2287 /* Ignore other reserved NAK error codes */
2290 qp->s_retry = qp->s_retry_cnt;
2291 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
2294 default: /* 2: reserved */
2296 /* Ignore reserved NAK codes. */
2299 /* cannot be reached */
2301 rvt_stop_rc_timers(qp);
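/*
 * Illustrative sketch (not part of the driver): the ACK/NAK handling above
 * relies on 24-bit PSN arithmetic throughout.  A minimal sketch of
 * wrap-safe comparison and distance helpers is shown below; the driver's
 * real cmp_psn()/delta_psn() live in the shared headers and may differ in
 * detail, so treat these as assumptions for illustration only.
 */
static inline int example_cmp_psn_24(u32 a, u32 b)
{
	/* shift the low 24 bits of (a - b) into the sign position */
	return (int)((a - b) << 8);
}

static inline int example_delta_psn_24(u32 a, u32 b)
{
	/* sign-extended 24-bit distance, e.g. delta(0x000001, 0xffffff) == 2 */
	return (int)((a - b) << 8) >> 8;
}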
2306 * We have seen an out of sequence RDMA read middle or last packet.
2307 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
2309 static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
2310 struct hfi1_ctxtdata *rcd)
2312 struct rvt_swqe *wqe;
2314 lockdep_assert_held(&qp->s_lock);
2315 /* Remove QP from retry timer */
2316 rvt_stop_rc_timers(qp);
2318 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2320 while (cmp_psn(psn, wqe->lpsn) > 0) {
2321 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
2322 wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
2323 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE ||
2324 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2325 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
2327 wqe = do_rc_completion(qp, wqe, ibp);
2330 ibp->rvp.n_rdma_seq++;
2331 qp->r_flags |= RVT_R_RDMAR_SEQ;
2332 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
2333 if (list_empty(&qp->rspwait)) {
2334 qp->r_flags |= RVT_R_RSP_SEND;
2336 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
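/*
 * Illustrative sketch (not part of the driver): rdma_seq_err() walks
 * forward from s_acked and completes the SENDs and RDMA WRITEs that the
 * out-of-sequence PSN implicitly acknowledges, stopping at the first
 * operation that still needs its own response.  A hypothetical predicate
 * mirroring the opcode list in the loop above:
 */
static inline bool example_wqe_needs_response(u32 wr_opcode)
{
	return wr_opcode == IB_WR_RDMA_READ ||
	       wr_opcode == IB_WR_TID_RDMA_READ ||
	       wr_opcode == IB_WR_TID_RDMA_WRITE ||
	       wr_opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	       wr_opcode == IB_WR_ATOMIC_FETCH_AND_ADD;
}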
2341 * rc_rcv_resp - process an incoming RC response packet
2342 * @packet: data packet information
2344 * This is called from hfi1_rc_rcv() to process an incoming RC response
2345 * packet for the given QP.
2346 * Called at interrupt level.
2348 static void rc_rcv_resp(struct hfi1_packet *packet)
2350 struct hfi1_ctxtdata *rcd = packet->rcd;
2351 void *data = packet->payload;
2352 u32 tlen = packet->tlen;
2353 struct rvt_qp *qp = packet->qp;
2354 struct hfi1_ibport *ibp;
2355 struct ib_other_headers *ohdr = packet->ohdr;
2356 struct rvt_swqe *wqe;
2357 enum ib_wc_status status;
2358 unsigned long flags;
2362 u32 psn = ib_bth_get_psn(packet->ohdr);
2363 u32 pmtu = qp->pmtu;
2364 u16 hdrsize = packet->hlen;
2365 u8 opcode = packet->opcode;
2366 u8 pad = packet->pad;
2367 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
2369 spin_lock_irqsave(&qp->s_lock, flags);
2370 trace_hfi1_ack(qp, psn);
2372 /* Ignore invalid responses. */
2373 if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
2376 /* Ignore duplicate responses. */
2377 diff = cmp_psn(psn, qp->s_last_psn);
2378 if (unlikely(diff <= 0)) {
2379 /* Update credits for "ghost" ACKs */
2380 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
2381 aeth = be32_to_cpu(ohdr->u.aeth);
2382 if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
2383 rvt_get_credit(qp, aeth);
2389 * Skip everything other than the PSN we expect, if we are waiting
2390 * for a reply to a restarted RDMA read or atomic op.
2392 if (qp->r_flags & RVT_R_RDMAR_SEQ) {
2393 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
2395 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
2398 if (unlikely(qp->s_acked == qp->s_tail))
2400 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2401 status = IB_WC_SUCCESS;
2404 case OP(ACKNOWLEDGE):
2405 case OP(ATOMIC_ACKNOWLEDGE):
2406 case OP(RDMA_READ_RESPONSE_FIRST):
2407 aeth = be32_to_cpu(ohdr->u.aeth);
2408 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
2409 val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
2412 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
2413 opcode != OP(RDMA_READ_RESPONSE_FIRST))
2415 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2416 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
2419 * If this is a response to a resent RDMA read, we
2420 * have to be careful to copy the data to the right location.
2423 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, wqe, psn, pmtu);
2427 case OP(RDMA_READ_RESPONSE_MIDDLE):
2428 /* no AETH, no ACK */
2429 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
2431 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
2434 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
2436 if (unlikely(pmtu >= qp->s_rdma_read_len))
2440 * We got a response so update the timeout.
2441 * 4.096 usec. * (1 << qp->timeout)
2443 rvt_mod_retry_timer(qp);
2444 if (qp->s_flags & RVT_S_WAIT_ACK) {
2445 qp->s_flags &= ~RVT_S_WAIT_ACK;
2446 hfi1_schedule_send(qp);
2449 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
2450 qp->s_retry = qp->s_retry_cnt;
2453 * Update the RDMA receive state but do the copy w/o
2454 * holding the locks and blocking interrupts.
2456 qp->s_rdma_read_len -= pmtu;
2457 update_last_psn(qp, psn);
2458 spin_unlock_irqrestore(&qp->s_lock, flags);
2459 rvt_copy_sge(qp, &qp->s_rdma_read_sge,
2460 data, pmtu, false, false);
2463 case OP(RDMA_READ_RESPONSE_ONLY):
2464 aeth = be32_to_cpu(ohdr->u.aeth);
2465 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
2468 * Check that the data size is >= 0 && <= pmtu.
2469 * Remember to account for ICRC (4).
2471 if (unlikely(tlen < (hdrsize + extra_bytes)))
2474 * If this is a response to a resent RDMA read, we
2475 * have to be careful to copy the data to the right location.
2478 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2479 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, wqe, psn, pmtu);
2483 case OP(RDMA_READ_RESPONSE_LAST):
2484 /* ACKs READ req. */
2485 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
2487 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
2490 * Check that the data size is >= 1 && <= pmtu.
2491 * Remember to account for ICRC (4).
2493 if (unlikely(tlen <= (hdrsize + extra_bytes)))
2496 tlen -= hdrsize + extra_bytes;
2497 if (unlikely(tlen != qp->s_rdma_read_len))
2499 aeth = be32_to_cpu(ohdr->u.aeth);
2500 rvt_copy_sge(qp, &qp->s_rdma_read_sge,
2501 data, tlen, false, false);
2502 WARN_ON(qp->s_rdma_read_sge.num_sge);
2503 (void)do_rc_ack(qp, aeth, psn,
2504 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
2509 status = IB_WC_LOC_QP_OP_ERR;
2513 ibp = rcd_to_iport(rcd);
2514 rdma_seq_err(qp, ibp, psn, rcd);
2518 status = IB_WC_LOC_LEN_ERR;
2520 if (qp->s_last == qp->s_acked) {
2521 rvt_send_complete(qp, wqe, status);
2522 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2525 spin_unlock_irqrestore(&qp->s_lock, flags);
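/*
 * Illustrative sketch (not part of the driver): every length check in
 * rc_rcv_resp() reduces to "payload = tlen - header - extra_bytes", where
 * extra_bytes folds in the pad, the 16B extra byte (if any) and the 4-byte
 * ICRC (SIZE_OF_CRC << 2).  A READ_RESPONSE_MIDDLE must yield exactly one
 * pMTU of payload; a LAST/ONLY must match the remaining s_rdma_read_len.
 * A hypothetical helper capturing just the arithmetic:
 */
static inline u32 example_rc_payload_len(u32 tlen, u32 hdrsize, u8 pad,
					 u8 extra_byte)
{
	u32 extra_bytes = pad + extra_byte + (SIZE_OF_CRC << 2);

	return tlen - hdrsize - extra_bytes;
}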
2530 static inline void rc_cancel_ack(struct rvt_qp *qp)
2533 if (list_empty(&qp->rspwait))
2535 list_del_init(&qp->rspwait);
2536 qp->r_flags &= ~RVT_R_RSP_NAK;
2537 rvt_put_qp(qp);
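/*
 * Illustrative sketch (not part of the driver): rc_defered_ack(), used
 * throughout this file, is assumed to be the mirror image of
 * rc_cancel_ack() above: queue the QP on the receive context's wait list
 * and take a reference so the deferred response can be sent later.  A
 * sketch under that assumption:
 */
static inline void example_defer_nak(struct hfi1_ctxtdata *rcd,
				     struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}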
2541 * rc_rcv_error - process an incoming duplicate or error RC packet
2542 * @ohdr: the other headers for this packet
2543 * @data: the packet data
2544 * @qp: the QP for this packet
2545 * @opcode: the opcode for this packet
2546 * @psn: the packet sequence number for this packet
2547 * @diff: the difference between the PSN and the expected PSN
2549 * This is called from hfi1_rc_rcv() to process an unexpected
2550 * incoming RC packet for the given QP.
2551 * Called at interrupt level.
2552 * Return 1 if no more processing is needed; otherwise return 0 to
2553 * schedule a response to be sent.
2555 static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
2556 struct rvt_qp *qp, u32 opcode, u32 psn,
2557 int diff, struct hfi1_ctxtdata *rcd)
2559 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
2560 struct rvt_ack_entry *e;
2561 unsigned long flags;
2563 u8 mra; /* most recent ACK */
2566 trace_hfi1_rcv_error(qp, psn);
2569 * Packet sequence error.
2570 * A NAK will ACK earlier sends and RDMA writes.
2571 * Don't queue the NAK if we already sent one.
2573 if (!qp->r_nak_state) {
2574 ibp->rvp.n_rc_seqnak++;
2575 qp->r_nak_state = IB_NAK_PSN_ERROR;
2576 /* Use the expected PSN. */
2577 qp->r_ack_psn = qp->r_psn;
2579 * Wait to send the sequence NAK until all packets
2580 * in the receive queue have been processed.
2581 * Otherwise, we end up propagating congestion.
2583 rc_defered_ack(rcd, qp);
2589 * Handle a duplicate request. Don't re-execute SEND, RDMA
2590 * write or atomic op. Don't NAK errors, just silently drop
2591 * the duplicate request. Note that r_sge, r_len, and
2592 * r_rcv_len may be in use so don't modify them.
2594 * We are supposed to ACK the earliest duplicate PSN but we
2595 * can coalesce an outstanding duplicate ACK. We have to
2596 * send the earliest so that RDMA reads can be restarted at
2597 * the requester's expected PSN.
2599 * First, find where this duplicate PSN falls within the
2600 * ACKs previously sent.
2601 * old_req is true if there is an older response that is scheduled
2602 * to be sent before sending this one.
2606 ibp->rvp.n_rc_dupreq++;
2608 spin_lock_irqsave(&qp->s_lock, flags);
2610 e = find_prev_entry(qp, psn, &prev, &mra, &old_req);
2613 case OP(RDMA_READ_REQUEST): {
2614 struct ib_reth *reth;
2619 * If we didn't find the RDMA read request in the ack queue,
2620 * we can ignore this request.
2622 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
2624 /* RETH comes after BTH */
2625 reth = &ohdr->u.rc.reth;
2627 * Address range must be a subset of the original
2628 * request and start on pmtu boundaries.
2629 * We reuse the old ack_queue slot since the requester
2630 * should not back up and request an earlier PSN for the same request.
2633 offset = delta_psn(psn, e->psn) * qp->pmtu;
2634 len = be32_to_cpu(reth->length);
2635 if (unlikely(offset + len != e->rdma_sge.sge_length))
2637 release_rdma_sge_mr(e);
2639 u32 rkey = be32_to_cpu(reth->rkey);
2640 u64 vaddr = get_ib_reth_vaddr(reth);
2643 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
2644 IB_ACCESS_REMOTE_READ);
2648 e->rdma_sge.vaddr = NULL;
2649 e->rdma_sge.length = 0;
2650 e->rdma_sge.sge_length = 0;
2655 if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
2656 qp->s_acked_ack_queue = prev;
2657 qp->s_tail_ack_queue = prev;
2661 case OP(COMPARE_SWAP):
2662 case OP(FETCH_ADD): {
2664 * If we didn't find the atomic request in the ack queue
2665 * or the send engine is already backed up to send an
2666 * earlier entry, we can ignore this request.
2668 if (!e || e->opcode != (u8)opcode || old_req)
2670 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
2671 qp->s_acked_ack_queue = prev;
2672 qp->s_tail_ack_queue = prev;
2678 * Ignore this operation if it doesn't request an ACK
2679 * or an earlier RDMA read or atomic is going to be resent.
2681 if (!(psn & IB_BTH_REQ_ACK) || old_req)
2684 * Resend the most recent ACK if this request is
2685 * after all the previous RDMA reads and atomics.
2687 if (mra == qp->r_head_ack_queue) {
2688 spin_unlock_irqrestore(&qp->s_lock, flags);
2689 qp->r_nak_state = 0;
2690 qp->r_ack_psn = qp->r_psn - 1;
2695 * Resend the RDMA read or atomic op which
2696 * ACKs this duplicate request.
2698 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
2699 qp->s_acked_ack_queue = mra;
2700 qp->s_tail_ack_queue = mra;
2703 qp->s_ack_state = OP(ACKNOWLEDGE);
2704 qp->s_flags |= RVT_S_RESP_PENDING;
2705 qp->r_nak_state = 0;
2706 hfi1_schedule_send(qp);
2709 spin_unlock_irqrestore(&qp->s_lock, flags);
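/*
 * Illustrative sketch (not part of the driver): when a duplicate
 * RDMA_READ_REQUEST is replayed above, the restart offset into the cached
 * rdma_sge is just the 24-bit PSN distance from the request's first PSN,
 * scaled by the path MTU (the "offset = delta_psn(psn, e->psn) * qp->pmtu"
 * computation).  For example, a read that began at PSN 100 and is
 * re-requested at PSN 102 with a 4096-byte pMTU restarts 8192 bytes in.
 */
static inline u64 example_dup_read_offset(u32 dup_psn, u32 first_psn,
					  u32 pmtu)
{
	/* forward 24-bit PSN distance times the path MTU */
	return (u64)((dup_psn - first_psn) & 0xffffff) * pmtu;
}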
2717 static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
2718 u32 lqpn, u32 rqpn, u8 svc_type)
2720 struct opa_hfi1_cong_log_event_internal *cc_event;
2721 unsigned long flags;
2723 if (sl >= OPA_MAX_SLS)
2726 spin_lock_irqsave(&ppd->cc_log_lock, flags);
2728 ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
2729 ppd->threshold_event_counter++;
2731 cc_event = &ppd->cc_events[ppd->cc_log_idx++];
2732 if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
2733 ppd->cc_log_idx = 0;
2734 cc_event->lqpn = lqpn & RVT_QPN_MASK;
2735 cc_event->rqpn = rqpn & RVT_QPN_MASK;
2737 cc_event->svc_type = svc_type;
2738 cc_event->rlid = rlid;
2739 /* keep timestamp in units of 1.024 usec */
2740 cc_event->timestamp = ktime_get_ns() / 1024;
2742 spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
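/*
 * Illustrative sketch (not part of the driver): congestion log timestamps
 * are kept in 1.024 usec units, i.e. nanoseconds divided by 1024, so one
 * millisecond is recorded as roughly 976 units.
 */
static inline u64 example_cc_timestamp(u64 now_ns)
{
	return now_ns / 1024;	/* same conversion as ktime_get_ns() / 1024 */
}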
2745 void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
2746 u32 rqpn, u8 svc_type)
2748 struct cca_timer *cca_timer;
2749 u16 ccti, ccti_incr, ccti_timer, ccti_limit;
2750 u8 trigger_threshold;
2751 struct cc_state *cc_state;
2752 unsigned long flags;
2754 if (sl >= OPA_MAX_SLS)
2757 cc_state = get_cc_state(ppd);
2763 * 1) increase CCTI (for this SL)
2764 * 2) select IPG (i.e., call set_link_ipg())
2767 ccti_limit = cc_state->cct.ccti_limit;
2768 ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
2769 ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
2771 cc_state->cong_setting.entries[sl].trigger_threshold;
2773 spin_lock_irqsave(&ppd->cca_timer_lock, flags);
2775 cca_timer = &ppd->cca_timer[sl];
2776 if (cca_timer->ccti < ccti_limit) {
2777 if (cca_timer->ccti + ccti_incr <= ccti_limit)
2778 cca_timer->ccti += ccti_incr;
2780 cca_timer->ccti = ccti_limit;
2784 ccti = cca_timer->ccti;
2786 if (!hrtimer_active(&cca_timer->hrtimer)) {
2787 /* ccti_timer is in units of 1.024 usec */
2788 unsigned long nsec = 1024 * ccti_timer;
2790 hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
2791 HRTIMER_MODE_REL_PINNED);
2794 spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
2796 if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
2797 log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
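/*
 * Illustrative sketch (not part of the driver): process_becn() bumps the
 * per-SL CCTI by ccti_increase but never past ccti_limit, and (re)arms an
 * hrtimer whose period is ccti_timer in 1.024 usec units, e.g. a
 * ccti_timer of 100 gives a ~102.4 usec period.  The clamp in isolation:
 */
static inline u16 example_ccti_bump(u16 ccti, u16 incr, u16 limit)
{
	if (ccti >= limit)
		return ccti;
	return (ccti + incr <= limit) ? ccti + incr : limit;
}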
2801 * hfi1_rc_rcv - process an incoming RC packet
2802 * @packet: data packet information
2804 * This is called from qp_rcv() to process an incoming RC packet
2806 * May be called at interrupt level.
2808 void hfi1_rc_rcv(struct hfi1_packet *packet)
2810 struct hfi1_ctxtdata *rcd = packet->rcd;
2811 void *data = packet->payload;
2812 u32 tlen = packet->tlen;
2813 struct rvt_qp *qp = packet->qp;
2814 struct hfi1_qp_priv *qpriv = qp->priv;
2815 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
2816 struct ib_other_headers *ohdr = packet->ohdr;
2817 u32 opcode = packet->opcode;
2818 u32 hdrsize = packet->hlen;
2819 u32 psn = ib_bth_get_psn(packet->ohdr);
2820 u32 pad = packet->pad;
2822 u32 pmtu = qp->pmtu;
2824 struct ib_reth *reth;
2825 unsigned long flags;
2827 bool copy_last = false, fecn;
2829 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
2831 lockdep_assert_held(&qp->r_lock);
2833 if (hfi1_ruc_check_hdr(ibp, packet))
2836 fecn = process_ecn(qp, packet);
2837 opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));
2840 * Process responses (ACKs) before anything else. Note that the
2841 * packet sequence number will be for something in the send work
2842 * queue rather than the expected receive packet sequence number.
2843 * In other words, this QP is the requester.
2845 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
2846 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
2847 rc_rcv_resp(packet);
2851 /* Compute 24 bits worth of difference. */
2852 diff = delta_psn(psn, qp->r_psn);
2853 if (unlikely(diff)) {
2854 if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
2859 /* Check for opcode sequence errors. */
2860 switch (qp->r_state) {
2861 case OP(SEND_FIRST):
2862 case OP(SEND_MIDDLE):
2863 if (opcode == OP(SEND_MIDDLE) ||
2864 opcode == OP(SEND_LAST) ||
2865 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
2866 opcode == OP(SEND_LAST_WITH_INVALIDATE))
2870 case OP(RDMA_WRITE_FIRST):
2871 case OP(RDMA_WRITE_MIDDLE):
2872 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
2873 opcode == OP(RDMA_WRITE_LAST) ||
2874 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
2879 if (opcode == OP(SEND_MIDDLE) ||
2880 opcode == OP(SEND_LAST) ||
2881 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
2882 opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
2883 opcode == OP(RDMA_WRITE_MIDDLE) ||
2884 opcode == OP(RDMA_WRITE_LAST) ||
2885 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
2888 * Note that it is up to the requester to not send a new
2889 * RDMA read or atomic operation before receiving an ACK
2890 * for the previous operation.
2895 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
2898 /* OK, process the packet. */
2900 case OP(SEND_FIRST):
2901 ret = rvt_get_rwqe(qp, false);
2908 case OP(SEND_MIDDLE):
2909 case OP(RDMA_WRITE_MIDDLE):
2911 /* Check for invalid length PMTU or posted rwqe len. */
2913 * There will be no padding for a 9B packet, but 16B packets
2914 * will come in with some padding since we always add
2915 * CRC and LT bytes, which need to be flit aligned.
2917 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
2919 qp->r_rcv_len += pmtu;
2920 if (unlikely(qp->r_rcv_len > qp->r_len))
2922 rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
2925 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
2927 ret = rvt_get_rwqe(qp, true);
2935 case OP(SEND_ONLY_WITH_IMMEDIATE):
2936 case OP(SEND_ONLY_WITH_INVALIDATE):
2937 ret = rvt_get_rwqe(qp, false);
2943 if (opcode == OP(SEND_ONLY))
2944 goto no_immediate_data;
2945 if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
2947 /* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
2948 case OP(SEND_LAST_WITH_IMMEDIATE):
2950 wc.ex.imm_data = ohdr->u.imm_data;
2951 wc.wc_flags = IB_WC_WITH_IMM;
2953 case OP(SEND_LAST_WITH_INVALIDATE):
2955 rkey = be32_to_cpu(ohdr->u.ieth);
2956 if (rvt_invalidate_rkey(qp, rkey))
2957 goto no_immediate_data;
2958 wc.ex.invalidate_rkey = rkey;
2959 wc.wc_flags = IB_WC_WITH_INVALIDATE;
2961 case OP(RDMA_WRITE_LAST):
2962 copy_last = rvt_is_user_qp(qp);
2969 /* Check for invalid length. */
2970 /* LAST len should be >= 1 */
2971 if (unlikely(tlen < (hdrsize + extra_bytes)))
2973 /* Don't count the CRC (and padding and LT byte for 16B). */
2974 tlen -= (hdrsize + extra_bytes);
2975 wc.byte_len = tlen + qp->r_rcv_len;
2976 if (unlikely(wc.byte_len > qp->r_len))
2978 rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
2979 rvt_put_ss(&qp->r_sge);
2981 if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
2983 wc.wr_id = qp->r_wr_id;
2984 wc.status = IB_WC_SUCCESS;
2985 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2986 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2987 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2989 wc.opcode = IB_WC_RECV;
2991 wc.src_qp = qp->remote_qpn;
2992 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
2994 * It seems that IB mandates the presence of an SL in a
2995 * work completion only for the UD transport (see section
2996 * 11.4.2 of IBTA Vol. 1).
2998 * However, the way the SL is chosen below is consistent
2999 * with the way that IB/qib works and is trying to avoid
3000 * introducing incompatibilities.
3002 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
3004 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3005 /* zero fields that are N/A */
3008 wc.dlid_path_bits = 0;
3010 /* Signal completion event if the solicited bit is set. */
3011 rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
3014 case OP(RDMA_WRITE_ONLY):
3015 copy_last = rvt_is_user_qp(qp);
3017 case OP(RDMA_WRITE_FIRST):
3018 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
3019 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3022 reth = &ohdr->u.rc.reth;
3023 qp->r_len = be32_to_cpu(reth->length);
3025 qp->r_sge.sg_list = NULL;
3026 if (qp->r_len != 0) {
3027 u32 rkey = be32_to_cpu(reth->rkey);
3028 u64 vaddr = get_ib_reth_vaddr(reth);
3031 /* Check rkey & NAK */
3032 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
3033 rkey, IB_ACCESS_REMOTE_WRITE);
3036 qp->r_sge.num_sge = 1;
3038 qp->r_sge.num_sge = 0;
3039 qp->r_sge.sge.mr = NULL;
3040 qp->r_sge.sge.vaddr = NULL;
3041 qp->r_sge.sge.length = 0;
3042 qp->r_sge.sge.sge_length = 0;
3044 if (opcode == OP(RDMA_WRITE_FIRST))
3046 else if (opcode == OP(RDMA_WRITE_ONLY))
3047 goto no_immediate_data;
3048 ret = rvt_get_rwqe(qp, true);
3052 /* peer will send again */
3053 rvt_put_ss(&qp->r_sge);
3056 wc.ex.imm_data = ohdr->u.rc.imm_data;
3057 wc.wc_flags = IB_WC_WITH_IMM;
3060 case OP(RDMA_READ_REQUEST): {
3061 struct rvt_ack_entry *e;
3065 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3067 next = qp->r_head_ack_queue + 1;
3068 /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
3069 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3071 spin_lock_irqsave(&qp->s_lock, flags);
3072 if (unlikely(next == qp->s_acked_ack_queue)) {
3073 if (!qp->s_ack_queue[next].sent)
3074 goto nack_inv_unlck;
3075 update_ack_queue(qp, next);
3077 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3078 release_rdma_sge_mr(e);
3079 reth = &ohdr->u.rc.reth;
3080 len = be32_to_cpu(reth->length);
3082 u32 rkey = be32_to_cpu(reth->rkey);
3083 u64 vaddr = get_ib_reth_vaddr(reth);
3086 /* Check rkey & NAK */
3087 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
3088 rkey, IB_ACCESS_REMOTE_READ);
3090 goto nack_acc_unlck;
3092 * Update the next expected PSN. We add 1 later
3093 * below, so only add the remainder here.
3095 qp->r_psn += rvt_div_mtu(qp, len - 1);
3097 e->rdma_sge.mr = NULL;
3098 e->rdma_sge.vaddr = NULL;
3099 e->rdma_sge.length = 0;
3100 e->rdma_sge.sge_length = 0;
3105 e->lpsn = qp->r_psn;
3107 * We need to increment the MSN here instead of when we
3108 * finish sending the result since a duplicate request would
3109 * increment it more than once.
3113 qp->r_state = opcode;
3114 qp->r_nak_state = 0;
3115 qp->r_head_ack_queue = next;
3116 qpriv->r_tid_alloc = qp->r_head_ack_queue;
3118 /* Schedule the send engine. */
3119 qp->s_flags |= RVT_S_RESP_PENDING;
3121 qp->s_flags |= RVT_S_ECN;
3122 hfi1_schedule_send(qp);
3124 spin_unlock_irqrestore(&qp->s_lock, flags);
3128 case OP(COMPARE_SWAP):
3129 case OP(FETCH_ADD): {
3130 struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth;
3131 u64 vaddr = get_ib_ateth_vaddr(ateth);
3132 bool opfn = opcode == OP(COMPARE_SWAP) &&
3133 vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
3134 struct rvt_ack_entry *e;
3140 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
3143 next = qp->r_head_ack_queue + 1;
3144 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3146 spin_lock_irqsave(&qp->s_lock, flags);
3147 if (unlikely(next == qp->s_acked_ack_queue)) {
3148 if (!qp->s_ack_queue[next].sent)
3149 goto nack_inv_unlck;
3150 update_ack_queue(qp, next);
3152 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3153 release_rdma_sge_mr(e);
3154 /* Process OPFN special virtual address */
3156 opfn_conn_response(qp, e, ateth);
3159 if (unlikely(vaddr & (sizeof(u64) - 1)))
3160 goto nack_inv_unlck;
3161 rkey = be32_to_cpu(ateth->rkey);
3162 /* Check rkey & NAK */
3163 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3165 IB_ACCESS_REMOTE_ATOMIC)))
3166 goto nack_acc_unlck;
3167 /* Perform atomic OP and save result. */
3168 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3169 sdata = get_ib_ateth_swap(ateth);
3170 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
3171 (u64)atomic64_add_return(sdata, maddr) - sdata :
3172 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3173 get_ib_ateth_compare(ateth),
3175 rvt_put_mr(qp->r_sge.sge.mr);
3176 qp->r_sge.num_sge = 0;
3184 qp->r_state = opcode;
3185 qp->r_nak_state = 0;
3186 qp->r_head_ack_queue = next;
3187 qpriv->r_tid_alloc = qp->r_head_ack_queue;
3189 /* Schedule the send engine. */
3190 qp->s_flags |= RVT_S_RESP_PENDING;
3192 qp->s_flags |= RVT_S_ECN;
3193 hfi1_schedule_send(qp);
3195 spin_unlock_irqrestore(&qp->s_lock, flags);
3200 /* NAK unknown opcodes. */
3204 qp->r_state = opcode;
3205 qp->r_ack_psn = psn;
3206 qp->r_nak_state = 0;
3207 /* Send an ACK if requested or required. */
3208 if (psn & IB_BTH_REQ_ACK || fecn) {
3209 if (packet->numpkt == 0 || fecn ||
3210 qp->r_adefered >= HFI1_PSN_CREDIT) {
3215 rc_defered_ack(rcd, qp);
3220 qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
3221 qp->r_ack_psn = qp->r_psn;
3222 /* Queue RNR NAK for later */
3223 rc_defered_ack(rcd, qp);
3227 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3228 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
3229 qp->r_ack_psn = qp->r_psn;
3230 /* Queue NAK for later */
3231 rc_defered_ack(rcd, qp);
3235 spin_unlock_irqrestore(&qp->s_lock, flags);
3237 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3238 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
3239 qp->r_ack_psn = qp->r_psn;
3240 /* Queue NAK for later */
3241 rc_defered_ack(rcd, qp);
3245 spin_unlock_irqrestore(&qp->s_lock, flags);
3247 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
3248 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
3249 qp->r_ack_psn = qp->r_psn;
3251 hfi1_send_rc_ack(packet, fecn);
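/*
 * Illustrative sketch (not part of the driver): at the tail of
 * hfi1_rc_rcv() an ACK is either sent immediately or coalesced.  The test
 * above sends right away for a FECN, for the first packet of an interrupt
 * batch, or once too many ACKs have been deferred; otherwise the ACK is
 * queued via rc_defered_ack().  As a hypothetical predicate:
 */
static inline bool example_ack_now(u32 numpkt, bool fecn, u8 r_adefered)
{
	return numpkt == 0 || fecn || r_adefered >= HFI1_PSN_CREDIT;
}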
3254 void hfi1_rc_hdrerr(
3255 struct hfi1_ctxtdata *rcd,
3256 struct hfi1_packet *packet,
3259 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
3264 if (hfi1_ruc_check_hdr(ibp, packet))
3267 psn = ib_bth_get_psn(packet->ohdr);
3268 opcode = ib_bth_get_opcode(packet->ohdr);
3270 /* Only deal with RDMA Writes for now */
3271 if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
3272 diff = delta_psn(psn, qp->r_psn);
3273 if (!qp->r_nak_state && diff >= 0) {
3274 ibp->rvp.n_rc_seqnak++;
3275 qp->r_nak_state = IB_NAK_PSN_ERROR;
3276 /* Use the expected PSN. */
3277 qp->r_ack_psn = qp->r_psn;
3279 * Wait to send the sequence
3280 * NAK until all packets
3281 * in the receive queue have
3283 * Otherwise, we end up
3284 * propagating congestion.
3286 rc_defered_ack(rcd, qp);
3287 } /* Out of sequence NAK */
3288 } /* QP Request NAKs */
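/*
 * Illustrative sketch (not part of the driver): hfi1_rc_hdrerr() only NAKs
 * request packets (opcodes below the read-response range) whose PSN is at
 * or beyond the expected r_psn, and only if no NAK is already outstanding.
 * A hypothetical predicate for that decision:
 */
static inline bool example_hdrerr_should_nak(int diff, u8 r_nak_state)
{
	return !r_nak_state && diff >= 0;
}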