/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#include <crypto/aes.h>

#include "chtls.h"
#include "chtls_cm.h"
static bool is_tls_tx(struct chtls_sock *csk)
{
	return csk->tlshws.txkey >= 0;
}

static bool is_tls_rx(struct chtls_sock *csk)
{
	return csk->tlshws.rxkey >= 0;
}
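
/*
 * Note (an assumption from how these helpers are used, not stated in this
 * file): txkey/rxkey hold on-adapter key indices that start out negative
 * and only become non-negative once a key has been programmed into card
 * memory, so these checks double as "is TLS offload active" tests for
 * the respective direction.
 */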
static int data_sgl_len(const struct sk_buff *skb)
{
	unsigned int cnt;

	cnt = skb_shinfo(skb)->nr_frags;
	return sgl_len(cnt) * 8;
}
static int nos_ivs(struct sock *sk, unsigned int size)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	return DIV_ROUND_UP(size, csk->tlshws.mfs);
}
static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
{
	int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
	int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);

	if ((hlen + KEY_ON_MEM_SZ + ivs_size) <
	    MAX_IMM_OFLD_TX_DATA_WR_LEN) {
		ULP_SKB_CB(skb)->ulp.tls.iv = 1;
		return 1;
	}
	ULP_SKB_CB(skb)->ulp.tls.iv = 0;
	return 0;
}
static int max_ivs_size(struct sock *sk, int size)
{
	return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
}

static int ivs_size(struct sock *sk, const struct sk_buff *skb)
{
	return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
			CIPHER_BLOCK_SIZE) : 0;
}
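
/*
 * IV sizing summary: set_ivs_imm() marks the skb to carry its IVs as
 * immediate data only when the WR header, key pointer and IV array all
 * fit under MAX_IMM_OFLD_TX_DATA_WR_LEN; otherwise the IVs travel in a
 * separate DSGL page and ivs_size() contributes nothing to the WR head.
 */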
static int flowc_wr_credits(int nparams, int *flowclenp)
{
	int flowclen16, flowclen;

	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;

	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}
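
/*
 * Worked example (a sketch; assumes the usual 8-byte fw_flowc_wr header
 * and 8-byte fw_flowc_mnemval entries): with nparams == 9 the raw length
 * is 8 + 9 * 8 = 80 bytes, DIV_ROUND_UP(80, 16) = 5 credits, and the
 * padded length written back through *flowclenp is 5 * 16 = 80 bytes.
 */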
static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
					   struct fw_flowc_wr *flowc,
					   int flowclen)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	skb = alloc_skb(flowclen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	memcpy(__skb_put(skb, flowclen), flowc, flowclen);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);

	return skb;
}
static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
			 int flowclen)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int flowclen16;
	int ret;

	flowclen16 = flowclen / 16;

	if (csk_flag(sk, CSK_TX_DATA_SENT)) {
		skb = create_flowc_wr_skb(sk, flowc, flowclen);
		if (!skb)
			return -ENOMEM;

		skb_entail(sk, skb,
			   ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
		return 0;
	}

	ret = cxgb4_immdata_send(csk->egress_dev,
				 csk->txq_idx,
				 flowc, flowclen);
	if (!ret)
		return flowclen16;
	skb = create_flowc_wr_skb(sk, flowc, flowclen);
	if (!skb)
		return -ENOMEM;
	send_or_defer(sk, tp, skb, 0);
	return flowclen16;
}
static u8 tcp_state_to_flowc_state(u8 state)
{
	switch (state) {
	case TCP_ESTABLISHED:
		return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
	case TCP_CLOSE_WAIT:
		return FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
	case TCP_FIN_WAIT1:
		return FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
	case TCP_CLOSING:
		return FW_FLOWC_MNEM_TCPSTATE_CLOSING;
	case TCP_LAST_ACK:
		return FW_FLOWC_MNEM_TCPSTATE_LASTACK;
	case TCP_FIN_WAIT2:
		return FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;
	}

	return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
}
int send_tx_flowc_wr(struct sock *sk, int compl,
		     u32 snd_nxt, u32 rcv_nxt)
{
	struct flowc_packed {
		struct fw_flowc_wr fc;
		struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
	} __packed sflowc;
	int nparams, paramidx, flowclen16, flowclen;
	struct fw_flowc_wr *flowc;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);
	memset(&sflowc, 0, sizeof(sflowc));
	flowc = &sflowc.fc;

#define FLOWC_PARAM(__m, __v) \
	do { \
		flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \
		flowc->mnemval[paramidx].val = cpu_to_be32(__v); \
		paramidx++; \
	} while (0)

	paramidx = 0;

	FLOWC_PARAM(PFNVFN, FW_PFVF_CMD_PFN_V(csk->cdev->lldi->pf));
	FLOWC_PARAM(CH, csk->tx_chan);
	FLOWC_PARAM(PORT, csk->tx_chan);
	FLOWC_PARAM(IQID, csk->rss_qid);
	FLOWC_PARAM(SNDNXT, tp->snd_nxt);
	FLOWC_PARAM(RCVNXT, tp->rcv_nxt);
	FLOWC_PARAM(SNDBUF, csk->sndbuf);
	FLOWC_PARAM(MSS, tp->mss_cache);
	FLOWC_PARAM(TCPSTATE, tcp_state_to_flowc_state(sk->sk_state));

	if (SND_WSCALE(tp))
		FLOWC_PARAM(RCV_SCALE, SND_WSCALE(tp));

	if (csk->ulp_mode == ULP_MODE_TLS)
		FLOWC_PARAM(ULD_MODE, ULP_MODE_TLS);

	if (csk->tlshws.fcplenmax)
		FLOWC_PARAM(TXDATAPLEN_MAX, csk->tlshws.fcplenmax);
#undef FLOWC_PARAM

	nparams = paramidx;

	flowclen16 = flowc_wr_credits(nparams, &flowclen);
	flowc->op_to_nparams =
		cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
			    FW_WR_COMPL_V(compl) |
			    FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));

	return send_flowc_wr(sk, flowc, flowclen);
}
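
/*
 * Flow summary: the FLOWC WR must reach the firmware before the first
 * data WR on a tid; it seeds per-flow scheduling state (channel, egress
 * queue, snd/rcv sequence numbers, send buffer, MSS, TCP state). TLS
 * connections additionally advertise ULD_MODE and, when already known,
 * TXDATAPLEN_MAX.
 */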
/* Copy IVs to WR */
static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb)
{
	struct chtls_sock *csk;
	unsigned char *iv_loc;
	struct chtls_hws *hws;
	unsigned char *ivs;
	u16 number_of_ivs;
	struct page *page;
	int err = 0;

	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;
	number_of_ivs = nos_ivs(sk, skb->len);

	if (number_of_ivs > MAX_IVS_PAGE) {
		pr_warn("MAX IVs in PAGE exceeded %d\n", number_of_ivs);
		return -ENOMEM;
	}

	/* generate the IVs */
	ivs = kmalloc_array(CIPHER_BLOCK_SIZE, number_of_ivs, GFP_ATOMIC);
	if (!ivs)
		return -ENOMEM;
	get_random_bytes(ivs, number_of_ivs * CIPHER_BLOCK_SIZE);

	if (skb_ulp_tls_iv_imm(skb)) {
		/* send the IVs as immediate data in the WR */
		iv_loc = (unsigned char *)__skb_push(skb, number_of_ivs *
						     CIPHER_BLOCK_SIZE);
		memcpy(iv_loc, ivs, number_of_ivs * CIPHER_BLOCK_SIZE);
		hws->ivsize = number_of_ivs * CIPHER_BLOCK_SIZE;
	} else {
		/* Send the IVs as sgls */
		/* Already accounted IV DSGL for credits */
		skb_shinfo(skb)->nr_frags--;
		page = alloc_pages(sk->sk_allocation | __GFP_COMP, 0);
		if (!page) {
			pr_info("%s : Page allocation for IVs failed\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}
		memcpy(page_address(page), ivs, number_of_ivs *
		       CIPHER_BLOCK_SIZE);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0,
				   number_of_ivs * CIPHER_BLOCK_SIZE);
		hws->ivsize = 0;
	}
out:
	kfree(ivs);
	return err;
}
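
/*
 * Note on the nr_frags-- above: chtls_wr_size() already incremented
 * nr_frags to reserve SGL credits for the IV page, so the DSGL path
 * drops that placeholder before attaching the real IV page as the
 * last fragment.
 */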
/* Copy Key to WR */
static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb)
{
	struct ulptx_sc_memrd *sc_memrd;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct ulptx_idata *sc;
	struct chtls_hws *hws;
	u32 immdlen;
	int kaddr;

	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;
	cdev = csk->cdev;

	immdlen = sizeof(*sc) + sizeof(*sc_memrd);
	kaddr = keyid_to_addr(cdev->kmap.start, hws->txkey);
	sc = (struct ulptx_idata *)__skb_push(skb, immdlen);
	if (sc) {
		sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
		sc->len = htonl(0);
		sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
		sc_memrd->cmd_to_len =
				htonl(ULPTX_CMD_V(ULP_TX_SC_MEMRD) |
				      ULP_TX_SC_MORE_V(1) |
				      ULPTX_LEN16_V(hws->keylen >> 4));
		sc_memrd->addr = htonl(kaddr);
	}
}
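
/*
 * Design note: the key itself never rides in the WR. It was written to
 * adapter memory when the socket option installed it; the ULP_TX_SC_MEMRD
 * sub-command merely points the crypto engine at that location, with the
 * key length expressed in 16-byte units (keylen >> 4).
 */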
static u64 tlstx_incr_seqnum(struct chtls_hws *hws)
{
	return hws->tx_seq_no++;
}

static bool is_sg_request(const struct sk_buff *skb)
{
	return skb->peeked ||
		(skb->len > MAX_IMM_ULPTX_WR_LEN);
}
/*
 * Returns true if an sk_buff carries urgent data.
 */
static bool skb_urgent(struct sk_buff *skb)
{
	return ULP_SKB_CB(skb)->flags & ULPCB_FLAG_URG;
}
/* TLS content type for CPL SFO */
static unsigned char tls_content_type(unsigned char content_type)
{
	switch (content_type) {
	case TLS_HDR_TYPE_CCS:
		return CPL_TX_TLS_SFO_TYPE_CCS;
	case TLS_HDR_TYPE_ALERT:
		return CPL_TX_TLS_SFO_TYPE_ALERT;
	case TLS_HDR_TYPE_HANDSHAKE:
		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
	case TLS_HDR_TYPE_HEARTBEAT:
		return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
	}
	return CPL_TX_TLS_SFO_TYPE_DATA;
}
static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
			   int dlen, int tls_immd, u32 credits,
			   int expn, int pdus)
{
	struct fw_tlstx_data_wr *req_wr;
	struct cpl_tx_tls_sfo *req_cpl;
	unsigned int wr_ulp_mode_force;
	struct tls_scmd *updated_scmd;
	unsigned char data_type;
	struct chtls_sock *csk;
	struct net_device *dev;
	struct chtls_hws *hws;
	struct tls_scmd *scmd;
	struct adapter *adap;
	unsigned char *req;
	int immd_len;
	int iv_imm;
	int len;

	csk = rcu_dereference_sk_user_data(sk);
	iv_imm = skb_ulp_tls_iv_imm(skb);
	dev = csk->egress_dev;
	adap = netdev2adap(dev);
	hws = &csk->tlshws;
	scmd = &hws->scmd;
	len = dlen + expn;

	dlen = (dlen < hws->mfs) ? dlen : hws->mfs;
	atomic_inc(&adap->chcr_stats.tls_pdu_tx);

	updated_scmd = scmd;
	updated_scmd->seqno_numivs &= 0xffffff80;
	updated_scmd->seqno_numivs |= SCMD_NUM_IVS_V(pdus);
	hws->scmd = *updated_scmd;

	req = (unsigned char *)__skb_push(skb, sizeof(struct cpl_tx_tls_sfo));
	req_cpl = (struct cpl_tx_tls_sfo *)req;
	req = (unsigned char *)__skb_push(skb, (sizeof(struct
				fw_tlstx_data_wr)));

	req_wr = (struct fw_tlstx_data_wr *)req;
	immd_len = (tls_immd ? dlen : 0);
	req_wr->op_to_immdlen =
		htonl(FW_WR_OP_V(FW_TLSTX_DATA_WR) |
		      FW_TLSTX_DATA_WR_COMPL_V(1) |
		      FW_TLSTX_DATA_WR_IMMDLEN_V(immd_len));
	req_wr->flowid_len16 = htonl(FW_TLSTX_DATA_WR_FLOWID_V(csk->tid) |
				     FW_TLSTX_DATA_WR_LEN16_V(credits));
	wr_ulp_mode_force = TX_ULP_MODE_V(ULP_MODE_TLS);

	if (is_sg_request(skb))
		wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
			((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
			FW_OFLD_TX_DATA_WR_SHOVE_F);

	req_wr->lsodisable_to_flags =
			htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
			      TX_URG_V(skb_urgent(skb)) |
			      T6_TX_FORCE_F | wr_ulp_mode_force |
			      TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
					 skb_queue_empty(&csk->txq)));

	req_wr->ctxloc_to_exp =
			htonl(FW_TLSTX_DATA_WR_NUMIVS_V(pdus) |
			      FW_TLSTX_DATA_WR_EXP_V(expn) |
			      FW_TLSTX_DATA_WR_CTXLOC_V(CHTLS_KEY_CONTEXT_DDR) |
			      FW_TLSTX_DATA_WR_IVDSGL_V(!iv_imm) |
			      FW_TLSTX_DATA_WR_KEYSIZE_V(hws->keylen >> 4));

	/* Fill in the length */
	req_wr->plen = htonl(len);
	req_wr->mfs = htons(hws->mfs);
	req_wr->adjustedplen_pkd =
		htons(FW_TLSTX_DATA_WR_ADJUSTEDPLEN_V(hws->adjustlen));
	req_wr->expinplenmax_pkd =
		htons(FW_TLSTX_DATA_WR_EXPINPLENMAX_V(hws->expansion));
	req_wr->pdusinplenmax_pkd =
		FW_TLSTX_DATA_WR_PDUSINPLENMAX_V(hws->pdus);
	req_wr->r10 = 0;

	data_type = tls_content_type(ULP_SKB_CB(skb)->ulp.tls.type);
	req_cpl->op_to_seg_len = htonl(CPL_TX_TLS_SFO_OPCODE_V(CPL_TX_TLS_SFO) |
				       CPL_TX_TLS_SFO_DATA_TYPE_V(data_type) |
				       CPL_TX_TLS_SFO_CPL_LEN_V(2) |
				       CPL_TX_TLS_SFO_SEG_LEN_V(dlen));
	req_cpl->pld_len = htonl(len - expn);

	req_cpl->type_protover = htonl(CPL_TX_TLS_SFO_TYPE_V
		((data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT) ?
		TLS_HDR_TYPE_HEARTBEAT : 0) |
		CPL_TX_TLS_SFO_PROTOVER_V(0));

	/* create the s-command */
	req_cpl->r1_lo = 0;
	req_cpl->seqno_numivs = cpu_to_be32(hws->scmd.seqno_numivs);
	req_cpl->ivgen_hdrlen = cpu_to_be32(hws->scmd.ivgen_hdrlen);
	req_cpl->scmd1 = cpu_to_be64(tlstx_incr_seqnum(hws));
}
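
/*
 * Resulting WR layout (built back-to-front by the __skb_push() calls here
 * and in the helpers above): fw_tlstx_data_wr, cpl_tx_tls_sfo, the
 * ULP_TX_SC_MEMRD key pointer, the IVs (immediate or DSGL), then payload.
 */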
/*
 * Calculate the TLS data expansion size
 */
static int chtls_expansion_size(struct sock *sk, int data_len,
				int fullpdu,
				unsigned short *pducnt)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *hws = &csk->tlshws;
	struct tls_scmd *scmd = &hws->scmd;
	int fragsize = hws->mfs;
	int expnsize = 0;
	int fragleft;
	int fragcnt;
	int expppdu;

	if (SCMD_CIPH_MODE_G(scmd->seqno_numivs) ==
	    SCMD_CIPH_MODE_AES_GCM) {
		expppdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
			  TLS_HEADER_LENGTH;

		if (fullpdu) {
			*pducnt = data_len / (expppdu + fragsize);

			if (*pducnt > 32)
				*pducnt = 32;
			else if (!*pducnt)
				*pducnt = 1;
			expnsize = (*pducnt) * expppdu;
			return expnsize;
		}
		fragcnt = (data_len / fragsize);
		expnsize = fragcnt * expppdu;
		fragleft = data_len % fragsize;
		if (fragleft > 0)
			expnsize += expppdu;
	}
	return expnsize;
}
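
/*
 * Worked example (AES-GCM, assuming the conventional constant values
 * GCM_TAG_SIZE = 16, AEAD_EXPLICIT_DATA_SIZE = 8, TLS_HEADER_LENGTH = 5):
 * each PDU expands by 29 bytes, so 40000 bytes of payload at mfs == 16384
 * spans two full fragments plus a remainder and expands by 3 * 29 = 87.
 */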
/* WR with IV, KEY and CPL SFO added */
static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb,
			       int tls_tx_imm, int tls_len, u32 credits)
{
	unsigned short pdus_per_ulp = 0;
	struct chtls_sock *csk;
	struct chtls_hws *hws;
	int expn_sz;
	int pdus;

	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;
	pdus = DIV_ROUND_UP(tls_len, hws->mfs);
	expn_sz = chtls_expansion_size(sk, tls_len, 0, NULL);
	if (!hws->compute) {
		hws->expansion = chtls_expansion_size(sk,
						      hws->fcplenmax,
						      1, &pdus_per_ulp);
		hws->pdus = pdus_per_ulp;
		hws->adjustlen = hws->pdus *
			((hws->expansion / hws->pdus) + hws->mfs);
		hws->compute = 1;
	}
	if (tls_copy_ivs(sk, skb))
		return;
	tls_copy_tx_key(sk, skb);
	tls_tx_data_wr(sk, skb, tls_len, tls_tx_imm, credits, expn_sz, pdus);
	hws->tx_seq_no += (pdus - 1);
}
static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
			    unsigned int immdlen, int len,
			    u32 credits, u32 compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int wr_ulp_mode_force;
	struct chtls_sock *csk;
	unsigned int opcode;

	csk = rcu_dereference_sk_user_data(sk);
	opcode = FW_OFLD_TX_DATA_WR;

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
	req->op_to_immdlen = htonl(WR_OP_V(opcode) |
				FW_WR_COMPL_V(compl) |
				FW_WR_IMMDLEN_V(immdlen));
	req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
				FW_WR_LEN16_V(credits));

	wr_ulp_mode_force = TX_ULP_MODE_V(csk->ulp_mode);
	if (is_sg_request(skb))
		wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
			((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
				FW_OFLD_TX_DATA_WR_SHOVE_F);

	req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
			TX_URG_V(skb_urgent(skb)) |
			TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
				   skb_queue_empty(&csk->txq)));
	req->plen = htonl(len);
}
static int chtls_wr_size(struct chtls_sock *csk, const struct sk_buff *skb,
			 bool size)
{
	int wr_size;

	wr_size = TLS_WR_CPL_LEN;
	wr_size += KEY_ON_MEM_SZ;
	wr_size += ivs_size(csk->sk, skb);

	if (size)
		return wr_size;

	/* frags counted for IV dsgl */
	if (!skb_ulp_tls_iv_imm(skb))
		skb_shinfo(skb)->nr_frags++;

	return wr_size;
}
static bool is_ofld_imm(struct chtls_sock *csk, const struct sk_buff *skb)
{
	int length = skb->len;

	if (skb->peeked || skb->len > MAX_IMM_ULPTX_WR_LEN)
		return false;

	if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
		/* Check TLS header len for Immediate */
		if (csk->ulp_mode == ULP_MODE_TLS &&
		    skb_ulp_tls_inline(skb))
			length += chtls_wr_size(csk, skb, true);
		else
			length += sizeof(struct fw_ofld_tx_data_wr);

		return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
	}
	return true;
}
static unsigned int calc_tx_flits(const struct sk_buff *skb,
				  unsigned int immdlen)
{
	unsigned int flits, cnt;

	flits = immdlen / 8;            /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
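
/*
 * Units note: a flit is 8 bytes of descriptor space on these adapters,
 * so immdlen / 8 counts the header flits and sgl_len(cnt) is the space
 * the scatter/gather list itself occupies for cnt fragments.
 */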
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
int chtls_push_frames(struct chtls_sock *csk, int comp)
{
	struct chtls_hws *hws = &csk->tlshws;
	struct tcp_sock *tp;
	struct sk_buff *skb;
	int total_size = 0;
	struct sock *sk;
	int wr_size;

	wr_size = sizeof(struct fw_ofld_tx_data_wr);
	sk = csk->sk;
	tp = tcp_sk(sk);

	if (unlikely(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE)))
		return 0;

	if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
		return 0;

	while (csk->wr_credits && (skb = skb_peek(&csk->txq)) &&
	       (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_HOLD) ||
		skb_queue_len(&csk->txq) > 1)) {
		unsigned int credit_len = skb->len;
		unsigned int credits_needed;
		unsigned int completion = 0;
		int tls_len = skb->len;/* TLS data len before IV/key */
		unsigned int immdlen;
		int len = skb->len; /* length [ulp bytes] inserted by hw */
		int flowclen16 = 0;
		int tls_tx_imm = 0;

		immdlen = skb->len;
		if (!is_ofld_imm(csk, skb)) {
			immdlen = skb_transport_offset(skb);
			if (skb_ulp_tls_inline(skb))
				wr_size = chtls_wr_size(csk, skb, false);
			credit_len = 8 * calc_tx_flits(skb, immdlen);
		} else {
			if (skb_ulp_tls_inline(skb)) {
				wr_size = chtls_wr_size(csk, skb, false);
				tls_tx_imm = 1;
			}
		}
		if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR))
			credit_len += wr_size;
		credits_needed = DIV_ROUND_UP(credit_len, 16);
		if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(sk, 1, tp->snd_nxt,
						      tp->rcv_nxt);
			if (flowclen16 <= 0)
				break;
			csk->wr_credits -= flowclen16;
			csk->wr_unacked += flowclen16;
			csk->wr_nondata += flowclen16;
			csk_set_flag(csk, CSK_TX_DATA_SENT);
		}

		if (csk->wr_credits < credits_needed) {
			if (skb_ulp_tls_inline(skb) &&
			    !skb_ulp_tls_iv_imm(skb))
				skb_shinfo(skb)->nr_frags--;
			break;
		}

		__skb_unlink(skb, &csk->txq);
		skb_set_queue_mapping(skb, (csk->txq_idx << 1) |
				      CPL_PRIORITY_DATA);
		if (hws->ofld)
			hws->txqid = (skb->queue_mapping >> 1);
		skb->csum = (__force __wsum)(credits_needed + csk->wr_nondata);
		csk->wr_credits -= credits_needed;
		csk->wr_unacked += credits_needed;
		csk->wr_nondata = 0;
		enqueue_wr(csk, skb);

		if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
			if ((comp && csk->wr_unacked == credits_needed) ||
			    (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) ||
			    csk->wr_unacked >= csk->wr_max_credits / 2) {
				completion = 1;
				csk->wr_unacked = 0;
			}
			if (skb_ulp_tls_inline(skb))
				make_tlstx_data_wr(sk, skb, tls_tx_imm,
						   tls_len, credits_needed);
			else
				make_tx_data_wr(sk, skb, immdlen, len,
						credits_needed, completion);
			tp->snd_nxt += len;
			tp->lsndtime = tcp_time_stamp(tp);
			if (completion)
				ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
		} else {
			struct cpl_close_con_req *req = cplhdr(skb);
			unsigned int cmd = CPL_OPCODE_G(ntohl
					     (OPCODE_TID(req)));

			if (cmd == CPL_CLOSE_CON_REQ)
				csk_set_flag(csk,
					     CSK_CLOSE_CON_REQUESTED);

			if ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) &&
			    (csk->wr_unacked >= csk->wr_max_credits / 2)) {
				req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
				csk->wr_unacked = 0;
			}
		}
		total_size += skb->truesize;
		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_BARRIER)
			csk_set_flag(csk, CSK_TX_WAIT_IDLE);
		t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
		cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
	}
	sk->sk_wmem_queued -= total_size;
	return total_size;
}
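
/*
 * Credit accounting sketch: one credit buys 16 bytes of WR space. Each
 * skb costs its immediate length (or 8 * calc_tx_flits() when sent by
 * SGL) plus the WR/CPL/key/IV header for skbs that still need one.
 * skb->csum is reused to remember the credits charged so they can be
 * refunded when the firmware completion (FW_WR_COMPL) comes back.
 */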
static void mark_urg(struct tcp_sock *tp, int flags,
		     struct sk_buff *skb)
{
	if (unlikely(flags & MSG_OOB)) {
		tp->snd_up = tp->write_seq;
		ULP_SKB_CB(skb)->flags = ULPCB_FLAG_URG |
					 ULPCB_FLAG_BARRIER |
					 ULPCB_FLAG_NO_APPEND |
					 ULPCB_FLAG_NEED_HDR;
	}
}
/*
 * Returns true if a connection should send more data to TCP engine
 */
static bool should_push(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	struct tcp_sock *tp = tcp_sk(sk);

	/*
	 * If we've released our offload resources there's nothing to do ...
	 */
	if (!cdev)
		return false;

	/*
	 * If there aren't any work requests in flight, or there isn't enough
	 * data in flight, or Nagle is off then send the current TX_DATA
	 * otherwise hold it and wait to accumulate more data.
	 */
	return csk->wr_credits == csk->wr_max_credits ||
	       (tp->nonagle & TCP_NAGLE_OFF);
}
/*
 * Returns true if a TCP socket is corked.
 */
static bool corked(const struct tcp_sock *tp, int flags)
{
	return (flags & MSG_MORE) || (tp->nonagle & TCP_NAGLE_CORK);
}

/*
 * Returns true if a send should try to push new data.
 */
static bool send_should_push(struct sock *sk, int flags)
{
	return should_push(sk) && !corked(tcp_sk(sk), flags);
}
void chtls_tcp_push(struct sock *sk, int flags)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	int qlen = skb_queue_len(&csk->txq);

	if (likely(qlen)) {
		struct sk_buff *skb = skb_peek_tail(&csk->txq);
		struct tcp_sock *tp = tcp_sk(sk);

		mark_urg(tp, flags, skb);

		if (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) &&
		    corked(tp, flags)) {
			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_HOLD;
			return;
		}

		ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_HOLD;
		if (qlen == 1 &&
		    ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
		     should_push(sk)))
			chtls_push_frames(csk, 1);
	}
}
/*
 * Calculate the size for a new send sk_buff.  It's maximum size so we can
 * pack lots of data into it, unless we plan to send it immediately, in which
 * case we size it more tightly.
 *
 * Note: we don't bother compensating for MSS < PAGE_SIZE because it doesn't
 * arise in normal cases and when it does we are just wasting memory.
 */
static int select_size(struct sock *sk, int io_len, int flags, int len)
{
	const int pgbreak = SKB_MAX_HEAD(len);

	/*
	 * If the data wouldn't fit in the main body anyway, put only the
	 * header in the main body so it can use immediate data and place all
	 * the payload in page fragments.
	 */
	if (io_len > pgbreak)
		return 0;

	/*
	 * If we will be accumulating payload get a large main body.
	 */
	if (!send_should_push(sk, flags))
		return pgbreak;

	return io_len;
}
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ULP_SKB_CB(skb)->seq = tp->write_seq;
	ULP_SKB_CB(skb)->flags = flags;
	__skb_queue_tail(&csk->txq, skb);
	sk->sk_wmem_queued += skb->truesize;

	if (TCP_PAGE(sk) && TCP_OFF(sk)) {
		put_page(TCP_PAGE(sk));
		TCP_PAGE(sk) = NULL;
		TCP_OFF(sk) = 0;
	}
}
static struct sk_buff *get_tx_skb(struct sock *sk, int size)
{
	struct sk_buff *skb;

	skb = alloc_skb(size + TX_HEADER_LEN, sk->sk_allocation);
	if (likely(skb)) {
		skb_reserve(skb, TX_HEADER_LEN);
		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
		skb_reset_transport_header(skb);
	}
	return skb;
}
static struct sk_buff *get_record_skb(struct sock *sk, int size, bool zcopy)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	skb = alloc_skb(((zcopy ? 0 : size) + TX_TLSHDR_LEN +
			KEY_ON_MEM_SZ + max_ivs_size(sk, size)),
			sk->sk_allocation);
	if (likely(skb)) {
		skb_reserve(skb, (TX_TLSHDR_LEN +
			    KEY_ON_MEM_SZ + max_ivs_size(sk, size)));
		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
		skb_reset_transport_header(skb);
		ULP_SKB_CB(skb)->ulp.tls.ofld = 1;
		ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type;
	}
	return skb;
}
static void tx_skb_finalize(struct sk_buff *skb)
{
	struct ulp_skb_cb *cb = ULP_SKB_CB(skb);

	if (!(cb->flags & ULPCB_FLAG_NO_HDR))
		cb->flags = ULPCB_FLAG_NEED_HDR;
	cb->flags |= ULPCB_FLAG_NO_APPEND;
}
static void push_frames_if_head(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (skb_queue_len(&csk->txq) == 1)
		chtls_push_frames(csk, 1);
}
static int chtls_skb_copy_to_page_nocache(struct sock *sk,
					  struct iov_iter *from,
					  struct sk_buff *skb,
					  struct page *page,
					  int off, int copy)
{
	int err;

	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) +
				       off, copy, skb->len);
	if (err)
		return err;

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk->sk_wmem_queued += copy;
	return 0;
}
/* Read TLS header to find content type and data length */
static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
{
	if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr))
		return -EFAULT;
	return (__force int)cpu_to_be16(thdr->length);
}
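
/*
 * Wire format reminder: a TLS record header is 5 bytes - 1 byte type,
 * 2 bytes version, 2 bytes big-endian length. The cpu_to_be16() above is
 * effectively a be16_to_cpu() on the wire value, handing the caller the
 * host-order payload length of the record that follows.
 */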
static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
{
	return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
}
static int csk_wait_memory(struct chtls_dev *cdev,
			   struct sock *sk, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int sndbuf, err = 0;
	long current_timeo;
	long vm_wait = 0;
	bool noblock;

	current_timeo = *timeo_p;
	noblock = (*timeo_p ? false : true);
	sndbuf = cdev->max_host_sndbuf;
	if (csk_mem_free(cdev, sk)) {
		current_timeo = (prandom_u32() % (HZ / 5)) + 2;
		vm_wait = (prandom_u32() % (HZ / 5)) + 2;
	}

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo_p) {
			if (noblock)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			goto do_nonblock;
		}
		if (signal_pending(current))
			goto do_interrupted;
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (csk_mem_free(cdev, sk) && !vm_wait)
			break;

		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
		sk_wait_event(sk, &current_timeo, sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      (csk_mem_free(cdev, sk) && !vm_wait),
			      &wait);
		sk->sk_write_pending--;

		if (vm_wait) {
			vm_wait -= current_timeo;
			current_timeo = *timeo_p;
			if (current_timeo != MAX_SCHEDULE_TIMEOUT) {
				current_timeo -= vm_wait;
				if (current_timeo < 0)
					current_timeo = 0;
			}
			vm_wait = 0;
		}
		*timeo_p = current_timeo;
	}
do_rm_wq:
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
do_error:
	err = -EPIPE;
	goto do_rm_wq;
do_nonblock:
	err = -EAGAIN;
	goto do_rm_wq;
do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto do_rm_wq;
}
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int mss, flags, err;
	int recordsz = 0;
	int copied = 0;
	long timeo;

	lock_sock(sk);
	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out_err;
	}

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	mss = csk->mss;
	csk_set_flag(csk, CSK_TX_MORE_DATA);

	while (msg_data_left(msg)) {
		int copy = 0;

		skb = skb_peek_tail(&csk->txq);
		if (skb) {
			copy = mss - skb->len;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if (!csk_mem_free(cdev, sk))
			goto wait_for_sndbuf;

		if (is_tls_tx(csk) && !csk->tlshws.txleft) {
			struct tls_hdr hdr;

			recordsz = tls_header_read(&hdr, &msg->msg_iter);
			size -= TLS_HEADER_LENGTH;
			copied += TLS_HEADER_LENGTH;
			csk->tlshws.txleft = recordsz;
			csk->tlshws.type = hdr.type;
			if (skb)
				ULP_SKB_CB(skb)->ulp.tls.type = hdr.type;
		}

		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
		    copy <= 0) {
new_buf:
			if (skb) {
				tx_skb_finalize(skb);
				push_frames_if_head(sk);
			}

			if (is_tls_tx(csk)) {
				skb = get_record_skb(sk,
						     select_size(sk,
								 recordsz,
								 flags,
								 TX_TLSHDR_LEN),
						     false);
			} else {
				skb = get_tx_skb(sk,
						 select_size(sk, size, flags,
							     TX_HEADER_LEN));
			}
			if (unlikely(!skb))
				goto wait_for_memory;

			skb->ip_summed = CHECKSUM_UNNECESSARY;
			copy = mss;
		}
		if (copy > size)
			copy = size;

		if (skb_tailroom(skb) > 0) {
			copy = min(copy, skb_tailroom(skb));
			if (is_tls_tx(csk))
				copy = min_t(int, copy, csk->tlshws.txleft);
			err = skb_add_data_nocache(sk, skb,
						   &msg->msg_iter, copy);
			if (err)
				goto do_fault;
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			struct page *page = TCP_PAGE(sk);
			int pg_size = PAGE_SIZE;
			int off = TCP_OFF(sk);
			bool merge;

			if (page)
				pg_size <<= compound_order(page);
			if (off < pg_size &&
			    skb_can_coalesce(skb, i, page, off)) {
				merge = true;
				goto copy;
			}
			merge = false;
			if (i == (is_tls_tx(csk) ? (MAX_SKB_FRAGS - 1) :
			    MAX_SKB_FRAGS))
				goto new_buf;

			if (page && off == pg_size) {
				put_page(page);
				TCP_PAGE(sk) = page = NULL;
				pg_size = PAGE_SIZE;
			}

			if (!page) {
				gfp_t gfp = sk->sk_allocation;
				int order = cdev->send_page_order;

				if (order) {
					page = alloc_pages(gfp | __GFP_COMP |
							   __GFP_NOWARN |
							   __GFP_NORETRY,
							   order);
					if (page)
						pg_size <<=
							compound_order(page);
				}
				if (!page) {
					page = alloc_page(gfp);
					pg_size = PAGE_SIZE;
				}
				if (!page)
					goto wait_for_memory;
				off = 0;
			}
copy:
			if (copy > pg_size - off)
				copy = pg_size - off;
			if (is_tls_tx(csk))
				copy = min_t(int, copy, csk->tlshws.txleft);

			err = chtls_skb_copy_to_page_nocache(sk, &msg->msg_iter,
							     skb, page,
							     off, copy);
			if (unlikely(err)) {
				if (!TCP_PAGE(sk)) {
					TCP_PAGE(sk) = page;
					TCP_OFF(sk) = 0;
				}
				goto do_fault;
			}
			/* Update the skb. */
			if (merge) {
				skb_shinfo(skb)->frags[i - 1].size += copy;
			} else {
				skb_fill_page_desc(skb, i, page, off, copy);
				if (off + copy < pg_size) {
					/* space left keep page */
					get_page(page);
					TCP_PAGE(sk) = page;
				} else {
					TCP_PAGE(sk) = NULL;
				}
			}
			TCP_OFF(sk) = off + copy;
		}
		if (unlikely(skb->len == mss))
			tx_skb_finalize(skb);
		tp->write_seq += copy;
		copied += copy;
		size -= copy;

		if (is_tls_tx(csk))
			csk->tlshws.txleft -= copy;

		if (corked(tp, flags) &&
		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;

		if (size == 0)
			goto out;

		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)
			push_frames_if_head(sk);
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = csk_wait_memory(cdev, sk, &timeo);
		if (err)
			goto do_error;
	}
out:
	csk_reset_flag(csk, CSK_TX_MORE_DATA);
	if (copied)
		chtls_tcp_push(sk, flags);
done:
	release_sock(sk);
	return copied;
do_fault:
	if (!skb->len) {
		__skb_unlink(skb, &csk->txq);
		sk->sk_wmem_queued -= skb->truesize;
		__kfree_skb(skb);
	}
do_error:
	if (copied)
		goto out;
out_err:
	if (csk_conn_inline(csk))
		csk_reset_flag(csk, CSK_TX_MORE_DATA);
	copied = sk_stream_error(sk, flags, err);
	goto done;
}
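
/*
 * Usage summary (a sketch of the expected call pattern): userspace hands
 * in complete TLS records (5-byte header plus payload). The loop above
 * consumes the header to learn type and length, then gathers exactly
 * tlshws.txleft payload bytes into one record skb, so each skb becomes a
 * single FW_TLSTX_DATA_WR once chtls_push_frames() drains the tx queue.
 */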
int chtls_sendpage(struct sock *sk, struct page *page,
		   int offset, size_t size, int flags)
{
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	int mss, err, copied;
	struct tcp_sock *tp;
	long timeo;

	tp = tcp_sk(sk);
	copied = 0;
	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	err = sk_stream_wait_connect(sk, &timeo);
	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
	    err != 0)
		goto out_err;

	mss = csk->mss;
	csk_set_flag(csk, CSK_TX_MORE_DATA);

	while (size > 0) {
		struct sk_buff *skb = skb_peek_tail(&csk->txq);
		int copy, i;

		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
		    (copy = mss - skb->len) <= 0) {
new_buf:
			if (!csk_mem_free(cdev, sk))
				goto wait_for_sndbuf;

			if (is_tls_tx(csk)) {
				skb = get_record_skb(sk,
						     select_size(sk, size,
								 flags,
								 TX_TLSHDR_LEN),
						     true);
			} else {
				skb = get_tx_skb(sk, 0);
			}
			if (!skb)
				goto wait_for_memory;
			copy = mss;
		}
		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		} else {
			tx_skb_finalize(skb);
			push_frames_if_head(sk);
			goto new_buf;
		}

		skb->len += copy;
		if (skb->len == mss)
			tx_skb_finalize(skb);
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		tp->write_seq += copy;
		copied += copy;
		offset += copy;
		size -= copy;

		if (corked(tp, flags) &&
		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;

		if (!size)
			break;

		if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
			push_frames_if_head(sk);
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = csk_wait_memory(cdev, sk, &timeo);
		if (err)
			goto do_error;
	}
out:
	csk_reset_flag(csk, CSK_TX_MORE_DATA);
	if (copied)
		chtls_tcp_push(sk, flags);
done:
	release_sock(sk);
	return copied;

do_error:
	if (copied)
		goto out;

out_err:
	if (csk_conn_inline(csk))
		csk_reset_flag(csk, CSK_TX_MORE_DATA);
	copied = sk_stream_error(sk, flags, err);
	goto done;
}
static void chtls_select_window(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int wnd = tp->rcv_wnd;

	wnd = max_t(unsigned int, wnd, tcp_full_space(sk));
	wnd = max_t(unsigned int, MIN_RCV_WND, wnd);

	if (wnd > MAX_RCV_WND)
		wnd = MAX_RCV_WND;

/*
 * Check if we need to grow the receive window in response to an increase in
 * the socket's receive buffer size.  Some applications increase the buffer
 * size dynamically and rely on the window to grow accordingly.
 */

	if (wnd > tp->rcv_wnd) {
		tp->rcv_wup -= wnd - tp->rcv_wnd;
		tp->rcv_wnd = wnd;
		/* Mark the receive window as updated */
		csk_reset_flag(csk, CSK_UPDATE_RCV_WND);
	}
}
/*
 * Send RX credits through an RX_DATA_ACK CPL message.  We are permitted
 * to return without sending the message in case we cannot allocate
 * an sk_buff.  Returns the number of credits sent.
 */
static u32 send_rx_credits(struct chtls_sock *csk, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return 0;
	__skb_put(skb, sizeof(*req));
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) |
				       RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}
#define CREDIT_RETURN_STATE (TCPF_ESTABLISHED | \
			     TCPF_FIN_WAIT1 | \
			     TCPF_FIN_WAIT2)
/*
 * Called after some received data has been read.  It returns RX credits
 * to the HW for the amount of data processed.
 */
static void chtls_cleanup_rbuf(struct sock *sk, int copied)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp;
	int must_send;
	u32 credits;
	u32 thres;

	thres = 15 * 1024;

	if (!sk_in_state(sk, CREDIT_RETURN_STATE))
		return;

	chtls_select_window(sk);
	tp = tcp_sk(sk);
	credits = tp->copied_seq - tp->rcv_wup;
	if (unlikely(!credits))
		return;

/*
 * For coalescing to work effectively ensure the receive window has
 * at least 16KB left.
 */
	must_send = credits + 16384 >= tp->rcv_wnd;

	if (must_send || credits >= thres)
		tp->rcv_wup += send_rx_credits(csk, credits);
}
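
/*
 * Receive flow-control note: rcv_wup tracks the window edge already
 * advertised to the adapter. Credits accumulate as the application
 * consumes data and are flushed via RX_DATA_ACK once they hit the 15KB
 * threshold, or earlier if less than 16KB of window would remain.
 */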
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			    int nonblock, int flags, int *addr_len)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct net_device *dev = csk->egress_dev;
	struct chtls_hws *hws = &csk->tlshws;
	struct tcp_sock *tp = tcp_sk(sk);
	struct adapter *adap;
	unsigned long avail;
	int buffers_freed;
	int copied = 0;
	int request;
	int target;
	long timeo;

	adap = netdev2adap(dev);
	buffers_freed = 0;

	timeo = sock_rcvtimeo(sk, nonblock);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	request = len;

	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
		chtls_cleanup_rbuf(sk, copied);

	do {
		struct sk_buff *skb;
		u32 offset = 0;

		if (unlikely(tp->urg_data &&
			     tp->urg_seq == tp->copied_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			goto found_ok_skb;
		if (csk->wr_credits &&
		    skb_queue_len(&csk->txq) &&
		    chtls_push_frames(csk, csk->wr_credits ==
				      csk->wr_max_credits))
			sk->sk_write_space(sk);

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}
			if (!timeo) {
				copied = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}
		if (sk->sk_backlog.tail) {
			release_sock(sk);
			lock_sock(sk);
			chtls_cleanup_rbuf(sk, copied);
			continue;
		}

		if (copied >= target)
			break;
		chtls_cleanup_rbuf(sk, copied);
		sk_wait_data(sk, &timeo, NULL);
		continue;
found_ok_skb:
		if (!skb->len) {
			skb_dst_set(skb, NULL);
			__skb_unlink(skb, &sk->sk_receive_queue);
			kfree_skb(skb);

			if (!copied && !timeo) {
				copied = -EAGAIN;
				break;
			}

			if (copied < target) {
				release_sock(sk);
				lock_sock(sk);
				continue;
			}
			break;
		}
		offset = hws->copied_seq;
		avail = skb->len - offset;
		if (len < avail)
			avail = len;

		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - tp->copied_seq;

			if (urg_offset < avail) {
				if (urg_offset) {
					avail = urg_offset;
				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
					/* First byte is urgent, skip */
					tp->copied_seq++;
					offset++;
					avail--;
					if (!avail)
						goto skip_copy;
				}
			}
		}
		if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
			if (!copied) {
				copied = -EFAULT;
				break;
			}
		}

		copied += avail;
		len -= avail;
		hws->copied_seq += avail;
skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
			tp->urg_data = 0;

		if ((avail + offset) >= skb->len) {
			if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
				tp->copied_seq += skb->len;
				hws->rcvpld = skb->hdr_len;
			} else {
				tp->copied_seq += hws->rcvpld;
			}
			chtls_free_skb(sk, skb);
			buffers_freed++;
			hws->copied_seq = 0;
			if (copied >= target &&
			    !skb_peek(&sk->sk_receive_queue))
				break;
		}
	} while (len > 0);

	if (buffers_freed)
		chtls_cleanup_rbuf(sk, copied);
	release_sock(sk);
	return copied;
}
/*
 * Peek at data in a socket's receive buffer.
 */
static int peekmsg(struct sock *sk, struct msghdr *msg,
		   size_t len, int nonblock, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 peek_seq, offset;
	struct sk_buff *skb;
	int copied = 0;
	size_t avail;          /* amount of available data in current skb */
	long timeo;

	lock_sock(sk);
	timeo = sock_rcvtimeo(sk, nonblock);
	peek_seq = tp->copied_seq;

	do {
		if (unlikely(tp->urg_data && tp->urg_seq == peek_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}

		skb_queue_walk(&sk->sk_receive_queue, skb) {
			offset = peek_seq - ULP_SKB_CB(skb)->seq;
			if (offset < skb->len)
				goto found_ok_skb;
		}

		/* empty receive queue */
		if (copied)
			break;
		if (sock_flag(sk, SOCK_DONE))
			break;
		if (sk->sk_err) {
			copied = sock_error(sk);
			break;
		}
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		if (sk->sk_state == TCP_CLOSE) {
			copied = -ENOTCONN;
			break;
		}
		if (!timeo) {
			copied = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			copied = sock_intr_errno(timeo);
			break;
		}

		if (sk->sk_backlog.tail) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else {
			sk_wait_data(sk, &timeo, NULL);
		}

		if (unlikely(peek_seq != tp->copied_seq)) {
			if (net_ratelimit())
				pr_info("TCP(%s:%d), race in MSG_PEEK.\n",
					current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;
found_ok_skb:
		avail = skb->len - offset;
		if (len < avail)
			avail = len;
		/*
		 * Do we have urgent data here?  We need to skip over the
		 * urgent byte.
		 */
		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - peek_seq;

			if (urg_offset < avail) {
				/*
				 * The amount of data we are preparing to copy
				 * contains urgent data.
				 */
				if (!urg_offset) { /* First byte is urgent */
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						peek_seq++;
						offset++;
						avail--;
					}
					if (!avail)
						continue;
				} else {
					/* stop short of the urgent data */
					avail = urg_offset;
				}
			}
		}

		/*
		 * If MSG_TRUNC is specified the data is discarded.
		 */
		if (likely(!(flags & MSG_TRUNC)))
			if (skb_copy_datagram_msg(skb, offset, msg, len)) {
				if (!copied) {
					copied = -EFAULT;
					break;
				}
			}
		peek_seq += avail;
		copied += avail;
		len -= avail;
	} while (len > 0);

	release_sock(sk);
	return copied;
}
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct chtls_sock *csk;
	struct chtls_hws *hws;
	unsigned long avail;    /* amount of available data in current skb */
	int buffers_freed;
	int copied = 0;
	int request;
	long timeo;
	int target;             /* Read at least this many bytes */

	buffers_freed = 0;

	if (unlikely(flags & MSG_OOB))
		return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
					addr_len);

	if (unlikely(flags & MSG_PEEK))
		return peekmsg(sk, msg, len, nonblock, flags);

	if (sk_can_busy_loop(sk) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    sk->sk_state == TCP_ESTABLISHED)
		sk_busy_loop(sk, nonblock);

	lock_sock(sk);
	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;

	if (is_tls_rx(csk))
		return chtls_pt_recvmsg(sk, msg, len, nonblock,
					flags, addr_len);

	timeo = sock_rcvtimeo(sk, nonblock);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	request = len;

	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
		chtls_cleanup_rbuf(sk, copied);

	do {
		struct sk_buff *skb;
		u32 offset;

		if (unlikely(tp->urg_data && tp->urg_seq == tp->copied_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			goto found_ok_skb;

		if (csk->wr_credits &&
		    skb_queue_len(&csk->txq) &&
		    chtls_push_frames(csk, csk->wr_credits ==
				      csk->wr_max_credits))
			sk->sk_write_space(sk);

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}
			if (!timeo) {
				copied = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		if (sk->sk_backlog.tail) {
			release_sock(sk);
			lock_sock(sk);
			chtls_cleanup_rbuf(sk, copied);
			continue;
		}

		if (copied >= target)
			break;
		chtls_cleanup_rbuf(sk, copied);
		sk_wait_data(sk, &timeo, NULL);
		continue;

found_ok_skb:
		if (!skb->len) {
			chtls_kfree_skb(sk, skb);
			if (!copied && !timeo) {
				copied = -EAGAIN;
				break;
			}

			if (copied < target)
				continue;

			break;
		}

		offset = tp->copied_seq - ULP_SKB_CB(skb)->seq;
		avail = skb->len - offset;
		if (len < avail)
			avail = len;

		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - tp->copied_seq;

			if (urg_offset < avail) {
				if (urg_offset) {
					avail = urg_offset;
				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
					tp->copied_seq++;
					offset++;
					avail--;
					if (!avail)
						goto skip_copy;
				}
			}
		}

		if (likely(!(flags & MSG_TRUNC))) {
			if (skb_copy_datagram_msg(skb, offset,
						  msg, avail)) {
				if (!copied) {
					copied = -EFAULT;
					break;
				}
			}
		}

		tp->copied_seq += avail;
		copied += avail;
		len -= avail;

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
			tp->urg_data = 0;

		if (avail + offset >= skb->len) {
			chtls_free_skb(sk, skb);
			buffers_freed++;

			if (copied >= target &&
			    !skb_peek(&sk->sk_receive_queue))
				break;
		}
	} while (len > 0);

	if (buffers_freed)
		chtls_cleanup_rbuf(sk, copied);
	release_sock(sk);
	return copied;
}
1867 chtls_cleanup_rbuf(sk, copied);