2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * IPv4 specific functions
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
53 #define pr_fmt(fmt) "TCP: " fmt
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
66 #include <net/net_namespace.h>
68 #include <net/inet_hashtables.h>
70 #include <net/transp_v6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
75 #include <net/secure_seq.h>
76 #include <net/busy_poll.h>
78 #include <linux/inet.h>
79 #include <linux/ipv6.h>
80 #include <linux/stddef.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/inetdevice.h>
85 #include <crypto/hash.h>
86 #include <linux/scatterlist.h>
88 #include <trace/events/tcp.h>
90 #ifdef CONFIG_TCP_MD5SIG
91 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
92 __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 struct inet_hashinfo tcp_hashinfo;
96 EXPORT_SYMBOL(tcp_hashinfo);
98 static u32 tcp_v4_init_seq(const struct sk_buff *skb)
100 return secure_tcp_seq(ip_hdr(skb)->daddr,
103 tcp_hdr(skb)->source);
106 static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
108 return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
111 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
113 const struct inet_timewait_sock *tw = inet_twsk(sktw);
114 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
115 struct tcp_sock *tp = tcp_sk(sk);
116 int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;
119 /* Still does not detect *everything* that goes through
120 * lo, since we require a loopback src or dst address
121 * or direct binding to 'lo' interface.
123 bool loopback = false;
124 if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
126 #if IS_ENABLED(CONFIG_IPV6)
127 if (tw->tw_family == AF_INET6) {
128 if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
129 (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
130 (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
131 ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
132 (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
133 (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
138 if (ipv4_is_loopback(tw->tw_daddr) ||
139 ipv4_is_loopback(tw->tw_rcv_saddr))
146 /* With PAWS, it is safe from the viewpoint
147 of data integrity. Even without PAWS it is safe provided sequence
148 spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
150 Actually, the idea is close to VJ's: the timestamp cache is
151 held not per host but per port pair, and the TW bucket is used as state
154 If the TW bucket has already been destroyed we fall back to VJ's scheme
155 and use the initial timestamp retrieved from the peer table.
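/* A sketch of the fast-reuse rule checked below (assuming
 * sysctl_tcp_tw_reuse is enabled): a new connect() to the same peer may
 * take over this TIME-WAIT port once ktime_get_seconds() has advanced
 * past tw_ts_recent_stamp, i.e. roughly one second after the last
 * timestamp from the peer was recorded.
 */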
157 if (tcptw->tw_ts_recent_stamp &&
158 (!twp || (reuse && time_after32(ktime_get_seconds(),
159 tcptw->tw_ts_recent_stamp)))) {
160 /* In case of repair and re-using TIME-WAIT sockets we still
161 * want to be sure that it is safe as above but honor the
162 * sequence numbers and time stamps set as part of the repair process.
165 * Without this check re-using a TIME-WAIT socket with TCP
166 * repair would accumulate a -1 on the repair assigned
167 * sequence number. The first time it is reused the sequence
168 * is -1, the second time -2, etc. This fixes that issue
169 * without appearing to create any others.
171 if (likely(!tp->repair)) {
172 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
173 if (tp->write_seq == 0)
175 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
176 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
184 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
186 static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
189 /* This check is replicated from tcp_v4_connect() and is intended to
190 * prevent the BPF program called below from accessing bytes that are
191 * outside of the bound specified by the user in addr_len.
193 if (addr_len < sizeof(struct sockaddr_in))
196 sock_owned_by_me(sk);
198 return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
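/* For reference, a minimal userspace sketch that reaches tcp_v4_connect()
 * through the socket layer (fd and destination are illustrative; error
 * handling and includes omitted):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *		.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
 *	};
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * addr_len must be at least sizeof(struct sockaddr_in) and sin_family
 * must be AF_INET, or the checks below return an error.
 */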
201 /* This will initiate an outgoing connection. */
202 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
204 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
205 struct inet_sock *inet = inet_sk(sk);
206 struct tcp_sock *tp = tcp_sk(sk);
207 __be16 orig_sport, orig_dport;
208 __be32 daddr, nexthop;
212 struct ip_options_rcu *inet_opt;
213 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
215 if (addr_len < sizeof(struct sockaddr_in))
218 if (usin->sin_family != AF_INET)
219 return -EAFNOSUPPORT;
221 nexthop = daddr = usin->sin_addr.s_addr;
222 inet_opt = rcu_dereference_protected(inet->inet_opt,
223 lockdep_sock_is_held(sk));
224 if (inet_opt && inet_opt->opt.srr) {
227 nexthop = inet_opt->opt.faddr;
230 orig_sport = inet->inet_sport;
231 orig_dport = usin->sin_port;
232 fl4 = &inet->cork.fl.u.ip4;
233 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
234 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
236 orig_sport, orig_dport, sk);
239 if (err == -ENETUNREACH)
240 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
244 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
249 if (!inet_opt || !inet_opt->opt.srr)
252 if (!inet->inet_saddr)
253 inet->inet_saddr = fl4->saddr;
254 sk_rcv_saddr_set(sk, inet->inet_saddr);
256 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
257 /* Reset inherited state */
258 tp->rx_opt.ts_recent = 0;
259 tp->rx_opt.ts_recent_stamp = 0;
260 if (likely(!tp->repair))
264 inet->inet_dport = usin->sin_port;
265 sk_daddr_set(sk, daddr);
267 inet_csk(sk)->icsk_ext_hdr_len = 0;
269 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
271 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
273 /* Socket identity is still unknown (sport may be zero).
274 * However we set the state to SYN-SENT and, without releasing the
275 * socket lock, select a source port, enter ourselves into the hash
276 * tables and complete initialization after this.
278 tcp_set_state(sk, TCP_SYN_SENT);
279 err = inet_hash_connect(tcp_death_row, sk);
285 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
286 inet->inet_sport, inet->inet_dport, sk);
292 /* OK, now commit destination to socket. */
293 sk->sk_gso_type = SKB_GSO_TCPV4;
294 sk_setup_caps(sk, &rt->dst);
297 if (likely(!tp->repair)) {
299 tp->write_seq = secure_tcp_seq(inet->inet_saddr,
303 tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
308 inet->inet_id = tp->write_seq ^ jiffies;
310 if (tcp_fastopen_defer_connect(sk, &err))
315 err = tcp_connect(sk);
324 * This unhashes the socket and releases the local port,
327 tcp_set_state(sk, TCP_CLOSE);
329 sk->sk_route_caps = 0;
330 inet->inet_dport = 0;
333 EXPORT_SYMBOL(tcp_v4_connect);
336 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
337 * It can be called through tcp_release_cb() if socket was owned by user
338 * at the time tcp_v4_err() was called to handle ICMP message.
340 void tcp_v4_mtu_reduced(struct sock *sk)
342 struct inet_sock *inet = inet_sk(sk);
343 struct dst_entry *dst;
346 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
348 mtu = tcp_sk(sk)->mtu_info;
349 dst = inet_csk_update_pmtu(sk, mtu);
353 /* Something is about to go wrong... Remember the soft error
354 * in case this connection is not able to recover.
356 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
357 sk->sk_err_soft = EMSGSIZE;
361 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
362 ip_sk_accept_pmtu(sk) &&
363 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
364 tcp_sync_mss(sk, mtu);
366 /* Resend the TCP packet because it's
367 * clear that the old packet has been
368 * dropped. This is the new "fast" path mtu
371 tcp_simple_retransmit(sk);
372 } /* else let the usual retransmit timer handle it */
374 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
376 static void do_redirect(struct sk_buff *skb, struct sock *sk)
378 struct dst_entry *dst = __sk_dst_check(sk, 0);
381 dst->ops->redirect(dst, sk, skb);
385 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
386 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
388 struct request_sock *req = inet_reqsk(sk);
389 struct net *net = sock_net(sk);
391 /* ICMPs are not backlogged, hence we cannot get
392 * an established socket here.
394 if (seq != tcp_rsk(req)->snt_isn) {
395 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
398 * Still in SYN_RECV, just remove it silently.
399 * There is no good way to pass the error to the newly
400 * created socket, and POSIX does not want network
401 * errors returned from accept().
403 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
404 tcp_listendrop(req->rsk_listener);
408 EXPORT_SYMBOL(tcp_req_err);
411 * This routine is called by the ICMP module when it gets some
412 * sort of error condition. If err < 0 then the socket should
413 * be closed and the error returned to the user. If err > 0
414 * it's just the icmp type << 8 | icmp code. After adjustment
415 * header points to the first 8 bytes of the tcp header. We need
416 * to find the appropriate port.
418 * The locking strategy used here is very "optimistic". When
419 * someone else accesses the socket the ICMP is just dropped
420 * and for some paths there is no check at all.
421 * A more general error queue to queue errors for later handling
422 * is probably better.
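/* Example of the encoding described above: a "fragmentation needed" error
 * arrives with type == ICMP_DEST_UNREACH and code == ICMP_FRAG_NEEDED,
 * 'info' then carries the next-hop MTU, and icmp_err_convert[] maps the
 * code to an errno (EMSGSIZE in this case).
 */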
426 int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
428 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
429 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
430 struct inet_connection_sock *icsk;
432 struct inet_sock *inet;
433 const int type = icmp_hdr(icmp_skb)->type;
434 const int code = icmp_hdr(icmp_skb)->code;
437 struct request_sock *fastopen;
442 struct net *net = dev_net(icmp_skb->dev);
444 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
445 th->dest, iph->saddr, ntohs(th->source),
446 inet_iif(icmp_skb), 0);
448 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
451 if (sk->sk_state == TCP_TIME_WAIT) {
452 inet_twsk_put(inet_twsk(sk));
455 seq = ntohl(th->seq);
456 if (sk->sk_state == TCP_NEW_SYN_RECV) {
457 tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
458 type == ICMP_TIME_EXCEEDED ||
459 (type == ICMP_DEST_UNREACH &&
460 (code == ICMP_NET_UNREACH ||
461 code == ICMP_HOST_UNREACH)));
466 /* If too many ICMPs get dropped on busy
467 * servers this needs to be solved differently.
468 * We do take care of PMTU discovery (RFC1191) special case :
469 * we can receive locally generated ICMP messages while socket is held.
471 if (sock_owned_by_user(sk)) {
472 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
473 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
475 if (sk->sk_state == TCP_CLOSE)
478 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
479 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
485 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
486 fastopen = tp->fastopen_rsk;
487 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
488 if (sk->sk_state != TCP_LISTEN &&
489 !between(seq, snd_una, tp->snd_nxt)) {
490 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
496 if (!sock_owned_by_user(sk))
497 do_redirect(icmp_skb, sk);
499 case ICMP_SOURCE_QUENCH:
500 /* Just silently ignore these. */
502 case ICMP_PARAMETERPROB:
505 case ICMP_DEST_UNREACH:
506 if (code > NR_ICMP_UNREACH)
509 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
510 /* We are not interested in TCP_LISTEN and open_requests
511 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
512 * they should go through unfragmented).
514 if (sk->sk_state == TCP_LISTEN)
518 if (!sock_owned_by_user(sk)) {
519 tcp_v4_mtu_reduced(sk);
521 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
527 err = icmp_err_convert[code].errno;
528 /* check if icmp_skb allows revert of backoff
529 * (see draft-zimmermann-tcp-lcd) */
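/* Rough idea of the revert below: if this ICMP refers to the oldest
 * unacknowledged segment (seq == snd_una) and we have already backed off
 * the RTO, undo one step of the exponential backoff and either rearm the
 * retransmit timer with the remaining time or retransmit immediately.
 */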
530 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
532 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
533 !icsk->icsk_backoff || fastopen)
536 if (sock_owned_by_user(sk))
539 skb = tcp_rtx_queue_head(sk);
540 if (WARN_ON_ONCE(!skb))
543 icsk->icsk_backoff--;
544 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
546 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
549 tcp_mstamp_refresh(tp);
550 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
551 remaining = icsk->icsk_rto -
552 usecs_to_jiffies(delta_us);
555 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
556 remaining, TCP_RTO_MAX);
558 /* The RTO revert clocked out the retransmission.
559 * Retransmit now. */
560 tcp_retransmit_timer(sk);
564 case ICMP_TIME_EXCEEDED:
571 switch (sk->sk_state) {
574 /* Only in fast or simultaneous open. If a fast open socket is
575 * already accepted it is treated as a connected one below.
577 if (fastopen && !fastopen->sk)
580 if (!sock_owned_by_user(sk)) {
583 sk->sk_error_report(sk);
587 sk->sk_err_soft = err;
592 /* If we've already connected we will keep trying
593 * until we time out, or the user gives up.
595 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
596 * to be considered hard errors (well, FRAG_FAILED too,
597 * but it is obsoleted by PMTU discovery).
599 * Note that in the modern internet, where routing is unreliable
600 * and broken firewalls sit in every dark corner, sending random
601 * errors ordered by their masters, even these two messages finally lose
602 * their original sense (even Linux sends invalid PORT_UNREACHs).
604 * Now we are in compliance with the RFCs.
609 if (!sock_owned_by_user(sk) && inet->recverr) {
611 sk->sk_error_report(sk);
612 } else { /* Only an error on timeout */
613 sk->sk_err_soft = err;
622 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
624 struct tcphdr *th = tcp_hdr(skb);
626 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
627 skb->csum_start = skb_transport_header(skb) - skb->head;
628 skb->csum_offset = offsetof(struct tcphdr, check);
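/* Only the pseudo-header checksum is seeded into th->check above; the
 * device (or skb_checksum_help() as a software fallback) completes the
 * sum over the TCP header and payload starting at csum_start and folds
 * the result into the field at csum_offset.
 */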
631 /* This routine computes an IPv4 TCP checksum. */
632 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
634 const struct inet_sock *inet = inet_sk(sk);
636 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
638 EXPORT_SYMBOL(tcp_v4_send_check);
641 * This routine will send an RST to the other tcp.
643 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)?
645 * Answer: if a packet caused an RST, it is not for a socket
646 * existing in our system; if it is matched to a socket,
647 * it is just a duplicate segment or a bug in the other side's TCP.
648 * So we build the reply based only on the parameters
649 * that arrived with the segment.
650 * Exception: precedence violation. We do not implement it in any case.
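/* Shape of the generated RST (mirroring the code below): if the incoming
 * segment had ACK set, the RST carries seq = the segment's ack_seq and no
 * ACK flag; otherwise it carries seq = 0 and acknowledges
 * seq + SYN + FIN + data length.
 */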
653 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
655 const struct tcphdr *th = tcp_hdr(skb);
658 #ifdef CONFIG_TCP_MD5SIG
659 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
662 struct ip_reply_arg arg;
663 #ifdef CONFIG_TCP_MD5SIG
664 struct tcp_md5sig_key *key = NULL;
665 const __u8 *hash_location = NULL;
666 unsigned char newhash[16];
668 struct sock *sk1 = NULL;
673 /* Never send a reset in response to a reset. */
677 /* If sk is not NULL, it means we did a successful lookup and the incoming
678 * route had to be correct. Prequeue might have dropped our dst.
680 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
683 /* Swap the send and the receive. */
684 memset(&rep, 0, sizeof(rep));
685 rep.th.dest = th->source;
686 rep.th.source = th->dest;
687 rep.th.doff = sizeof(struct tcphdr) / 4;
691 rep.th.seq = th->ack_seq;
694 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
695 skb->len - (th->doff << 2));
698 memset(&arg, 0, sizeof(arg));
699 arg.iov[0].iov_base = (unsigned char *)&rep;
700 arg.iov[0].iov_len = sizeof(rep.th);
702 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
703 #ifdef CONFIG_TCP_MD5SIG
705 hash_location = tcp_parse_md5sig_option(th);
706 if (sk && sk_fullsock(sk)) {
707 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
708 &ip_hdr(skb)->saddr, AF_INET);
709 } else if (hash_location) {
711 * active side is lost. Try to find the listening socket through the
712 * source port, and then find the md5 key through the listening socket.
713 * We do not lose security here:
714 * the incoming packet is checked against the md5 hash of the found key,
715 * and no RST is generated if the md5 hash doesn't match.
717 sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
719 th->source, ip_hdr(skb)->daddr,
720 ntohs(th->source), inet_iif(skb),
722 /* don't send an RST if we can't find the key */
726 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
727 &ip_hdr(skb)->saddr, AF_INET);
732 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
733 if (genhash || memcmp(hash_location, newhash, 16) != 0)
739 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
741 (TCPOPT_MD5SIG << 8) |
743 /* Update length and the length the header thinks exists */
744 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
745 rep.th.doff = arg.iov[0].iov_len / 4;
747 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
748 key, ip_hdr(skb)->saddr,
749 ip_hdr(skb)->daddr, &rep.th);
752 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
753 ip_hdr(skb)->saddr, /* XXX */
754 arg.iov[0].iov_len, IPPROTO_TCP, 0);
755 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
756 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
758 /* When the socket is gone, all binding information is lost.
759 * Routing might fail in this case. No choice here: if we choose to force
760 * the input interface, we will misroute in case of an asymmetric route.
763 arg.bound_dev_if = sk->sk_bound_dev_if;
765 trace_tcp_send_reset(sk, skb);
768 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
769 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
771 arg.tos = ip_hdr(skb)->tos;
772 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
774 ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
776 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
777 inet_twsk(sk)->tw_mark : sk->sk_mark;
778 ip_send_unicast_reply(ctl_sk,
779 skb, &TCP_SKB_CB(skb)->header.h4.opt,
780 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
781 &arg, arg.iov[0].iov_len);
784 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
785 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
788 #ifdef CONFIG_TCP_MD5SIG
794 /* The code following below, which sends ACKs in SYN-RECV and TIME-WAIT states
795 outside socket context, is ugly, certainly. What can I do?
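/* tcp_v4_send_ack() is the common helper for both cases: the TIME-WAIT
 * path (tcp_v4_timewait_ack()) and the request-socket path
 * (tcp_v4_reqsk_send_ack()) further below build their arguments and call
 * it with the appropriate sequence numbers, window and timestamps.
 */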
798 static void tcp_v4_send_ack(const struct sock *sk,
799 struct sk_buff *skb, u32 seq, u32 ack,
800 u32 win, u32 tsval, u32 tsecr, int oif,
801 struct tcp_md5sig_key *key,
802 int reply_flags, u8 tos)
804 const struct tcphdr *th = tcp_hdr(skb);
807 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
808 #ifdef CONFIG_TCP_MD5SIG
809 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
813 struct net *net = sock_net(sk);
814 struct ip_reply_arg arg;
817 memset(&rep.th, 0, sizeof(struct tcphdr));
818 memset(&arg, 0, sizeof(arg));
820 arg.iov[0].iov_base = (unsigned char *)&rep;
821 arg.iov[0].iov_len = sizeof(rep.th);
823 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
824 (TCPOPT_TIMESTAMP << 8) |
826 rep.opt[1] = htonl(tsval);
827 rep.opt[2] = htonl(tsecr);
828 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
831 /* Swap the send and the receive. */
832 rep.th.dest = th->source;
833 rep.th.source = th->dest;
834 rep.th.doff = arg.iov[0].iov_len / 4;
835 rep.th.seq = htonl(seq);
836 rep.th.ack_seq = htonl(ack);
838 rep.th.window = htons(win);
840 #ifdef CONFIG_TCP_MD5SIG
842 int offset = (tsecr) ? 3 : 0;
844 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
846 (TCPOPT_MD5SIG << 8) |
848 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
849 rep.th.doff = arg.iov[0].iov_len/4;
851 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
852 key, ip_hdr(skb)->saddr,
853 ip_hdr(skb)->daddr, &rep.th);
856 arg.flags = reply_flags;
857 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
858 ip_hdr(skb)->saddr, /* XXX */
859 arg.iov[0].iov_len, IPPROTO_TCP, 0);
860 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
862 arg.bound_dev_if = oif;
864 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
866 ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
868 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
869 inet_twsk(sk)->tw_mark : sk->sk_mark;
870 ip_send_unicast_reply(ctl_sk,
871 skb, &TCP_SKB_CB(skb)->header.h4.opt,
872 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
873 &arg, arg.iov[0].iov_len);
876 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
880 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
882 struct inet_timewait_sock *tw = inet_twsk(sk);
883 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
885 tcp_v4_send_ack(sk, skb,
886 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
887 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
888 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
891 tcp_twsk_md5_key(tcptw),
892 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
899 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
900 struct request_sock *req)
902 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
903 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
905 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
909 * The window field (SEG.WND) of every outgoing segment, with the
910 * exception of <SYN> segments, MUST be right-shifted by
911 * Rcv.Wind.Shift bits:
913 tcp_v4_send_ack(sk, skb, seq,
914 tcp_rsk(req)->rcv_nxt,
915 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
916 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
919 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
921 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
926 * Send a SYN-ACK after having received a SYN.
927 * This still operates on a request_sock only, not on a big socket.
930 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
932 struct request_sock *req,
933 struct tcp_fastopen_cookie *foc,
934 enum tcp_synack_type synack_type)
936 const struct inet_request_sock *ireq = inet_rsk(req);
941 /* First, grab a route. */
942 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
945 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
948 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
951 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
953 rcu_dereference(ireq->ireq_opt));
955 err = net_xmit_eval(err);
962 * IPv4 request_sock destructor.
964 static void tcp_v4_reqsk_destructor(struct request_sock *req)
966 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
969 #ifdef CONFIG_TCP_MD5SIG
971 * RFC2385 MD5 checksumming requires a mapping of
972 * IP address->MD5 Key.
973 * We need to maintain these in the sk structure.
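/* Keys are normally installed from userspace; a sketch (peer_addr and fd
 * are assumed to exist, error handling omitted) that ends up in
 * tcp_v4_parse_md5_keys() below:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	a->sin_family = AF_INET;
 *	a->sin_addr.s_addr = peer_addr;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */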
976 DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
977 EXPORT_SYMBOL(tcp_md5_needed);
979 /* Find the Key structure for an address. */
980 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
981 const union tcp_md5_addr *addr,
984 const struct tcp_sock *tp = tcp_sk(sk);
985 struct tcp_md5sig_key *key;
986 const struct tcp_md5sig_info *md5sig;
988 struct tcp_md5sig_key *best_match = NULL;
991 /* caller either holds rcu_read_lock() or socket lock */
992 md5sig = rcu_dereference_check(tp->md5sig_info,
993 lockdep_sock_is_held(sk));
997 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
998 if (key->family != family)
1001 if (family == AF_INET) {
1002 mask = inet_make_mask(key->prefixlen);
1003 match = (key->addr.a4.s_addr & mask) ==
1004 (addr->a4.s_addr & mask);
1005 #if IS_ENABLED(CONFIG_IPV6)
1006 } else if (family == AF_INET6) {
1007 match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1014 if (match && (!best_match ||
1015 key->prefixlen > best_match->prefixlen))
1020 EXPORT_SYMBOL(__tcp_md5_do_lookup);
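/* Note the split: __tcp_md5_do_lookup() above returns the longest-prefix
 * match for an address, while tcp_md5_do_lookup_exact() below only matches
 * a key whose address and prefix length are both identical (used by
 * tcp_md5_do_add() and tcp_md5_do_del()).
 */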
1022 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1023 const union tcp_md5_addr *addr,
1024 int family, u8 prefixlen)
1026 const struct tcp_sock *tp = tcp_sk(sk);
1027 struct tcp_md5sig_key *key;
1028 unsigned int size = sizeof(struct in_addr);
1029 const struct tcp_md5sig_info *md5sig;
1031 /* caller either holds rcu_read_lock() or socket lock */
1032 md5sig = rcu_dereference_check(tp->md5sig_info,
1033 lockdep_sock_is_held(sk));
1036 #if IS_ENABLED(CONFIG_IPV6)
1037 if (family == AF_INET6)
1038 size = sizeof(struct in6_addr);
1040 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
1041 if (key->family != family)
1043 if (!memcmp(&key->addr, addr, size) &&
1044 key->prefixlen == prefixlen)
1050 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1051 const struct sock *addr_sk)
1053 const union tcp_md5_addr *addr;
1055 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1056 return tcp_md5_do_lookup(sk, addr, AF_INET);
1058 EXPORT_SYMBOL(tcp_v4_md5_lookup);
1060 /* This can be called on a newly created socket, from other files */
1061 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1062 int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
1065 /* Add Key to the list */
1066 struct tcp_md5sig_key *key;
1067 struct tcp_sock *tp = tcp_sk(sk);
1068 struct tcp_md5sig_info *md5sig;
1070 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1072 /* Pre-existing entry - just update that one. */
1073 memcpy(key->key, newkey, newkeylen);
1074 key->keylen = newkeylen;
1078 md5sig = rcu_dereference_protected(tp->md5sig_info,
1079 lockdep_sock_is_held(sk));
1081 md5sig = kmalloc(sizeof(*md5sig), gfp);
1085 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1086 INIT_HLIST_HEAD(&md5sig->head);
1087 rcu_assign_pointer(tp->md5sig_info, md5sig);
1090 key = sock_kmalloc(sk, sizeof(*key), gfp);
1093 if (!tcp_alloc_md5sig_pool()) {
1094 sock_kfree_s(sk, key, sizeof(*key));
1098 memcpy(key->key, newkey, newkeylen);
1099 key->keylen = newkeylen;
1100 key->family = family;
1101 key->prefixlen = prefixlen;
1102 memcpy(&key->addr, addr,
1103 (family == AF_INET6) ? sizeof(struct in6_addr) :
1104 sizeof(struct in_addr));
1105 hlist_add_head_rcu(&key->node, &md5sig->head);
1108 EXPORT_SYMBOL(tcp_md5_do_add);
1110 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1113 struct tcp_md5sig_key *key;
1115 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1118 hlist_del_rcu(&key->node);
1119 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1120 kfree_rcu(key, rcu);
1123 EXPORT_SYMBOL(tcp_md5_do_del);
1125 static void tcp_clear_md5_list(struct sock *sk)
1127 struct tcp_sock *tp = tcp_sk(sk);
1128 struct tcp_md5sig_key *key;
1129 struct hlist_node *n;
1130 struct tcp_md5sig_info *md5sig;
1132 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1134 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1135 hlist_del_rcu(&key->node);
1136 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1137 kfree_rcu(key, rcu);
1141 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1142 char __user *optval, int optlen)
1144 struct tcp_md5sig cmd;
1145 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1148 if (optlen < sizeof(cmd))
1151 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1154 if (sin->sin_family != AF_INET)
1157 if (optname == TCP_MD5SIG_EXT &&
1158 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1159 prefixlen = cmd.tcpm_prefixlen;
1164 if (!cmd.tcpm_keylen)
1165 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1166 AF_INET, prefixlen);
1168 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1171 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1172 AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
1176 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1177 __be32 daddr, __be32 saddr,
1178 const struct tcphdr *th, int nbytes)
1180 struct tcp4_pseudohdr *bp;
1181 struct scatterlist sg;
1188 bp->protocol = IPPROTO_TCP;
1189 bp->len = cpu_to_be16(nbytes);
1191 _th = (struct tcphdr *)(bp + 1);
1192 memcpy(_th, th, sizeof(*th));
1195 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1196 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1197 sizeof(*bp) + sizeof(*th));
1198 return crypto_ahash_update(hp->md5_req);
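/* Per RFC 2385 the digest covers the IPv4 pseudo-header, the TCP header
 * with its checksum field zeroed, any segment data, and finally the key
 * itself (the key is fed in separately via tcp_md5_hash_key()).
 */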
1201 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1202 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1204 struct tcp_md5sig_pool *hp;
1205 struct ahash_request *req;
1207 hp = tcp_get_md5sig_pool();
1209 goto clear_hash_noput;
1212 if (crypto_ahash_init(req))
1214 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1216 if (tcp_md5_hash_key(hp, key))
1218 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1219 if (crypto_ahash_final(req))
1222 tcp_put_md5sig_pool();
1226 tcp_put_md5sig_pool();
1228 memset(md5_hash, 0, 16);
1232 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1233 const struct sock *sk,
1234 const struct sk_buff *skb)
1236 struct tcp_md5sig_pool *hp;
1237 struct ahash_request *req;
1238 const struct tcphdr *th = tcp_hdr(skb);
1239 __be32 saddr, daddr;
1241 if (sk) { /* valid for establish/request sockets */
1242 saddr = sk->sk_rcv_saddr;
1243 daddr = sk->sk_daddr;
1245 const struct iphdr *iph = ip_hdr(skb);
1250 hp = tcp_get_md5sig_pool();
1252 goto clear_hash_noput;
1255 if (crypto_ahash_init(req))
1258 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1260 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1262 if (tcp_md5_hash_key(hp, key))
1264 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1265 if (crypto_ahash_final(req))
1268 tcp_put_md5sig_pool();
1272 tcp_put_md5sig_pool();
1274 memset(md5_hash, 0, 16);
1277 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1281 /* Called with rcu_read_lock() */
1282 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1283 const struct sk_buff *skb)
1285 #ifdef CONFIG_TCP_MD5SIG
1287 * This gets called for each TCP segment that arrives
1288 * so we want to be efficient.
1289 * We have 3 drop cases:
1290 * o No MD5 hash and one expected.
1291 * o MD5 hash and we're not expecting one.
1292 * o MD5 hash and it's wrong.
1294 const __u8 *hash_location = NULL;
1295 struct tcp_md5sig_key *hash_expected;
1296 const struct iphdr *iph = ip_hdr(skb);
1297 const struct tcphdr *th = tcp_hdr(skb);
1299 unsigned char newhash[16];
1301 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1303 hash_location = tcp_parse_md5sig_option(th);
1305 /* We've parsed the options - do we have a hash? */
1306 if (!hash_expected && !hash_location)
1309 if (hash_expected && !hash_location) {
1310 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1314 if (!hash_expected && hash_location) {
1315 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1319 /* Okay, so this is hash_expected and hash_location -
1320 * so we need to calculate the checksum.
1322 genhash = tcp_v4_md5_hash_skb(newhash,
1326 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1327 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1328 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1329 &iph->saddr, ntohs(th->source),
1330 &iph->daddr, ntohs(th->dest),
1331 genhash ? " tcp_v4_calc_md5_hash failed"
1340 static void tcp_v4_init_req(struct request_sock *req,
1341 const struct sock *sk_listener,
1342 struct sk_buff *skb)
1344 struct inet_request_sock *ireq = inet_rsk(req);
1345 struct net *net = sock_net(sk_listener);
1347 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1348 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1349 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1352 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1354 const struct request_sock *req)
1356 return inet_csk_route_req(sk, &fl->u.ip4, req);
1359 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1361 .obj_size = sizeof(struct tcp_request_sock),
1362 .rtx_syn_ack = tcp_rtx_synack,
1363 .send_ack = tcp_v4_reqsk_send_ack,
1364 .destructor = tcp_v4_reqsk_destructor,
1365 .send_reset = tcp_v4_send_reset,
1366 .syn_ack_timeout = tcp_syn_ack_timeout,
1369 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1370 .mss_clamp = TCP_MSS_DEFAULT,
1371 #ifdef CONFIG_TCP_MD5SIG
1372 .req_md5_lookup = tcp_v4_md5_lookup,
1373 .calc_md5_hash = tcp_v4_md5_hash_skb,
1375 .init_req = tcp_v4_init_req,
1376 #ifdef CONFIG_SYN_COOKIES
1377 .cookie_init_seq = cookie_v4_init_sequence,
1379 .route_req = tcp_v4_route_req,
1380 .init_seq = tcp_v4_init_seq,
1381 .init_ts_off = tcp_v4_init_ts_off,
1382 .send_synack = tcp_v4_send_synack,
1385 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1387 /* Never answer SYNs sent to broadcast or multicast */
1388 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1391 return tcp_conn_request(&tcp_request_sock_ops,
1392 &tcp_request_sock_ipv4_ops, sk, skb);
1398 EXPORT_SYMBOL(tcp_v4_conn_request);
1402 * The three way handshake has completed - we got a valid ACK -
1403 * now create the new socket.
1405 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1406 struct request_sock *req,
1407 struct dst_entry *dst,
1408 struct request_sock *req_unhash,
1411 struct inet_request_sock *ireq;
1412 struct inet_sock *newinet;
1413 struct tcp_sock *newtp;
1415 #ifdef CONFIG_TCP_MD5SIG
1416 struct tcp_md5sig_key *key;
1418 struct ip_options_rcu *inet_opt;
1420 if (sk_acceptq_is_full(sk))
1423 newsk = tcp_create_openreq_child(sk, req, skb);
1427 newsk->sk_gso_type = SKB_GSO_TCPV4;
1428 inet_sk_rx_dst_set(newsk, skb);
1430 newtp = tcp_sk(newsk);
1431 newinet = inet_sk(newsk);
1432 ireq = inet_rsk(req);
1433 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1434 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1435 newsk->sk_bound_dev_if = ireq->ir_iif;
1436 newinet->inet_saddr = ireq->ir_loc_addr;
1437 inet_opt = rcu_dereference(ireq->ireq_opt);
1438 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1439 newinet->mc_index = inet_iif(skb);
1440 newinet->mc_ttl = ip_hdr(skb)->ttl;
1441 newinet->rcv_tos = ip_hdr(skb)->tos;
1442 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1444 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1445 newinet->inet_id = newtp->write_seq ^ jiffies;
1448 dst = inet_csk_route_child_sock(sk, newsk, req);
1452 /* syncookie case : see end of cookie_v4_check() */
1454 sk_setup_caps(newsk, dst);
1456 tcp_ca_openreq_child(newsk, dst);
1458 tcp_sync_mss(newsk, dst_mtu(dst));
1459 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1461 tcp_initialize_rcv_mss(newsk);
1463 #ifdef CONFIG_TCP_MD5SIG
1464 /* Copy over the MD5 key from the original socket */
1465 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1469 * We're using one, so create a matching key
1470 * on the newsk structure. If we fail to get
1471 * memory, then we end up not copying the key across.
1474 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1475 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
1476 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1480 if (__inet_inherit_port(sk, newsk) < 0)
1482 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1483 if (likely(*own_req)) {
1484 tcp_move_syn(newtp, req);
1485 ireq->ireq_opt = NULL;
1487 newinet->inet_opt = NULL;
1492 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1499 newinet->inet_opt = NULL;
1500 inet_csk_prepare_forced_close(newsk);
1504 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1506 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1508 #ifdef CONFIG_SYN_COOKIES
1509 const struct tcphdr *th = tcp_hdr(skb);
1512 sk = cookie_v4_check(sk, skb);
1517 /* The socket must have its spinlock held when we get
1518 * here, unless it is a TCP_LISTEN socket.
1520 * We have a potential double-lock case here, so even when
1521 * doing backlog processing we use the BH locking scheme.
1522 * This is because we cannot sleep with the original spinlock held.
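/* In other words (a summary, not new behaviour): tcp_v4_rcv() takes the
 * socket spinlock with bh_lock_sock_nested() before calling in here, and
 * if the socket is owned by a user context the segment is instead pushed
 * onto the backlog and replayed from release_sock() via ->backlog_rcv.
 */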
1525 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1529 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1530 struct dst_entry *dst = sk->sk_rx_dst;
1532 sock_rps_save_rxhash(sk, skb);
1533 sk_mark_napi_id(sk, skb);
1535 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1536 !dst->ops->check(dst, 0)) {
1538 sk->sk_rx_dst = NULL;
1541 tcp_rcv_established(sk, skb);
1545 if (tcp_checksum_complete(skb))
1548 if (sk->sk_state == TCP_LISTEN) {
1549 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1554 if (tcp_child_process(sk, nsk, skb)) {
1561 sock_rps_save_rxhash(sk, skb);
1563 if (tcp_rcv_state_process(sk, skb)) {
1570 tcp_v4_send_reset(rsk, skb);
1573 /* Be careful here. If this function gets more complicated and
1574 * gcc suffers from register pressure on the x86, sk (in %ebx)
1575 * might be destroyed here. This current version compiles correctly,
1576 * but you have been warned.
1581 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1582 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1585 EXPORT_SYMBOL(tcp_v4_do_rcv);
1587 int tcp_v4_early_demux(struct sk_buff *skb)
1589 const struct iphdr *iph;
1590 const struct tcphdr *th;
1593 if (skb->pkt_type != PACKET_HOST)
1596 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1602 if (th->doff < sizeof(struct tcphdr) / 4)
1605 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1606 iph->saddr, th->source,
1607 iph->daddr, ntohs(th->dest),
1608 skb->skb_iif, inet_sdif(skb));
1611 skb->destructor = sock_edemux;
1612 if (sk_fullsock(sk)) {
1613 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1616 dst = dst_check(dst, 0);
1618 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1619 skb_dst_set_noref(skb, dst);
1625 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1627 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1628 struct skb_shared_info *shinfo;
1629 const struct tcphdr *th;
1630 struct tcphdr *thtail;
1631 struct sk_buff *tail;
1632 unsigned int hdrlen;
1637 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1638 * we can fix skb->truesize to its real value to avoid future drops.
1639 * This is valid because skb is not yet charged to the socket.
1640 * It has been noticed that pure SACK packets were sometimes dropped
1641 * (if cooked by drivers without the copybreak feature).
1647 if (unlikely(tcp_checksum_complete(skb))) {
1649 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1650 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1654 /* Attempt coalescing to the last skb in the backlog, even if we are above the limits.
1656 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
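/* Coalescing with the backlog tail is only attempted when the new segment
 * is contiguous (tail->end_seq == skb->seq), has identical TOS/ECN bits,
 * header length and TCP options, carries no URG, and does not flip
 * ECE/CWR; otherwise the skb is queued on its own (see the checks below).
 */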
1658 th = (const struct tcphdr *)skb->data;
1659 hdrlen = th->doff * 4;
1660 shinfo = skb_shinfo(skb);
1662 if (!shinfo->gso_size)
1663 shinfo->gso_size = skb->len - hdrlen;
1665 if (!shinfo->gso_segs)
1666 shinfo->gso_segs = 1;
1668 tail = sk->sk_backlog.tail;
1671 thtail = (struct tcphdr *)tail->data;
1673 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1674 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1675 ((TCP_SKB_CB(tail)->tcp_flags |
1676 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) ||
1677 ((TCP_SKB_CB(tail)->tcp_flags ^
1678 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1679 #ifdef CONFIG_TLS_DEVICE
1680 tail->decrypted != skb->decrypted ||
1682 thtail->doff != th->doff ||
1683 memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1686 __skb_pull(skb, hdrlen);
1687 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1688 thtail->window = th->window;
1690 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1692 if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
1693 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1695 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1697 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1698 TCP_SKB_CB(tail)->has_rxtstamp = true;
1699 tail->tstamp = skb->tstamp;
1700 skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1703 /* Not as strict as GRO. We only need to carry mss max value */
1704 skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
1705 skb_shinfo(tail)->gso_size);
1707 gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
1708 skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
1710 sk->sk_backlog.len += delta;
1711 __NET_INC_STATS(sock_net(sk),
1712 LINUX_MIB_TCPBACKLOGCOALESCE);
1713 kfree_skb_partial(skb, fragstolen);
1716 __skb_push(skb, hdrlen);
1719 /* Only the socket owner can try to collapse/prune the rx queues
1720 * to reduce memory overhead, so add a little headroom here.
1721 * Few socket backlogs are likely to be concurrently non-empty.
1725 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1727 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1732 EXPORT_SYMBOL(tcp_add_backlog);
1734 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1736 struct tcphdr *th = (struct tcphdr *)skb->data;
1738 return sk_filter_trim_cap(sk, skb, th->doff * 4);
1740 EXPORT_SYMBOL(tcp_filter);
1742 static void tcp_v4_restore_cb(struct sk_buff *skb)
1744 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1745 sizeof(struct inet_skb_parm));
1748 static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1749 const struct tcphdr *th)
1751 /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1752 * barrier() makes sure the compiler won't play fool^Waliasing games.
1754 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1755 sizeof(struct inet_skb_parm));
1758 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1759 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1760 skb->len - th->doff * 4);
1761 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1762 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1763 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1764 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1765 TCP_SKB_CB(skb)->sacked = 0;
1766 TCP_SKB_CB(skb)->has_rxtstamp =
1767 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
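/* TCP_SKB_CB() reuses skb->cb[]: after tcp_v4_fill_cb() the IP control
 * block is preserved at header.h4 and put back by tcp_v4_restore_cb()
 * before the skb is handed off to another socket.
 */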
1774 int tcp_v4_rcv(struct sk_buff *skb)
1776 struct net *net = dev_net(skb->dev);
1777 int sdif = inet_sdif(skb);
1778 const struct iphdr *iph;
1779 const struct tcphdr *th;
1784 if (skb->pkt_type != PACKET_HOST)
1787 /* Count it even if it's bad */
1788 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1790 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1793 th = (const struct tcphdr *)skb->data;
1795 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1797 if (!pskb_may_pull(skb, th->doff * 4))
1800 /* An explanation is required here, I think.
1801 * Packet length and doff are validated by header prediction,
1802 * provided the case of th->doff == 0 is eliminated.
1803 * So, we defer the checks. */
1805 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1808 th = (const struct tcphdr *)skb->data;
1811 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1812 th->dest, sdif, &refcounted);
1817 if (sk->sk_state == TCP_TIME_WAIT)
1820 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1821 struct request_sock *req = inet_reqsk(sk);
1822 bool req_stolen = false;
1825 sk = req->rsk_listener;
1826 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1827 sk_drops_add(sk, skb);
1831 if (tcp_checksum_complete(skb)) {
1835 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1836 inet_csk_reqsk_queue_drop_and_put(sk, req);
1839 /* We own a reference on the listener, increase it again
1840 * as we might lose it too soon.
1845 if (!tcp_filter(sk, skb)) {
1846 th = (const struct tcphdr *)skb->data;
1848 tcp_v4_fill_cb(skb, iph, th);
1849 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1854 /* Another cpu got exclusive access to req
1855 * and created a full blown socket.
1856 * Try to feed this packet to this socket
1857 * instead of discarding it.
1859 tcp_v4_restore_cb(skb);
1863 goto discard_and_relse;
1867 tcp_v4_restore_cb(skb);
1868 } else if (tcp_child_process(sk, nsk, skb)) {
1869 tcp_v4_send_reset(nsk, skb);
1870 goto discard_and_relse;
1876 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1877 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1878 goto discard_and_relse;
1881 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1882 goto discard_and_relse;
1884 if (tcp_v4_inbound_md5_hash(sk, skb))
1885 goto discard_and_relse;
1889 if (tcp_filter(sk, skb))
1890 goto discard_and_relse;
1891 th = (const struct tcphdr *)skb->data;
1893 tcp_v4_fill_cb(skb, iph, th);
1897 if (sk->sk_state == TCP_LISTEN) {
1898 ret = tcp_v4_do_rcv(sk, skb);
1899 goto put_and_return;
1902 sk_incoming_cpu_update(sk);
1904 bh_lock_sock_nested(sk);
1905 tcp_segs_in(tcp_sk(sk), skb);
1907 if (!sock_owned_by_user(sk)) {
1908 ret = tcp_v4_do_rcv(sk, skb);
1909 } else if (tcp_add_backlog(sk, skb)) {
1910 goto discard_and_relse;
1921 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1924 tcp_v4_fill_cb(skb, iph, th);
1926 if (tcp_checksum_complete(skb)) {
1928 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1930 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1932 tcp_v4_send_reset(NULL, skb);
1936 /* Discard frame. */
1941 sk_drops_add(sk, skb);
1947 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1948 inet_twsk_put(inet_twsk(sk));
1952 tcp_v4_fill_cb(skb, iph, th);
1954 if (tcp_checksum_complete(skb)) {
1955 inet_twsk_put(inet_twsk(sk));
1958 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1960 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1963 iph->saddr, th->source,
1964 iph->daddr, th->dest,
1968 inet_twsk_deschedule_put(inet_twsk(sk));
1970 tcp_v4_restore_cb(skb);
1978 tcp_v4_timewait_ack(sk, skb);
1981 tcp_v4_send_reset(sk, skb);
1982 inet_twsk_deschedule_put(inet_twsk(sk));
1984 case TCP_TW_SUCCESS:;
1989 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1990 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1991 .twsk_unique = tcp_twsk_unique,
1992 .twsk_destructor= tcp_twsk_destructor,
1995 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1997 struct dst_entry *dst = skb_dst(skb);
1999 if (dst && dst_hold_safe(dst)) {
2000 sk->sk_rx_dst = dst;
2001 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2004 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2006 const struct inet_connection_sock_af_ops ipv4_specific = {
2007 .queue_xmit = ip_queue_xmit,
2008 .send_check = tcp_v4_send_check,
2009 .rebuild_header = inet_sk_rebuild_header,
2010 .sk_rx_dst_set = inet_sk_rx_dst_set,
2011 .conn_request = tcp_v4_conn_request,
2012 .syn_recv_sock = tcp_v4_syn_recv_sock,
2013 .net_header_len = sizeof(struct iphdr),
2014 .setsockopt = ip_setsockopt,
2015 .getsockopt = ip_getsockopt,
2016 .addr2sockaddr = inet_csk_addr2sockaddr,
2017 .sockaddr_len = sizeof(struct sockaddr_in),
2018 #ifdef CONFIG_COMPAT
2019 .compat_setsockopt = compat_ip_setsockopt,
2020 .compat_getsockopt = compat_ip_getsockopt,
2022 .mtu_reduced = tcp_v4_mtu_reduced,
2024 EXPORT_SYMBOL(ipv4_specific);
2026 #ifdef CONFIG_TCP_MD5SIG
2027 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2028 .md5_lookup = tcp_v4_md5_lookup,
2029 .calc_md5_hash = tcp_v4_md5_hash_skb,
2030 .md5_parse = tcp_v4_parse_md5_keys,
2034 /* NOTE: A lot of things are set to zero explicitly by the call to
2035 * sk_alloc(), so they need not be done here.
2037 static int tcp_v4_init_sock(struct sock *sk)
2039 struct inet_connection_sock *icsk = inet_csk(sk);
2043 icsk->icsk_af_ops = &ipv4_specific;
2045 #ifdef CONFIG_TCP_MD5SIG
2046 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2052 void tcp_v4_destroy_sock(struct sock *sk)
2054 struct tcp_sock *tp = tcp_sk(sk);
2056 trace_tcp_destroy_sock(sk);
2058 tcp_clear_xmit_timers(sk);
2060 tcp_cleanup_congestion_control(sk);
2062 tcp_cleanup_ulp(sk);
2064 /* Clean up the write buffer. */
2065 tcp_write_queue_purge(sk);
2067 /* Check if we want to disable active TFO */
2068 tcp_fastopen_active_disable_ofo_check(sk);
2070 /* Cleans up our, hopefully empty, out_of_order_queue. */
2071 skb_rbtree_purge(&tp->out_of_order_queue);
2073 #ifdef CONFIG_TCP_MD5SIG
2074 /* Clean up the MD5 key list, if any */
2075 if (tp->md5sig_info) {
2076 tcp_clear_md5_list(sk);
2077 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2078 tp->md5sig_info = NULL;
2082 /* Clean up a referenced TCP bind bucket. */
2083 if (inet_csk(sk)->icsk_bind_hash)
2086 BUG_ON(tp->fastopen_rsk);
2088 /* If socket is aborted during connect operation */
2089 tcp_free_fastopen_req(tp);
2090 tcp_fastopen_destroy_cipher(sk);
2091 tcp_saved_syn_free(tp);
2093 sk_sockets_allocated_dec(sk);
2095 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2097 #ifdef CONFIG_PROC_FS
2098 /* Proc filesystem TCP sock list dumping. */
2101 * Get the next listener socket following cur. If cur is NULL, get the first socket
2102 * starting from bucket given in st->bucket; when st->bucket is zero the
2103 * very first socket in the hash table is returned.
2105 static void *listening_get_next(struct seq_file *seq, void *cur)
2107 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2108 struct tcp_iter_state *st = seq->private;
2109 struct net *net = seq_file_net(seq);
2110 struct inet_listen_hashbucket *ilb;
2111 struct sock *sk = cur;
2115 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2116 spin_lock(&ilb->lock);
2117 sk = sk_head(&ilb->head);
2121 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2127 sk_for_each_from(sk) {
2128 if (!net_eq(sock_net(sk), net))
2130 if (sk->sk_family == afinfo->family)
2133 spin_unlock(&ilb->lock);
2135 if (++st->bucket < INET_LHTABLE_SIZE)
2140 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2142 struct tcp_iter_state *st = seq->private;
2147 rc = listening_get_next(seq, NULL);
2149 while (rc && *pos) {
2150 rc = listening_get_next(seq, rc);
2156 static inline bool empty_bucket(const struct tcp_iter_state *st)
2158 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2162 * Get first established socket starting from bucket given in st->bucket.
2163 * If st->bucket is zero, the very first socket in the hash is returned.
2165 static void *established_get_first(struct seq_file *seq)
2167 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2168 struct tcp_iter_state *st = seq->private;
2169 struct net *net = seq_file_net(seq);
2173 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2175 struct hlist_nulls_node *node;
2176 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2178 /* Lockless fast path for the common case of empty buckets */
2179 if (empty_bucket(st))
2183 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2184 if (sk->sk_family != afinfo->family ||
2185 !net_eq(sock_net(sk), net)) {
2191 spin_unlock_bh(lock);
2197 static void *established_get_next(struct seq_file *seq, void *cur)
2199 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2200 struct sock *sk = cur;
2201 struct hlist_nulls_node *node;
2202 struct tcp_iter_state *st = seq->private;
2203 struct net *net = seq_file_net(seq);
2208 sk = sk_nulls_next(sk);
2210 sk_nulls_for_each_from(sk, node) {
2211 if (sk->sk_family == afinfo->family &&
2212 net_eq(sock_net(sk), net))
2216 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2218 return established_get_first(seq);
2221 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2223 struct tcp_iter_state *st = seq->private;
2227 rc = established_get_first(seq);
2230 rc = established_get_next(seq, rc);
2236 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2239 struct tcp_iter_state *st = seq->private;
2241 st->state = TCP_SEQ_STATE_LISTENING;
2242 rc = listening_get_idx(seq, &pos);
2245 st->state = TCP_SEQ_STATE_ESTABLISHED;
2246 rc = established_get_idx(seq, pos);
2252 static void *tcp_seek_last_pos(struct seq_file *seq)
2254 struct tcp_iter_state *st = seq->private;
2255 int offset = st->offset;
2256 int orig_num = st->num;
2259 switch (st->state) {
2260 case TCP_SEQ_STATE_LISTENING:
2261 if (st->bucket >= INET_LHTABLE_SIZE)
2263 st->state = TCP_SEQ_STATE_LISTENING;
2264 rc = listening_get_next(seq, NULL);
2265 while (offset-- && rc)
2266 rc = listening_get_next(seq, rc);
2270 st->state = TCP_SEQ_STATE_ESTABLISHED;
2272 case TCP_SEQ_STATE_ESTABLISHED:
2273 if (st->bucket > tcp_hashinfo.ehash_mask)
2275 rc = established_get_first(seq);
2276 while (offset-- && rc)
2277 rc = established_get_next(seq, rc);
2285 void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2287 struct tcp_iter_state *st = seq->private;
2290 if (*pos && *pos == st->last_pos) {
2291 rc = tcp_seek_last_pos(seq);
2296 st->state = TCP_SEQ_STATE_LISTENING;
2300 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2303 st->last_pos = *pos;
2306 EXPORT_SYMBOL(tcp_seq_start);
2308 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2310 struct tcp_iter_state *st = seq->private;
2313 if (v == SEQ_START_TOKEN) {
2314 rc = tcp_get_idx(seq, 0);
2318 switch (st->state) {
2319 case TCP_SEQ_STATE_LISTENING:
2320 rc = listening_get_next(seq, v);
2322 st->state = TCP_SEQ_STATE_ESTABLISHED;
2325 rc = established_get_first(seq);
2328 case TCP_SEQ_STATE_ESTABLISHED:
2329 rc = established_get_next(seq, v);
2334 st->last_pos = *pos;
2337 EXPORT_SYMBOL(tcp_seq_next);
2339 void tcp_seq_stop(struct seq_file *seq, void *v)
2341 struct tcp_iter_state *st = seq->private;
2343 switch (st->state) {
2344 case TCP_SEQ_STATE_LISTENING:
2345 if (v != SEQ_START_TOKEN)
2346 spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2348 case TCP_SEQ_STATE_ESTABLISHED:
2350 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2354 EXPORT_SYMBOL(tcp_seq_stop);
2356 static void get_openreq4(const struct request_sock *req,
2357 struct seq_file *f, int i)
2359 const struct inet_request_sock *ireq = inet_rsk(req);
2360 long delta = req->rsk_timer.expires - jiffies;
2362 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2363 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2368 ntohs(ireq->ir_rmt_port),
2370 0, 0, /* could print option size, but that is af dependent. */
2371 1, /* timers active (only the expire timer) */
2372 jiffies_delta_to_clock_t(delta),
2374 from_kuid_munged(seq_user_ns(f),
2375 sock_i_uid(req->rsk_listener)),
2376 0, /* non standard timer */
2377 0, /* open_requests have no inode */
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

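/*
 * Reading the seq_printf() above: tx_queue is write_seq - snd_una (bytes
 * sent but not yet acked), rx_queue is rcv_nxt - copied_seq (bytes received
 * but not yet read by the application), "tr:tm->when" is the pending timer
 * type and its remaining time, and the last column is either the listener's
 * fastopen queue limit or the slow start threshold (-1 while still in the
 * initial slow start).  Addresses are raw __be32 values printed with %08X,
 * so on a little-endian host 127.0.0.1:8080 shows up as 0100007F:1F90
 * (ports go through ntohs() and are printed in host order).
 */
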
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

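/*
 * TIME_WAIT entries keep almost no per-connection state, so the row is
 * mostly fixed placeholders (timer type 3, zero queues, uid 0, no inode);
 * only the address pair, the substate and the remaining timewait timer
 * carry information.
 */
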
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

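/*
 * A minimal userspace reader for this file, assuming the column layout
 * printed above (illustrative sketch only, not part of the kernel build):
 *
 *	FILE *fp = fopen("/proc/net/tcp", "r");
 *	char line[256];
 *	unsigned int laddr, lport, raddr, rport, state;
 *
 *	fgets(line, sizeof(line), fp);		// skip the header line
 *	while (fgets(line, sizeof(line), fp)) {
 *		if (sscanf(line, "%*d: %8X:%4X %8X:%4X %2X",
 *			   &laddr, &lport, &raddr, &rport, &state) == 5)
 *			printf("port %u state %02X\n", lport, state);
 *	}
 *	fclose(fp);
 */
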
static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

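/*
 * tcp4_seq_afinfo only records the address family; the shared iterators
 * fetch it from the proc entry's private data and use it to skip sockets of
 * other families, which is how the same hash-table walk also backs
 * /proc/net/tcp6.
 */
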
static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

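/*
 * Registering through pernet_operations gives every network namespace its
 * own /proc/net/tcp instance, listing only the sockets of that namespace.
 */
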
int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

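/*
 * tcp_prot is what an AF_INET/SOCK_STREAM socket is bound to, so ordinary
 * socket calls land in the handlers above.  Illustrative userspace sketch
 * (not part of this file):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *		.sin_addr   = { htonl(INADDR_LOOPBACK) },
 *	};
 *
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));  // -> tcp_v4_connect()
 *	send(fd, "hi", 2, 0);                               // -> tcp_sendmsg()
 *	close(fd);                                          // -> tcp_close()
 */
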
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	if (net->ipv4.tcp_congestion_control)
		module_put(net->ipv4.tcp_congestion_control->owner);
	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

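/*
 * The per-namespace defaults set below track the documented sysctl defaults
 * (see Documentation/networking/ip-sysctl.txt): for instance tcp_ecn = 2
 * accepts ECN from peers without requesting it, and tcp_tw_reuse = 2
 * restricts TIME-WAIT reuse to loopback traffic.  Namespaces other than
 * init_net also inherit its tcp_rmem/tcp_wmem limits and, when the module
 * reference can be taken, its congestion control algorithm.
 */
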
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of 16 TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

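/*
 * tcp_v4_init() below runs from inet_init() at boot.  Without the per-cpu
 * control sockets created in tcp_sk_init() the stack could not send RSTs
 * or TIME-WAIT ACKs, so there is nothing sensible to fall back to and a
 * registration failure is treated as fatal.
 */
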
void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}