/*
 *      TCP over IPv6
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      Based on:
 *      linux/net/ipv4/tcp.c
 *      linux/net/ipv4/tcp_input.c
 *      linux/net/ipv4/tcp_output.c
 *
 *      Fixes:
 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void     tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void     tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                      struct request_sock *req);

static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
                                                   const struct in6_addr *addr)
{
        return NULL;
}
#endif

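/* Cache the inbound route on the socket for the established fast path:
 * take a reference on the dst and record the incoming ifindex plus a
 * cookie used to revalidate the route later.
 */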
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);

        if (dst && dst_hold_safe(dst)) {
                const struct rt6_info *rt = (const struct rt6_info *)dst;

                sk->sk_rx_dst = dst;
                inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
                inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
        }
}

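/* Derive the initial sequence number for a connection from its address
 * and port 4-tuple via a keyed hash, so ISNs are hard to predict
 * off-path.
 */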
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
        return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
                                ipv6_hdr(skb)->saddr.s6_addr32,
                                tcp_hdr(skb)->dest,
                                tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
        return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
                                   ipv6_hdr(skb)->saddr.s6_addr32);
}

static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
                              int addr_len)
{
        /* This check is replicated from tcp_v6_connect() and intended to
         * prevent the BPF program called below from accessing bytes that
         * are outside the bounds specified by the user in addr_len.
         */
        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        sock_owned_by_me(sk);

        return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}

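/* Active open: validate the destination, divert v4-mapped destinations
 * to tcp_v4_connect(), route the flow, choose a source address, bind a
 * local port and send the SYN via tcp_connect().
 */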
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                          int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p, final;
        struct ipv6_txoptions *opt;
        struct flowi6 fl6;
        struct dst_entry *dst;
        int addr_type;
        int err;
        struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl6, 0, sizeof(fl6));

        if (np->sndflow) {
                fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl6.flowlabel);
                if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (!flowlabel)
                                return -EINVAL;
                        fl6_sock_release(flowlabel);
                }
        }

        /*
         *      connect() to INADDR_ANY means loopback (BSD'ism).
         */

        if (ipv6_addr_any(&usin->sin6_addr)) {
                if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
                        ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
                                               &usin->sin6_addr);
                else
                        usin->sin6_addr = in6addr_loopback;
        }

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if (addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type & IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If an interface was set while binding, the
                         * indices must coincide.
                         */
                        if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connecting to a link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        if (tp->rx_opt.ts_recent_stamp &&
            !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
                tp->rx_opt.ts_recent = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq = 0;
        }

        sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;

        /*
         *      TCP over IPv4
         */

        if (addr_type & IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                icsk->icsk_af_ops = &ipv6_mapped;
                sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &ipv6_specific;
                        sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                        tp->af_specific = &tcp_sock_ipv6_specific;
#endif
                        goto failure;
                }
                np->saddr = sk->sk_v6_rcv_saddr;

                return err;
        }

        if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
                saddr = &sk->sk_v6_rcv_saddr;

        fl6.flowi6_proto = IPPROTO_TCP;
        fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
        fl6.flowi6_uid = sk->sk_uid;

        opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
        final_p = fl6_update_dst(&fl6, opt, &final);

        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

        dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto failure;
        }

        if (!saddr) {
                saddr = &fl6.saddr;
                sk->sk_v6_rcv_saddr = *saddr;
        }

        /* set the source address */
        np->saddr = *saddr;
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;

        sk->sk_gso_type = SKB_GSO_TCPV6;
        ip6_dst_store(sk, dst, NULL, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (opt)
                icsk->icsk_ext_hdr_len = opt->opt_flen +
                                         opt->opt_nflen;

        tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

        inet->inet_dport = usin->sin6_port;

        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet6_hash_connect(tcp_death_row, sk);
        if (err)
                goto late_failure;

        sk_set_txhash(sk);

        if (likely(!tp->repair)) {
                if (!tp->write_seq)
                        tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
                                                         sk->sk_v6_daddr.s6_addr32,
                                                         inet->inet_sport,
                                                         inet->inet_dport);
                tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
                                                   np->saddr.s6_addr32,
                                                   sk->sk_v6_daddr.s6_addr32);
        }

        if (tcp_fastopen_defer_connect(sk, &err))
                return err;
        if (err)
                goto late_failure;

        err = tcp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        tcp_set_state(sk, TCP_CLOSE);
failure:
        inet->inet_dport = 0;
        sk->sk_route_caps = 0;
        return err;
}

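/* Handle a deferred Packet Too Big notification stored in tp->mtu_info:
 * update the route's PMTU and, if the current MSS no longer fits,
 * shrink it and retransmit outstanding data.
 */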
static void tcp_v6_mtu_reduced(struct sock *sk)
{
        struct dst_entry *dst;

        if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                return;

        dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
        if (!dst)
                return;

        if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                tcp_sync_mss(sk, dst_mtu(dst));
                tcp_simple_retransmit(sk);
        }
}

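/* ICMPv6 error handler for TCP: find the socket the error refers to,
 * then apply a PMTU update, follow a redirect, or report the error
 * according to the socket's state.
 */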
static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                u8 type, u8 code, int offset, __be32 info)
{
        const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
        const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
        struct net *net = dev_net(skb->dev);
        struct request_sock *fastopen;
        struct ipv6_pinfo *np;
        struct tcp_sock *tp;
        __u32 seq, snd_una;
        struct sock *sk;
        bool fatal;
        int err;

        sk = __inet6_lookup_established(net, &tcp_hashinfo,
                                        &hdr->daddr, th->dest,
                                        &hdr->saddr, ntohs(th->source),
                                        skb->dev->ifindex, inet6_sdif(skb));

        if (!sk) {
                __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
                                  ICMP6_MIB_INERRORS);
                return -ENOENT;
        }

        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return 0;
        }
        seq = ntohl(th->seq);
        fatal = icmpv6_err_convert(type, code, &err);
        if (sk->sk_state == TCP_NEW_SYN_RECV) {
                tcp_req_err(sk, seq, fatal);
                return 0;
        }

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
                __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
                __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }

        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
                __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == NDISC_REDIRECT) {
                if (!sock_owned_by_user(sk)) {
                        struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

                        if (dst)
                                dst->ops->redirect(dst, sk, skb);
                }
                goto out;
        }

        if (type == ICMPV6_PKT_TOOBIG) {
                /* We are not interested in TCP_LISTEN and open_requests
                 * (SYN-ACKs sent out by Linux are always <576 bytes, so
                 * they should go through unfragmented).
                 */
                if (sk->sk_state == TCP_LISTEN)
                        goto out;

                if (!ip6_sk_accept_pmtu(sk))
                        goto out;

                tp->mtu_info = ntohl(info);
                if (!sock_owned_by_user(sk))
                        tcp_v6_mtu_reduced(sk);
                else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
                                           &sk->sk_tsq_flags))
                        sock_hold(sk);
                goto out;
        }

        /* Might be for a request_sock */
        switch (sk->sk_state) {
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket
                 * is already accepted it is treated as a connected one below.
                 */
                if (fastopen && !fastopen->sk)
                        break;

                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
                        sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */

                        tcp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
        return 0;
}

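/* Build and send a SYN-ACK for a request sock, grabbing a route first
 * if the caller did not supply one.
 */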
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              enum tcp_synack_type synack_type)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_txoptions *opt;
        struct flowi6 *fl6 = &fl->u.ip6;
        struct sk_buff *skb;
        int err = -ENOMEM;

        /* First, grab a route. */
        if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
                                               IPPROTO_TCP)) == NULL)
                goto done;

        skb = tcp_make_synack(sk, dst, req, foc, synack_type);

        if (skb) {
                __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
                                    &ireq->ir_v6_rmt_addr);

                fl6->daddr = ireq->ir_v6_rmt_addr;
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

                rcu_read_lock();
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
                err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }

done:
        return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
        kfree(inet_rsk(req)->ipv6_opt);
        kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
                                                   const struct in6_addr *addr)
{
        return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
                                                const struct sock *addr_sk)
{
        return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

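/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler: add or delete the
 * MD5 key for a peer, treating v4-mapped peers as AF_INET keys.
 */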
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
                                 char __user *optval, int optlen)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
        u8 prefixlen;

        if (optlen < sizeof(cmd))
                return -EINVAL;

        if (copy_from_user(&cmd, optval, sizeof(cmd)))
                return -EFAULT;

        if (sin6->sin6_family != AF_INET6)
                return -EINVAL;

        if (optname == TCP_MD5SIG_EXT &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
                prefixlen = cmd.tcpm_prefixlen;
                if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
                                        prefixlen > 32))
                        return -EINVAL;
        } else {
                prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
        }

        if (!cmd.tcpm_keylen) {
                if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                        return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                                              AF_INET, prefixlen);
                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
                                      AF_INET6, prefixlen);
        }

        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;

        if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                                      AF_INET, prefixlen, cmd.tcpm_key,
                                      cmd.tcpm_keylen, GFP_KERNEL);

        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
                              AF_INET6, prefixlen, cmd.tcpm_key,
                              cmd.tcpm_keylen, GFP_KERNEL);
}

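/* Feed the IPv6 pseudo-header plus the TCP header (with its checksum
 * field zeroed) into the MD5 hash being computed.
 */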
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
                                   const struct in6_addr *daddr,
                                   const struct in6_addr *saddr,
                                   const struct tcphdr *th, int nbytes)
{
        struct tcp6_pseudohdr *bp;
        struct scatterlist sg;
        struct tcphdr *_th;

        bp = hp->scratch;
        /* 1. TCP pseudo-header (RFC2460) */
        bp->saddr = *saddr;
        bp->daddr = *daddr;
        bp->protocol = cpu_to_be32(IPPROTO_TCP);
        bp->len = cpu_to_be32(nbytes);

        _th = (struct tcphdr *)(bp + 1);
        memcpy(_th, th, sizeof(*th));
        _th->check = 0;

        sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
        ahash_request_set_crypt(hp->md5_req, &sg, NULL,
                                sizeof(*bp) + sizeof(*th));
        return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               const struct in6_addr *daddr, struct in6_addr *saddr,
                               const struct tcphdr *th)
{
        struct tcp_md5sig_pool *hp;
        struct ahash_request *req;

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        req = hp->md5_req;

        if (crypto_ahash_init(req))
                goto clear_hash;
        if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        ahash_request_set_crypt(req, NULL, md5_hash, 0);
        if (crypto_ahash_final(req))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
                               const struct tcp_md5sig_key *key,
                               const struct sock *sk,
                               const struct sk_buff *skb)
{
        const struct in6_addr *saddr, *daddr;
        struct tcp_md5sig_pool *hp;
        struct ahash_request *req;
        const struct tcphdr *th = tcp_hdr(skb);

        if (sk) { /* valid for established and request sockets */
                saddr = &sk->sk_v6_rcv_saddr;
                daddr = &sk->sk_v6_daddr;
        } else {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
                saddr = &ip6h->saddr;
                daddr = &ip6h->daddr;
        }

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        req = hp->md5_req;

        if (crypto_ahash_init(req))
                goto clear_hash;

        if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
                goto clear_hash;
        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        ahash_request_set_crypt(req, NULL, md5_hash, 0);
        if (crypto_ahash_final(req))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}

#endif

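/* Check the TCP MD5 signature option of an inbound segment against the
 * key expected for the peer. Returns true when the segment must be
 * dropped: missing, unexpected, or mismatching signature.
 */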
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
                                    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
        const __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        const struct tcphdr *th = tcp_hdr(skb);
        int genhash;
        u8 newhash[16];

        hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
        hash_location = tcp_parse_md5sig_option(th);

        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
                return false;

        if (hash_expected && !hash_location) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }

        if (!hash_expected && hash_location) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }

        /* check the signature */
        genhash = tcp_v6_md5_hash_skb(newhash,
                                      hash_expected,
                                      NULL, skb);

        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
                net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
                                     genhash ? "failed" : "mismatch",
                                     &ip6h->saddr, ntohs(th->source),
                                     &ip6h->daddr, ntohs(th->dest));
                return true;
        }
#endif
        return false;
}

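/* Fill in the IPv6-specific fields of a new request sock: the peer and
 * local addresses, the inbound interface for link-local peers, and a
 * reference to the SYN skb when its IPv6 options need to be kept.
 */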
static void tcp_v6_init_req(struct request_sock *req,
                            const struct sock *sk_listener,
                            struct sk_buff *skb)
{
        bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct ipv6_pinfo *np = inet6_sk(sk_listener);

        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

        /* So that link locals have meaning */
        if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
            ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq->ir_iif = tcp_v6_iif(skb);

        if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
            (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
             np->rxopt.bits.rxinfo ||
             np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
             np->rxopt.bits.rxohlim || np->repflow)) {
                refcount_inc(&skb->users);
                ireq->pktopts = skb;
        }
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
                                          struct flowi *fl,
                                          const struct request_sock *req)
{
        return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
        .family         =       AF_INET6,
        .obj_size       =       sizeof(struct tcp6_request_sock),
        .rtx_syn_ack    =       tcp_rtx_synack,
        .send_ack       =       tcp_v6_reqsk_send_ack,
        .destructor     =       tcp_v6_reqsk_destructor,
        .send_reset     =       tcp_v6_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
        .mss_clamp      =       IPV6_MIN_MTU - sizeof(struct tcphdr) -
                                sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
        .req_md5_lookup =       tcp_v6_md5_lookup,
        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
#endif
        .init_req       =       tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
        .cookie_init_seq =      cookie_v6_init_sequence,
#endif
        .route_req      =       tcp_v6_route_req,
        .init_seq       =       tcp_v6_init_seq,
        .init_ts_off    =       tcp_v6_init_ts_off,
        .send_synack    =       tcp_v6_send_synack,
};

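/* Common helper for sending an unattached ACK or RST in reply to @skb.
 * The reply is built from scratch and transmitted through the per-netns
 * control socket, so it works even without a full socket.
 */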
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
                                 u32 ack, u32 win, u32 tsval, u32 tsecr,
                                 int oif, struct tcp_md5sig_key *key, int rst,
                                 u8 tclass, __be32 label)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct tcphdr *t1;
        struct sk_buff *buff;
        struct flowi6 fl6;
        struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
        struct sock *ctl_sk = net->ipv6.tcp_sk;
        unsigned int tot_len = sizeof(struct tcphdr);
        struct dst_entry *dst;
        __be32 *topt;
        __u32 mark = 0;

        if (tsecr)
                tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
        if (key)
                tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
                         GFP_ATOMIC);
        if (!buff)
                return;

        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

        t1 = skb_push(buff, tot_len);
        skb_reset_transport_header(buff);

        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = tot_len / 4;
        t1->seq = htonl(seq);
        t1->ack_seq = htonl(ack);
        t1->ack = !rst || !th->ack;
        t1->rst = rst;
        t1->window = htons(win);

        topt = (__be32 *)(t1 + 1);

        if (tsecr) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
                *topt++ = htonl(tsval);
                *topt++ = htonl(tsecr);
        }

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
                tcp_v6_md5_hash_hdr((__u8 *)topt, key,
                                    &ipv6_hdr(skb)->saddr,
                                    &ipv6_hdr(skb)->daddr, t1);
        }
#endif

        memset(&fl6, 0, sizeof(fl6));
        fl6.daddr = ipv6_hdr(skb)->saddr;
        fl6.saddr = ipv6_hdr(skb)->daddr;
        fl6.flowlabel = label;

        buff->ip_summed = CHECKSUM_PARTIAL;
        buff->csum = 0;

        __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

        fl6.flowi6_proto = IPPROTO_TCP;
        if (rt6_need_strict(&fl6.daddr) && !oif)
                fl6.flowi6_oif = tcp_v6_iif(skb);
        else {
                if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
                        oif = skb->skb_iif;

                fl6.flowi6_oif = oif;
        }

        if (sk)
                mark = (sk->sk_state == TCP_TIME_WAIT) ?
                        inet_twsk(sk)->tw_mark : sk->sk_mark;
        fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

        /* Pass a socket to ip6_dst_lookup even when it is for a RST;
         * the underlying function will use it to retrieve the network
         * namespace.
         */
        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(buff, dst);
                ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
                TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
                if (rst)
                        TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
                return;
        }

        kfree_skb(buff);
}

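/* Send a RST in reply to @skb, mirroring its sequence numbers. If an
 * MD5 key is configured for the peer (looked up via the socket or, for
 * a signed segment without one, via the listener), sign the reset too.
 */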
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        u32 seq = 0, ack_seq = 0;
        struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
        const __u8 *hash_location = NULL;
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        unsigned char newhash[16];
        int genhash;
        struct sock *sk1 = NULL;
#endif
        int oif = 0;

        if (th->rst)
                return;

        /* If sk is not NULL, it means we did a successful lookup and the
         * incoming route had to be correct. prequeue might have dropped
         * our dst.
         */
        if (!sk && !ipv6_unicast_destination(skb))
                return;

#ifdef CONFIG_TCP_MD5SIG
        rcu_read_lock();
        hash_location = tcp_parse_md5sig_option(th);
        if (sk && sk_fullsock(sk)) {
                key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
        } else if (hash_location) {
                /*
                 * The active side is lost. Try to find the listening socket
                 * through the source port, and then find the md5 key through
                 * the listening socket. We do not lose security here:
                 * the incoming packet is checked with the md5 hash using the
                 * key we find; no RST is generated if the md5 hash doesn't
                 * match.
                 */
                sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
                                           &tcp_hashinfo, NULL, 0,
                                           &ipv6h->saddr,
                                           th->source, &ipv6h->daddr,
                                           ntohs(th->source),
                                           tcp_v6_iif_l3_slave(skb),
                                           tcp_v6_sdif(skb));
                if (!sk1)
                        goto out;

                key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
                if (!key)
                        goto out;

                genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto out;
        }
#endif

        if (th->ack)
                seq = ntohl(th->ack_seq);
        else
                ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
                          (th->doff << 2);

        if (sk) {
                oif = sk->sk_bound_dev_if;
                if (sk_fullsock(sk))
                        trace_tcp_send_reset(sk, skb);
        }

        tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
        rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
                            u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key, u8 tclass,
                            __be32 label)
{
        tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
                             tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp_raw() + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
                        tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

        inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
        /* RFC 7323 2.3
         * The window field (SEG.WND) of every outgoing segment, with the
         * exception of <SYN> segments, MUST be right-shifted by
         * Rcv.Wind.Shift bits:
         */
        tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
                        tcp_rsk(req)->rcv_nxt,
                        req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
                        req->ts_recent, sk->sk_bound_dev_if,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
                        0, 0);
}

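/* On a listening socket, a non-SYN segment may carry a SYN-cookie ACK:
 * validate the cookie and, on success, return the newly created child
 * socket instead of the listener.
 */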
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
        const struct tcphdr *th = tcp_hdr(skb);

        if (!th->syn)
                sk = cookie_v6_check(sk, skb);
#endif
        return sk;
}

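/* Passive open entry point: v4-mapped SYNs are diverted to the IPv4
 * handler, non-unicast destinations are dropped, everything else goes
 * through the generic tcp_conn_request() with the IPv6 ops tables.
 */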
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        return tcp_conn_request(&tcp6_request_sock_ops,
                                &tcp_request_sock_ipv6_ops, sk, skb);

drop:
        tcp_listendrop(sk);
        return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
        /* We need to move the header back to the beginning if
         * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be
         * called again. ip6_datagram_recv_specific_ctl() also expects
         * IP6CB to be there.
         */
        memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
                sizeof(struct inet6_skb_parm));
}

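/* Create the child socket once the handshake completes. The v4-mapped
 * case is delegated to tcp_v4_syn_recv_sock(); otherwise the listener
 * is cloned, addresses, IPv6 options and any MD5 key are installed,
 * and the new socket is hashed into the established table.
 */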
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst,
                                         struct request_sock *req_unhash,
                                         bool *own_req)
{
        struct inet_request_sock *ireq;
        struct ipv6_pinfo *newnp;
        const struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_txoptions *opt;
        struct tcp6_sock *newtcp6sk;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif
        struct flowi6 fl6;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped
                 */

                newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
                                             req_unhash, own_req);

                if (!newsk)
                        return NULL;

                newtcp6sk = (struct tcp6_sock *)newsk;
                inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

                newinet = inet_sk(newsk);
                newnp = inet6_sk(newsk);
                newtp = tcp_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                newnp->saddr = newsk->sk_v6_rcv_saddr;

                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                newnp->ipv6_mc_list = NULL;
                newnp->ipv6_ac_list = NULL;
                newnp->ipv6_fl_list = NULL;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = inet_iif(skb);
                newnp->mcast_hops  = ip_hdr(skb)->ttl;
                newnp->rcv_flowinfo = 0;
                if (np->repflow)
                        newnp->flow_label = 0;

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, tcp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* This is a tricky place. Until this moment the IPv4 tcp
                   worked with the IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        ireq = inet_rsk(req);

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (!dst) {
                dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
                if (!dst)
                        goto out;
        }

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (!newsk)
                goto out_nonewsk;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, tcp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        newsk->sk_gso_type = SKB_GSO_TCPV6;
        ip6_dst_store(newsk, dst, NULL, NULL);
        inet6_sk_rx_dst_set(newsk, skb);

        newtcp6sk = (struct tcp6_sock *)newsk;
        inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

        newtp = tcp_sk(newsk);
        newinet = inet_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
        newnp->saddr = ireq->ir_v6_loc_addr;
        newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if = ireq->ir_iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->inet_opt = NULL;
        newnp->ipv6_mc_list = NULL;
        newnp->ipv6_ac_list = NULL;
        newnp->ipv6_fl_list = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        newnp->pktoptions = NULL;
        newnp->opt        = NULL;
        newnp->mcast_oif  = tcp_v6_iif(skb);
        newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
        newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
        if (np->repflow)
                newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

        /* Clone native IPv6 options from the listening socket (if any).

           Yes, keeping a reference count would be much more clever, but
           we do one more thing here: reattach optmem to newsk.
         */
        opt = ireq->ipv6_opt;
        if (!opt)
                opt = rcu_dereference(np->opt);
        if (opt) {
                opt = ipv6_dup_options(newsk, opt);
                RCU_INIT_POINTER(newnp->opt, opt);
        }
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (opt)
                inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
                                                    opt->opt_flen;

        tcp_ca_openreq_child(newsk, dst);

        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

        tcp_initialize_rcv_mss(newsk);

        newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
        newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
        if (key) {
                /* We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
                               AF_INET6, 128, key->key, key->keylen,
                               sk_gfp_mask(sk, GFP_ATOMIC));
        }
#endif

        if (__inet_inherit_port(sk, newsk) < 0) {
                inet_csk_prepare_forced_close(newsk);
                tcp_done(newsk);
                goto out;
        }
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
        if (*own_req) {
                tcp_move_syn(newtp, req);

                /* Clone pktoptions received with SYN, if we own the req */
                if (ireq->pktopts) {
                        newnp->pktoptions = skb_clone(ireq->pktopts,
                                                      sk_gfp_mask(sk, GFP_ATOMIC));
                        consume_skb(ireq->pktopts);
                        ireq->pktopts = NULL;
                        if (newnp->pktoptions) {
                                tcp_v6_restore_cb(newnp->pktoptions);
                                skb_set_owner_r(newnp->pktoptions, newsk);
                        }
                }
        }

        return newsk;

out_overflow:
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
        dst_release(dst);
out:
        tcp_listendrop(sk);
        return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp;
        struct sk_buff *opt_skb = NULL;

        /* Imagine: the socket is IPv6 and an IPv4 packet arrives,
           goes to the IPv4 receive handler and is backlogged.
           From the backlog it always comes here. Kerboom...
           Fortunately, tcp_rcv_established and rcv_established
           handle them correctly, but it is not the case with
           tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_do_rcv(sk, skb);

        /*
         *      socket locking is here for SMP purposes as backlog rcv
         *      is currently called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code where we
           may make it not affect IPv4.
           The rest of the code is protocol independent,
           and I do not like the idea of uglifying IPv4.

           Actually, the whole idea behind IPV6_PKTOPTIONS
           does not look very well thought out. For now we latch
           the options received in the last packet enqueued
           by tcp. Feel free to propose a better solution.
                                               --ANK (980728)
         */
        if (np->rxopt.all)
                opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                struct dst_entry *dst = sk->sk_rx_dst;

                sock_rps_save_rxhash(sk, skb);
                sk_mark_napi_id(sk, skb);
                if (dst) {
                        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
                            dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
                                dst_release(dst);
                                sk->sk_rx_dst = NULL;
                        }
                }

                tcp_rcv_established(sk, skb);
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
        }

        if (tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v6_cookie_check(sk, skb);

                if (!nsk)
                        goto discard;

                if (nsk != sk) {
                        if (tcp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        } else
                sock_rps_save_rxhash(sk, skb);

        if (tcp_rcv_state_process(sk, skb))
                goto reset;
        if (opt_skb)
                goto ipv6_pktoptions;
        return 0;

reset:
        tcp_v6_send_reset(sk, skb);
discard:
        if (opt_skb)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
csum_err:
        TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
        TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
        goto discard;

ipv6_pktoptions:
        /* Do you ask, what is it?

           1. skb was enqueued by tcp.
           2. skb was added to the tail of the read queue, not out of order.
           3. The socket is not in passive state.
           4. Finally, it really contains options, which the user wants to
              receive.
         */
        tp = tcp_sk(sk);
        if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
            !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
                if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
                        np->mcast_oif = tcp_v6_iif(opt_skb);
                if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
                        np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
                if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
                        np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
                if (np->repflow)
                        np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
                if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
                        skb_set_owner_r(opt_skb, sk);
                        tcp_v6_restore_cb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, opt_skb);
                } else {
                        __kfree_skb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, NULL);
                }
        }

        kfree_skb(opt_skb);
        return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
                           const struct tcphdr *th)
{
        /* This is tricky: we move IP6CB at its correct location into
         * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
         * _decode_session6() uses IP6CB().
         * barrier() makes sure compiler won't play aliasing games.
         */
        memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
                sizeof(struct inet6_skb_parm));
        barrier();

        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff * 4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
        TCP_SKB_CB(skb)->tcp_tw_isn = 0;
        TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
        TCP_SKB_CB(skb)->sacked = 0;
        TCP_SKB_CB(skb)->has_rxtstamp =
                        skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

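/* Main IPv6 receive entry point for TCP: validate the header and
 * checksum, look up the owning socket (with special handling for
 * TIME_WAIT and NEW_SYN_RECV), then hand the segment to the state
 * machine via tcp_v6_do_rcv() or the backlog.
 */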
1429 static int tcp_v6_rcv(struct sk_buff *skb)
1430 {
1431         int sdif = inet6_sdif(skb);
1432         const struct tcphdr *th;
1433         const struct ipv6hdr *hdr;
1434         bool refcounted;
1435         struct sock *sk;
1436         int ret;
1437         struct net *net = dev_net(skb->dev);
1438
1439         if (skb->pkt_type != PACKET_HOST)
1440                 goto discard_it;
1441
1442         /*
1443          *      Count it even if it's bad.
1444          */
1445         __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1446
1447         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1448                 goto discard_it;
1449
1450         th = (const struct tcphdr *)skb->data;
1451
1452         if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1453                 goto bad_packet;
1454         if (!pskb_may_pull(skb, th->doff*4))
1455                 goto discard_it;
1456
1457         if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1458                 goto csum_error;
1459
1460         th = (const struct tcphdr *)skb->data;
1461         hdr = ipv6_hdr(skb);
1462
1463 lookup:
1464         sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1465                                 th->source, th->dest, inet6_iif(skb), sdif,
1466                                 &refcounted);
1467         if (!sk)
1468                 goto no_tcp_socket;
1469
1470 process:
1471         if (sk->sk_state == TCP_TIME_WAIT)
1472                 goto do_time_wait;
1473
1474         if (sk->sk_state == TCP_NEW_SYN_RECV) {
1475                 struct request_sock *req = inet_reqsk(sk);
1476                 bool req_stolen = false;
1477                 struct sock *nsk;
1478
1479                 sk = req->rsk_listener;
1480                 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1481                         sk_drops_add(sk, skb);
1482                         reqsk_put(req);
1483                         goto discard_it;
1484                 }
1485                 if (tcp_checksum_complete(skb)) {
1486                         reqsk_put(req);
1487                         goto csum_error;
1488                 }
1489                 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1490                         inet_csk_reqsk_queue_drop_and_put(sk, req);
1491                         goto lookup;
1492                 }
1493                 sock_hold(sk);
1494                 refcounted = true;
1495                 nsk = NULL;
1496                 if (!tcp_filter(sk, skb)) {
1497                         th = (const struct tcphdr *)skb->data;
1498                         hdr = ipv6_hdr(skb);
1499                         tcp_v6_fill_cb(skb, hdr, th);
1500                         nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1501                 }
1502                 if (!nsk) {
1503                         reqsk_put(req);
1504                         if (req_stolen) {
1505                                 /* Another CPU got exclusive access to req
1506                                  * and created a full-blown socket.
1507                                  * Try to feed this packet to that socket
1508                                  * instead of discarding it.
1509                                  */
1510                                 tcp_v6_restore_cb(skb);
1511                                 sock_put(sk);
1512                                 goto lookup;
1513                         }
1514                         goto discard_and_relse;
1515                 }
1516                 if (nsk == sk) {
1517                         reqsk_put(req);
1518                         tcp_v6_restore_cb(skb);
1519                 } else if (tcp_child_process(sk, nsk, skb)) {
1520                         tcp_v6_send_reset(nsk, skb);
1521                         goto discard_and_relse;
1522                 } else {
1523                         sock_put(sk);
1524                         return 0;
1525                 }
1526         }
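        /* IPV6_MINHOPCOUNT, the v6 analogue of IP_MINTTL: drop segments
         * whose hop limit is below the configured floor. With a floor of
         * 255 only directly connected peers get through (RFC 5082 GTSM).
         */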
1527         if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1528                 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1529                 goto discard_and_relse;
1530         }
1531
1532         if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1533                 goto discard_and_relse;
1534
1535         if (tcp_v6_inbound_md5_hash(sk, skb))
1536                 goto discard_and_relse;
1537
1538         if (tcp_filter(sk, skb))
1539                 goto discard_and_relse;
1540         th = (const struct tcphdr *)skb->data;
1541         hdr = ipv6_hdr(skb);
1542         tcp_v6_fill_cb(skb, hdr, th);
1543
1544         skb->dev = NULL;
1545
1546         if (sk->sk_state == TCP_LISTEN) {
1547                 ret = tcp_v6_do_rcv(sk, skb);
1548                 goto put_and_return;
1549         }
1550
1551         sk_incoming_cpu_update(sk);
1552
1553         bh_lock_sock_nested(sk);
1554         tcp_segs_in(tcp_sk(sk), skb);
1555         ret = 0;
1556         if (!sock_owned_by_user(sk)) {
1557                 ret = tcp_v6_do_rcv(sk, skb);
1558         } else if (tcp_add_backlog(sk, skb)) {
1559                 goto discard_and_relse;
1560         }
1561         bh_unlock_sock(sk);
1562
1563 put_and_return:
1564         if (refcounted)
1565                 sock_put(sk);
1566         return ret ? -1 : 0;
1567
1568 no_tcp_socket:
1569         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1570                 goto discard_it;
1571
1572         tcp_v6_fill_cb(skb, hdr, th);
1573
1574         if (tcp_checksum_complete(skb)) {
1575 csum_error:
1576                 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1577 bad_packet:
1578                 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1579         } else {
1580                 tcp_v6_send_reset(NULL, skb);
1581         }
1582
1583 discard_it:
1584         kfree_skb(skb);
1585         return 0;
1586
1587 discard_and_relse:
1588         sk_drops_add(sk, skb);
1589         if (refcounted)
1590                 sock_put(sk);
1591         goto discard_it;
1592
1593 do_time_wait:
1594         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1595                 inet_twsk_put(inet_twsk(sk));
1596                 goto discard_it;
1597         }
1598
1599         tcp_v6_fill_cb(skb, hdr, th);
1600
1601         if (tcp_checksum_complete(skb)) {
1602                 inet_twsk_put(inet_twsk(sk));
1603                 goto csum_error;
1604         }
1605
1606         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1607         case TCP_TW_SYN:
1608         {
1609                 struct sock *sk2;
1610
1611                 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1612                                             skb, __tcp_hdrlen(th),
1613                                             &ipv6_hdr(skb)->saddr, th->source,
1614                                             &ipv6_hdr(skb)->daddr,
1615                                             ntohs(th->dest),
1616                                             tcp_v6_iif_l3_slave(skb),
1617                                             sdif);
1618                 if (sk2) {
1619                         struct inet_timewait_sock *tw = inet_twsk(sk);
1620                         inet_twsk_deschedule_put(tw);
1621                         sk = sk2;
1622                         tcp_v6_restore_cb(skb);
1623                         refcounted = false;
1624                         goto process;
1625                 }
1626         }
1627                 /* to ACK */
1628                 /* fall through */
1629         case TCP_TW_ACK:
1630                 tcp_v6_timewait_ack(sk, skb);
1631                 break;
1632         case TCP_TW_RST:
1633                 tcp_v6_send_reset(sk, skb);
1634                 inet_twsk_deschedule_put(inet_twsk(sk));
1635                 goto discard_it;
1636         case TCP_TW_SUCCESS:
1637                 ;
1638         }
1639         goto discard_it;
1640 }
1641
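/* Early demux, called from the IPv6 input path before the route lookup.
 * If the segment matches an established socket we cache the socket (and,
 * for a full socket, its validated rx dst) on the skb, letting the rest
 * of the receive path skip the routing and socket lookups. Purely an
 * optimization: a miss here simply falls back to the slow path in
 * tcp_v6_rcv().
 */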
1642 static void tcp_v6_early_demux(struct sk_buff *skb)
1643 {
1644         const struct ipv6hdr *hdr;
1645         const struct tcphdr *th;
1646         struct sock *sk;
1647
1648         if (skb->pkt_type != PACKET_HOST)
1649                 return;
1650
1651         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1652                 return;
1653
1654         hdr = ipv6_hdr(skb);
1655         th = tcp_hdr(skb);
1656
1657         if (th->doff < sizeof(struct tcphdr) / 4)
1658                 return;
1659
1660         /* Note: we use inet6_iif() here, not tcp_v6_iif() */
1661         sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1662                                         &hdr->saddr, th->source,
1663                                         &hdr->daddr, ntohs(th->dest),
1664                                         inet6_iif(skb), inet6_sdif(skb));
1665         if (sk) {
1666                 skb->sk = sk;
1667                 skb->destructor = sock_edemux;
1668                 if (sk_fullsock(sk)) {
1669                         struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1670
1671                         if (dst)
1672                                 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1673                         if (dst &&
1674                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1675                                 skb_dst_set_noref(skb, dst);
1676                 }
1677         }
1678 }
1679
1680 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1681         .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
1682         .twsk_unique    = tcp_twsk_unique,
1683         .twsk_destructor = tcp_twsk_destructor,
1684 };
1685
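/* AF-specific connection ops: the indirection that lets a single
 * tcp_sock speak either native IPv6 (this table) or IPv4-mapped IPv6
 * (ipv6_mapped below), switched at connect()/accept() time.
 */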
1686 static const struct inet_connection_sock_af_ops ipv6_specific = {
1687         .queue_xmit        = inet6_csk_xmit,
1688         .send_check        = tcp_v6_send_check,
1689         .rebuild_header    = inet6_sk_rebuild_header,
1690         .sk_rx_dst_set     = inet6_sk_rx_dst_set,
1691         .conn_request      = tcp_v6_conn_request,
1692         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1693         .net_header_len    = sizeof(struct ipv6hdr),
1694         .net_frag_header_len = sizeof(struct frag_hdr),
1695         .setsockopt        = ipv6_setsockopt,
1696         .getsockopt        = ipv6_getsockopt,
1697         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1698         .sockaddr_len      = sizeof(struct sockaddr_in6),
1699 #ifdef CONFIG_COMPAT
1700         .compat_setsockopt = compat_ipv6_setsockopt,
1701         .compat_getsockopt = compat_ipv6_getsockopt,
1702 #endif
1703         .mtu_reduced       = tcp_v6_mtu_reduced,
1704 };
1705
1706 #ifdef CONFIG_TCP_MD5SIG
1707 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1708         .md5_lookup     =       tcp_v6_md5_lookup,
1709         .calc_md5_hash  =       tcp_v6_md5_hash_skb,
1710         .md5_parse      =       tcp_v6_parse_md5_keys,
1711 };
1712 #endif
1713
1714 /*
1715  *      TCP over IPv4 via INET6 API
1716  */
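/* A v6 socket that connects to a V4MAPPED address (::ffff:a.b.c.d) is
 * rewired to these ops in tcp_v6_connect(), so everything on the wire
 * is plain IPv4. A minimal userspace sketch of how that is triggered:
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 */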
1717 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1718         .queue_xmit        = ip_queue_xmit,
1719         .send_check        = tcp_v4_send_check,
1720         .rebuild_header    = inet_sk_rebuild_header,
1721         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1722         .conn_request      = tcp_v6_conn_request,
1723         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1724         .net_header_len    = sizeof(struct iphdr),
1725         .setsockopt        = ipv6_setsockopt,
1726         .getsockopt        = ipv6_getsockopt,
1727         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1728         .sockaddr_len      = sizeof(struct sockaddr_in6),
1729 #ifdef CONFIG_COMPAT
1730         .compat_setsockopt = compat_ipv6_setsockopt,
1731         .compat_getsockopt = compat_ipv6_getsockopt,
1732 #endif
1733         .mtu_reduced       = tcp_v4_mtu_reduced,
1734 };
1735
1736 #ifdef CONFIG_TCP_MD5SIG
1737 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1738         .md5_lookup     =       tcp_v4_md5_lookup,
1739         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1740         .md5_parse      =       tcp_v6_parse_md5_keys,
1741 };
1742 #endif
1743
1744 /* NOTE: A lot of fields are set to zero explicitly by the call to
1745  *       sk_alloc(), so they need not be initialized here.
1746  */
1747 static int tcp_v6_init_sock(struct sock *sk)
1748 {
1749         struct inet_connection_sock *icsk = inet_csk(sk);
1750
1751         tcp_init_sock(sk);
1752
1753         icsk->icsk_af_ops = &ipv6_specific;
1754
1755 #ifdef CONFIG_TCP_MD5SIG
1756         tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1757 #endif
1758
1759         return 0;
1760 }
1761
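/* Despite its name, tcp_v4_destroy_sock() tears down the AF-independent
 * TCP state; inet6_destroy_sock() then releases the IPv6-specific bits
 * such as cached pktoptions.
 */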
1762 static void tcp_v6_destroy_sock(struct sock *sk)
1763 {
1764         tcp_v4_destroy_sock(sk);
1765         inet6_destroy_sock(sk);
1766 }
1767
1768 #ifdef CONFIG_PROC_FS
1769 /* Proc filesystem TCPv6 sock list dumping. */
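/* The output format mirrors /proc/net/tcp: addresses are dumped as the
 * four raw 32-bit words of the in6_addr in hex, with no byte swapping,
 * while ports go through ntohs(). On a little-endian box ::1 port 631
 * therefore reads "00000000000000000000000001000000:0277".
 */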
1770 static void get_openreq6(struct seq_file *seq,
1771                          const struct request_sock *req, int i)
1772 {
1773         long ttd = req->rsk_timer.expires - jiffies;
1774         const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1775         const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1776
1777         if (ttd < 0)
1778                 ttd = 0;
1779
1780         seq_printf(seq,
1781                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1782                    "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1783                    i,
1784                    src->s6_addr32[0], src->s6_addr32[1],
1785                    src->s6_addr32[2], src->s6_addr32[3],
1786                    inet_rsk(req)->ir_num,
1787                    dest->s6_addr32[0], dest->s6_addr32[1],
1788                    dest->s6_addr32[2], dest->s6_addr32[3],
1789                    ntohs(inet_rsk(req)->ir_rmt_port),
1790                    TCP_SYN_RECV,
1791                    0, 0, /* could print option size, but that is af dependent. */
1792                    1,   /* timers active (only the expire timer) */
1793                    jiffies_to_clock_t(ttd),
1794                    req->num_timeout,
1795                    from_kuid_munged(seq_user_ns(seq),
1796                                     sock_i_uid(req->rsk_listener)),
1797                    0,  /* non-standard timer */
1798                    0, /* open_requests have no inode */
1799                    0, req);
1800 }
1801
1802 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1803 {
1804         const struct in6_addr *dest, *src;
1805         __u16 destp, srcp;
1806         int timer_active;
1807         unsigned long timer_expires;
1808         const struct inet_sock *inet = inet_sk(sp);
1809         const struct tcp_sock *tp = tcp_sk(sp);
1810         const struct inet_connection_sock *icsk = inet_csk(sp);
1811         const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1812         int rx_queue;
1813         int state;
1814
1815         dest  = &sp->sk_v6_daddr;
1816         src   = &sp->sk_v6_rcv_saddr;
1817         destp = ntohs(inet->inet_dport);
1818         srcp  = ntohs(inet->inet_sport);
1819
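        /* Encode which timer is pending using the same codes as
         * /proc/net/tcp: 1 retransmit (incl. RACK reorder and loss
         * probe), 4 zero-window probe, 2 keepalive, 0 none. TIME_WAIT
         * sockets report 3 and request sockets always report 1.
         */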
1820         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1821             icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
1822             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1823                 timer_active    = 1;
1824                 timer_expires   = icsk->icsk_timeout;
1825         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1826                 timer_active    = 4;
1827                 timer_expires   = icsk->icsk_timeout;
1828         } else if (timer_pending(&sp->sk_timer)) {
1829                 timer_active    = 2;
1830                 timer_expires   = sp->sk_timer.expires;
1831         } else {
1832                 timer_active    = 0;
1833                 timer_expires = jiffies;
1834         }
1835
1836         state = inet_sk_state_load(sp);
1837         if (state == TCP_LISTEN)
1838                 rx_queue = sp->sk_ack_backlog;
1839         else
1840                 /* Because we don't lock the socket,
1841                  * we might find a transient negative value.
1842                  */
1843                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1844
1845         seq_printf(seq,
1846                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1847                    "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1848                    i,
1849                    src->s6_addr32[0], src->s6_addr32[1],
1850                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1851                    dest->s6_addr32[0], dest->s6_addr32[1],
1852                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1853                    state,
1854                    tp->write_seq - tp->snd_una,
1855                    rx_queue,
1856                    timer_active,
1857                    jiffies_delta_to_clock_t(timer_expires - jiffies),
1858                    icsk->icsk_retransmits,
1859                    from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1860                    icsk->icsk_probes_out,
1861                    sock_i_ino(sp),
1862                    refcount_read(&sp->sk_refcnt), sp,
1863                    jiffies_to_clock_t(icsk->icsk_rto),
1864                    jiffies_to_clock_t(icsk->icsk_ack.ato),
1865                    (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
1866                    tp->snd_cwnd,
1867                    state == TCP_LISTEN ?
1868                         fastopenq->max_qlen :
1869                         (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1870                    );
1871 }
1872
1873 static void get_timewait6_sock(struct seq_file *seq,
1874                                struct inet_timewait_sock *tw, int i)
1875 {
1876         long delta = tw->tw_timer.expires - jiffies;
1877         const struct in6_addr *dest, *src;
1878         __u16 destp, srcp;
1879
1880         dest = &tw->tw_v6_daddr;
1881         src  = &tw->tw_v6_rcv_saddr;
1882         destp = ntohs(tw->tw_dport);
1883         srcp  = ntohs(tw->tw_sport);
1884
1885         seq_printf(seq,
1886                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1887                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1888                    i,
1889                    src->s6_addr32[0], src->s6_addr32[1],
1890                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1891                    dest->s6_addr32[0], dest->s6_addr32[1],
1892                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1893                    tw->tw_substate, 0, 0,
1894                    3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1895                    refcount_read(&tw->tw_refcnt), tw);
1896 }
1897
1898 static int tcp6_seq_show(struct seq_file *seq, void *v)
1899 {
1900         struct tcp_iter_state *st;
1901         struct sock *sk = v;
1902
1903         if (v == SEQ_START_TOKEN) {
1904                 seq_puts(seq,
1905                          "  sl  "
1906                          "local_address                         "
1907                          "remote_address                        "
1908                          "st tx_queue rx_queue tr tm->when retrnsmt"
1909                          "   uid  timeout inode\n");
1910                 goto out;
1911         }
1912         st = seq->private;
1913
1914         if (sk->sk_state == TCP_TIME_WAIT)
1915                 get_timewait6_sock(seq, v, st->num);
1916         else if (sk->sk_state == TCP_NEW_SYN_RECV)
1917                 get_openreq6(seq, v, st->num);
1918         else
1919                 get_tcp6_sock(seq, v, st->num);
1920 out:
1921         return 0;
1922 }
1923
1924 static const struct seq_operations tcp6_seq_ops = {
1925         .show           = tcp6_seq_show,
1926         .start          = tcp_seq_start,
1927         .next           = tcp_seq_next,
1928         .stop           = tcp_seq_stop,
1929 };
1930
1931 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1932         .family         = AF_INET6,
1933 };
1934
1935 int __net_init tcp6_proc_init(struct net *net)
1936 {
1937         if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
1938                         sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
1939                 return -ENOMEM;
1940         return 0;
1941 }
1942
1943 void tcp6_proc_exit(struct net *net)
1944 {
1945         remove_proc_entry("tcp6", net->proc_net);
1946 }
1947 #endif
1948
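/* The AF_INET6/SOCK_STREAM protocol descriptor. Most handlers are the
 * address-family-independent tcp_* routines; only socket setup, hashing
 * and the receive/connect paths need the IPv6-specific variants above.
 */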
1949 struct proto tcpv6_prot = {
1950         .name                   = "TCPv6",
1951         .owner                  = THIS_MODULE,
1952         .close                  = tcp_close,
1953         .pre_connect            = tcp_v6_pre_connect,
1954         .connect                = tcp_v6_connect,
1955         .disconnect             = tcp_disconnect,
1956         .accept                 = inet_csk_accept,
1957         .ioctl                  = tcp_ioctl,
1958         .init                   = tcp_v6_init_sock,
1959         .destroy                = tcp_v6_destroy_sock,
1960         .shutdown               = tcp_shutdown,
1961         .setsockopt             = tcp_setsockopt,
1962         .getsockopt             = tcp_getsockopt,
1963         .keepalive              = tcp_set_keepalive,
1964         .recvmsg                = tcp_recvmsg,
1965         .sendmsg                = tcp_sendmsg,
1966         .sendpage               = tcp_sendpage,
1967         .backlog_rcv            = tcp_v6_do_rcv,
1968         .release_cb             = tcp_release_cb,
1969         .hash                   = inet6_hash,
1970         .unhash                 = inet_unhash,
1971         .get_port               = inet_csk_get_port,
1972         .enter_memory_pressure  = tcp_enter_memory_pressure,
1973         .leave_memory_pressure  = tcp_leave_memory_pressure,
1974         .stream_memory_free     = tcp_stream_memory_free,
1975         .sockets_allocated      = &tcp_sockets_allocated,
1976         .memory_allocated       = &tcp_memory_allocated,
1977         .memory_pressure        = &tcp_memory_pressure,
1978         .orphan_count           = &tcp_orphan_count,
1979         .sysctl_mem             = sysctl_tcp_mem,
1980         .sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_wmem),
1981         .sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_rmem),
1982         .max_header             = MAX_TCP_HEADER,
1983         .obj_size               = sizeof(struct tcp6_sock),
1984         .slab_flags             = SLAB_TYPESAFE_BY_RCU,
1985         .twsk_prot              = &tcp6_timewait_sock_ops,
1986         .rsk_prot               = &tcp6_request_sock_ops,
1987         .h.hashinfo             = &tcp_hashinfo,
1988         .no_autobind            = true,
1989 #ifdef CONFIG_COMPAT
1990         .compat_setsockopt      = compat_tcp_setsockopt,
1991         .compat_getsockopt      = compat_tcp_getsockopt,
1992 #endif
1993         .diag_destroy           = tcp_abort,
1994 };
1995
1996 /* Thinking of making this const? Don't:
1997  * early_demux can change based on a sysctl.
1998  */
1999 static struct inet6_protocol tcpv6_protocol = {
2000         .early_demux    =       tcp_v6_early_demux,
2001         .early_demux_handler =  tcp_v6_early_demux,
2002         .handler        =       tcp_v6_rcv,
2003         .err_handler    =       tcp_v6_err,
2004         .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2005 };
2006
2007 static struct inet_protosw tcpv6_protosw = {
2008         .type           =       SOCK_STREAM,
2009         .protocol       =       IPPROTO_TCP,
2010         .prot           =       &tcpv6_prot,
2011         .ops            =       &inet6_stream_ops,
2012         .flags          =       INET_PROTOSW_PERMANENT |
2013                                 INET_PROTOSW_ICSK,
2014 };
2015
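/* Each network namespace gets a control socket, used among other things
 * to send RSTs and ACKs on behalf of sockets we do not own (see
 * tcp_v6_send_response()).
 */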
2016 static int __net_init tcpv6_net_init(struct net *net)
2017 {
2018         return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2019                                     SOCK_RAW, IPPROTO_TCP, net);
2020 }
2021
2022 static void __net_exit tcpv6_net_exit(struct net *net)
2023 {
2024         inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2025 }
2026
2027 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2028 {
2029         inet_twsk_purge(&tcp_hashinfo, AF_INET6);
2030 }
2031
2032 static struct pernet_operations tcpv6_net_ops = {
2033         .init       = tcpv6_net_init,
2034         .exit       = tcpv6_net_exit,
2035         .exit_batch = tcpv6_net_exit_batch,
2036 };
2037
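/* Registration order matters: the protocol handler first, then the
 * protosw entry, then the pernet ops; the error labels unwind the
 * previously completed steps in reverse.
 */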
2038 int __init tcpv6_init(void)
2039 {
2040         int ret;
2041
2042         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2043         if (ret)
2044                 goto out;
2045
2046         /* register inet6 protocol */
2047         ret = inet6_register_protosw(&tcpv6_protosw);
2048         if (ret)
2049                 goto out_tcpv6_protocol;
2050
2051         ret = register_pernet_subsys(&tcpv6_net_ops);
2052         if (ret)
2053                 goto out_tcpv6_protosw;
2054 out:
2055         return ret;
2056
2057 out_tcpv6_protosw:
2058         inet6_unregister_protosw(&tcpv6_protosw);
2059 out_tcpv6_protocol:
2060         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2061         goto out;
2062 }
2063
2064 void tcpv6_exit(void)
2065 {
2066         unregister_pernet_subsys(&tcpv6_net_ops);
2067         inet6_unregister_protosw(&tcpv6_protosw);
2068         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2069 }