/*
 *      TCP over IPv6
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      Based on:
 *      linux/net/ipv4/tcp.c
 *      linux/net/ipv4/tcp_input.c
 *      linux/net/ipv4/tcp_output.c
 *
 *      Fixes:
 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void     tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void     tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                      struct request_sock *req);

static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
                                                   const struct in6_addr *addr)
{
        return NULL;
}
#endif

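/* Cache the validated RX dst on the socket for the established fast
 * path; tcp_v6_do_rcv() revalidates it against the skb's incoming
 * interface and the rt6 cookie saved here before reusing it.
 */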
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);

        if (dst && dst_hold_safe(dst)) {
                const struct rt6_info *rt = (const struct rt6_info *)dst;

                sk->sk_rx_dst = dst;
                inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
                inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
        }
}

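/* Initial sequence numbers and timestamp offsets are derived from the
 * address/port four-tuple via keyed hashes (secure_tcpv6_seq() and
 * secure_tcpv6_ts_off()), so they are hard to predict off-path but
 * stable for a given flow.
 */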
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
        return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
                                ipv6_hdr(skb)->saddr.s6_addr32,
                                tcp_hdr(skb)->dest,
                                tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
        return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
                                   ipv6_hdr(skb)->saddr.s6_addr32);
}

static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
                              int addr_len)
{
        /* This check is replicated from tcp_v6_connect() and intended to
         * prevent the BPF program called below from accessing bytes that are
         * out of the bounds specified by the user in addr_len.
         */
        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        sock_owned_by_me(sk);

        return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}

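/* Active open: validate the destination address, handle flow labels
 * and link-local scope ids, divert v4-mapped destinations to
 * tcp_v4_connect(), then route the flow, choose a source address,
 * and send the SYN.
 */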
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                          int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p, final;
        struct ipv6_txoptions *opt;
        struct flowi6 fl6;
        struct dst_entry *dst;
        int addr_type;
        int err;
        struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl6, 0, sizeof(fl6));

        if (np->sndflow) {
                fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl6.flowlabel);
                if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (!flowlabel)
                                return -EINVAL;
                        fl6_sock_release(flowlabel);
                }
        }

        /*
         *      connect() to INADDR_ANY means loopback (BSD'ism).
         */

        if (ipv6_addr_any(&usin->sin6_addr)) {
                if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
                        ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
                                               &usin->sin6_addr);
                else
                        usin->sin6_addr = in6addr_loopback;
        }

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if (addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type & IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If an interface was set while binding, the indices
                         * must coincide.
                         */
                        if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connecting to a link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        if (tp->rx_opt.ts_recent_stamp &&
            !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
                tp->rx_opt.ts_recent = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq = 0;
        }

        sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;

        /*
         *      TCP over IPv4
         */

        if (addr_type & IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                icsk->icsk_af_ops = &ipv6_mapped;
                sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &ipv6_specific;
                        sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                        tp->af_specific = &tcp_sock_ipv6_specific;
#endif
                        goto failure;
                }
                np->saddr = sk->sk_v6_rcv_saddr;

                return err;
        }

        if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
                saddr = &sk->sk_v6_rcv_saddr;

        fl6.flowi6_proto = IPPROTO_TCP;
        fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
        fl6.flowi6_uid = sk->sk_uid;

        opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
        final_p = fl6_update_dst(&fl6, opt, &final);

        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

        dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto failure;
        }

        if (!saddr) {
                saddr = &fl6.saddr;
                sk->sk_v6_rcv_saddr = *saddr;
        }

        /* set the source address */
        np->saddr = *saddr;
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;

        sk->sk_gso_type = SKB_GSO_TCPV6;
        ip6_dst_store(sk, dst, NULL, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (opt)
                icsk->icsk_ext_hdr_len = opt->opt_flen +
                                         opt->opt_nflen;

        tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

        inet->inet_dport = usin->sin6_port;

        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet6_hash_connect(tcp_death_row, sk);
        if (err)
                goto late_failure;

        sk_set_txhash(sk);

        if (likely(!tp->repair)) {
                if (!tp->write_seq)
                        tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
                                                         sk->sk_v6_daddr.s6_addr32,
                                                         inet->inet_sport,
                                                         inet->inet_dport);
                tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
                                                   np->saddr.s6_addr32,
                                                   sk->sk_v6_daddr.s6_addr32);
        }

        if (tcp_fastopen_defer_connect(sk, &err))
                return err;
        if (err)
                goto late_failure;

        err = tcp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        tcp_set_state(sk, TCP_CLOSE);
failure:
        inet->inet_dport = 0;
        sk->sk_route_caps = 0;
        return err;
}

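/* Invoked once an ICMPV6_PKT_TOOBIG notification has been stored in
 * tp->mtu_info: refresh the cached path MTU and, if it shrank below
 * the current one, shrink the MSS and retransmit what no longer fits.
 */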
static void tcp_v6_mtu_reduced(struct sock *sk)
{
        struct dst_entry *dst;

        if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                return;

        dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
        if (!dst)
                return;

        if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                tcp_sync_mss(sk, dst_mtu(dst));
                tcp_simple_retransmit(sk);
        }
}

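/* ICMPv6 error handler: locate the socket owning the offending
 * segment and react to the error type (redirect, packet-too-big,
 * or a hard error that terminates the connection attempt).
 */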
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                u8 type, u8 code, int offset, __be32 info)
{
        const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
        const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
        struct net *net = dev_net(skb->dev);
        struct request_sock *fastopen;
        struct ipv6_pinfo *np;
        struct tcp_sock *tp;
        __u32 seq, snd_una;
        struct sock *sk;
        bool fatal;
        int err;

        sk = __inet6_lookup_established(net, &tcp_hashinfo,
                                        &hdr->daddr, th->dest,
                                        &hdr->saddr, ntohs(th->source),
                                        skb->dev->ifindex, inet6_sdif(skb));

        if (!sk) {
                __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
                                  ICMP6_MIB_INERRORS);
                return;
        }

        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }
        seq = ntohl(th->seq);
        fatal = icmpv6_err_convert(type, code, &err);
        if (sk->sk_state == TCP_NEW_SYN_RECV)
                return tcp_req_err(sk, seq, fatal);

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
                __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
                __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }

        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
                __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == NDISC_REDIRECT) {
                if (!sock_owned_by_user(sk)) {
                        struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

                        if (dst)
                                dst->ops->redirect(dst, sk, skb);
                }
                goto out;
        }

        if (type == ICMPV6_PKT_TOOBIG) {
                /* We are not interested in TCP_LISTEN and open_requests
                 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
                 * they should go through unfragmented).
                 */
                if (sk->sk_state == TCP_LISTEN)
                        goto out;

                if (!ip6_sk_accept_pmtu(sk))
                        goto out;

                tp->mtu_info = ntohl(info);
                if (!sock_owned_by_user(sk))
                        tcp_v6_mtu_reduced(sk);
                else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
                                           &sk->sk_tsq_flags))
                        sock_hold(sk);
                goto out;
        }

        /* Might be for a request_sock */
        switch (sk->sk_state) {
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket
                 * is already accepted it is treated as a connected one below.
                 */
                if (fastopen && !fastopen->sk)
                        break;

                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
                        sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */

                        tcp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

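/* Build and send a SYN-ACK for the given request: grab a route if the
 * caller did not supply one, and echo the client's flow label when
 * np->repflow is set.
 */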
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              enum tcp_synack_type synack_type)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_txoptions *opt;
        struct flowi6 *fl6 = &fl->u.ip6;
        struct sk_buff *skb;
        int err = -ENOMEM;

        /* First, grab a route. */
        if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
                                               IPPROTO_TCP)) == NULL)
                goto done;

        skb = tcp_make_synack(sk, dst, req, foc, synack_type);

        if (skb) {
                __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
                                    &ireq->ir_v6_rmt_addr);

                fl6->daddr = ireq->ir_v6_rmt_addr;
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

                rcu_read_lock();
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
                err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }

done:
        return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
        kfree(inet_rsk(req)->ipv6_opt);
        kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
                                                   const struct in6_addr *addr)
{
        return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
                                                const struct sock *addr_sk)
{
        return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

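/* setsockopt(TCP_MD5SIG/TCP_MD5SIG_EXT): validate the address and
 * optional prefix length, then add or delete the key; v4-mapped
 * addresses are stored as AF_INET keys.
 */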
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
                                 char __user *optval, int optlen)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
        u8 prefixlen;

        if (optlen < sizeof(cmd))
                return -EINVAL;

        if (copy_from_user(&cmd, optval, sizeof(cmd)))
                return -EFAULT;

        if (sin6->sin6_family != AF_INET6)
                return -EINVAL;

        if (optname == TCP_MD5SIG_EXT &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
                prefixlen = cmd.tcpm_prefixlen;
                if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
                                        prefixlen > 32))
                        return -EINVAL;
        } else {
                prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
        }

        if (!cmd.tcpm_keylen) {
                if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                        return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                                              AF_INET, prefixlen);
                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
                                      AF_INET6, prefixlen);
        }

        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;

        if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                                      AF_INET, prefixlen, cmd.tcpm_key,
                                      cmd.tcpm_keylen, GFP_KERNEL);

        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
                              AF_INET6, prefixlen, cmd.tcpm_key,
                              cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
                                   const struct in6_addr *daddr,
                                   const struct in6_addr *saddr,
                                   const struct tcphdr *th, int nbytes)
{
        struct tcp6_pseudohdr *bp;
        struct scatterlist sg;
        struct tcphdr *_th;

        bp = hp->scratch;
        /* 1. TCP pseudo-header (RFC2460) */
        bp->saddr = *saddr;
        bp->daddr = *daddr;
        bp->protocol = cpu_to_be32(IPPROTO_TCP);
        bp->len = cpu_to_be32(nbytes);

        _th = (struct tcphdr *)(bp + 1);
        memcpy(_th, th, sizeof(*th));
        _th->check = 0;

        sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
        ahash_request_set_crypt(hp->md5_req, &sg, NULL,
                                sizeof(*bp) + sizeof(*th));
        return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               const struct in6_addr *daddr, struct in6_addr *saddr,
                               const struct tcphdr *th)
{
        struct tcp_md5sig_pool *hp;
        struct ahash_request *req;

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        req = hp->md5_req;

        if (crypto_ahash_init(req))
                goto clear_hash;
        if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        ahash_request_set_crypt(req, NULL, md5_hash, 0);
        if (crypto_ahash_final(req))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
                               const struct tcp_md5sig_key *key,
                               const struct sock *sk,
                               const struct sk_buff *skb)
{
        const struct in6_addr *saddr, *daddr;
        struct tcp_md5sig_pool *hp;
        struct ahash_request *req;
        const struct tcphdr *th = tcp_hdr(skb);

        if (sk) { /* valid for establish/request sockets */
                saddr = &sk->sk_v6_rcv_saddr;
                daddr = &sk->sk_v6_daddr;
        } else {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
                saddr = &ip6h->saddr;
                daddr = &ip6h->daddr;
        }

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        req = hp->md5_req;

        if (crypto_ahash_init(req))
                goto clear_hash;

        if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
                goto clear_hash;
        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        ahash_request_set_crypt(req, NULL, md5_hash, 0);
        if (crypto_ahash_final(req))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}

#endif

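/* Returns true if the segment must be dropped: a signature was
 * expected but missing, present but unexpected, or fails to verify
 * against the key configured for the peer address.
 */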
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
                                    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
        const __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        const struct tcphdr *th = tcp_hdr(skb);
        int genhash;
        u8 newhash[16];

        hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
        hash_location = tcp_parse_md5sig_option(th);

        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
                return false;

        if (hash_expected && !hash_location) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }

        if (!hash_expected && hash_location) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }

        /* check the signature */
        genhash = tcp_v6_md5_hash_skb(newhash,
                                      hash_expected,
                                      NULL, skb);

        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
                net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
                                     genhash ? "failed" : "mismatch",
                                     &ip6h->saddr, ntohs(th->source),
                                     &ip6h->daddr, ntohs(th->dest));
                return true;
        }
#endif
        return false;
}

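/* Fill the request sock from the incoming SYN: record the peer and
 * local addresses (plus the interface for link-local peers) and keep
 * a reference to the skb when the listener wants its IPv6 options.
 */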
static void tcp_v6_init_req(struct request_sock *req,
                            const struct sock *sk_listener,
                            struct sk_buff *skb)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct ipv6_pinfo *np = inet6_sk(sk_listener);

        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

        /* So that link locals have meaning */
        if (!sk_listener->sk_bound_dev_if &&
            ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq->ir_iif = tcp_v6_iif(skb);

        if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
            (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
             np->rxopt.bits.rxinfo ||
             np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
             np->rxopt.bits.rxohlim || np->repflow)) {
                refcount_inc(&skb->users);
                ireq->pktopts = skb;
        }
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
                                          struct flowi *fl,
                                          const struct request_sock *req)
{
        return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
        .family         =       AF_INET6,
        .obj_size       =       sizeof(struct tcp6_request_sock),
        .rtx_syn_ack    =       tcp_rtx_synack,
        .send_ack       =       tcp_v6_reqsk_send_ack,
        .destructor     =       tcp_v6_reqsk_destructor,
        .send_reset     =       tcp_v6_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
        .mss_clamp      =       IPV6_MIN_MTU - sizeof(struct tcphdr) -
                                sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
        .req_md5_lookup =       tcp_v6_md5_lookup,
        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
#endif
        .init_req       =       tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
        .cookie_init_seq =      cookie_v6_init_sequence,
#endif
        .route_req      =       tcp_v6_route_req,
        .init_seq       =       tcp_v6_init_seq,
        .init_ts_off    =       tcp_v6_init_ts_off,
        .send_synack    =       tcp_v6_send_synack,
};

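/* Send a stand-alone ACK or RST derived from an incoming segment,
 * using the per-netns control socket rather than a full TCP socket:
 * addresses and ports are swapped from skb, and an MD5 signature
 * option is appended when a key is supplied.
 */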
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
                                 u32 ack, u32 win, u32 tsval, u32 tsecr,
                                 int oif, struct tcp_md5sig_key *key, int rst,
                                 u8 tclass, __be32 label)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct tcphdr *t1;
        struct sk_buff *buff;
        struct flowi6 fl6;
        struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
        struct sock *ctl_sk = net->ipv6.tcp_sk;
        unsigned int tot_len = sizeof(struct tcphdr);
        struct dst_entry *dst;
        __be32 *topt;
        __u32 mark = 0;

        if (tsecr)
                tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
        if (key)
                tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
                         GFP_ATOMIC);
        if (!buff)
                return;

        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

        t1 = skb_push(buff, tot_len);
        skb_reset_transport_header(buff);

        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = tot_len / 4;
        t1->seq = htonl(seq);
        t1->ack_seq = htonl(ack);
        t1->ack = !rst || !th->ack;
        t1->rst = rst;
        t1->window = htons(win);

        topt = (__be32 *)(t1 + 1);

        if (tsecr) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
                *topt++ = htonl(tsval);
                *topt++ = htonl(tsecr);
        }

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
                tcp_v6_md5_hash_hdr((__u8 *)topt, key,
                                    &ipv6_hdr(skb)->saddr,
                                    &ipv6_hdr(skb)->daddr, t1);
        }
#endif

        memset(&fl6, 0, sizeof(fl6));
        fl6.daddr = ipv6_hdr(skb)->saddr;
        fl6.saddr = ipv6_hdr(skb)->daddr;
        fl6.flowlabel = label;

        buff->ip_summed = CHECKSUM_PARTIAL;
        buff->csum = 0;

        __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

        fl6.flowi6_proto = IPPROTO_TCP;
        if (rt6_need_strict(&fl6.daddr) && !oif)
                fl6.flowi6_oif = tcp_v6_iif(skb);
        else {
                if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
                        oif = skb->skb_iif;

                fl6.flowi6_oif = oif;
        }

        if (sk)
                mark = (sk->sk_state == TCP_TIME_WAIT) ?
                        inet_twsk(sk)->tw_mark : sk->sk_mark;
        fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

        /* Pass a socket to ip6_dst_lookup_flow even if it is for an RST.
         * The underlying function will use it to retrieve the network
         * namespace.
         */
        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(buff, dst);
                ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
                TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
                if (rst)
                        TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
                return;
        }

        kfree_skb(buff);
}

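/* Send a RST in reply to skb. When no full socket is available but
 * the segment carries an MD5 option, the key is recovered through a
 * matching listener so that only correctly signed segments elicit a
 * (signed) reset.
 */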
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        u32 seq = 0, ack_seq = 0;
        struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
        const __u8 *hash_location = NULL;
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        unsigned char newhash[16];
        int genhash;
        struct sock *sk1 = NULL;
#endif
        int oif = 0;

        if (th->rst)
                return;

        /* If sk is not NULL, it means we did a successful lookup and the
         * incoming route had to be correct. prequeue might have dropped
         * our dst.
         */
        if (!sk && !ipv6_unicast_destination(skb))
                return;

#ifdef CONFIG_TCP_MD5SIG
        rcu_read_lock();
        hash_location = tcp_parse_md5sig_option(th);
        if (sk && sk_fullsock(sk)) {
                key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
        } else if (hash_location) {
                /*
                 * The active side is lost. Try to find the listening socket
                 * through the source port, and then find the md5 key through
                 * the listening socket. We do not lose security here:
                 * the incoming packet is checked with the md5 hash of the
                 * found key; no RST is generated if the md5 hash doesn't
                 * match.
                 */
                sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
                                           &tcp_hashinfo, NULL, 0,
                                           &ipv6h->saddr,
                                           th->source, &ipv6h->daddr,
                                           ntohs(th->source), tcp_v6_iif(skb),
                                           tcp_v6_sdif(skb));
                if (!sk1)
                        goto out;

                key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
                if (!key)
                        goto out;

                genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto out;
        }
#endif

        if (th->ack)
                seq = ntohl(th->ack_seq);
        else
                ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
                          (th->doff << 2);

        if (sk) {
                oif = sk->sk_bound_dev_if;
                if (sk_fullsock(sk))
                        trace_tcp_send_reset(sk, skb);
        }

        tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
        rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
                            u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key, u8 tclass,
                            __be32 label)
{
        tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
                             tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp_raw() + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
                        tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

        inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
        /* RFC 7323 2.3
         * The window field (SEG.WND) of every outgoing segment, with the
         * exception of <SYN> segments, MUST be right-shifted by
         * Rcv.Wind.Shift bits:
         */
        tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
                        tcp_rsk(req)->rcv_nxt,
                        req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
                        req->ts_recent, sk->sk_bound_dev_if,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
                        0, 0);
}

static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
        const struct tcphdr *th = tcp_hdr(skb);

        if (!th->syn)
                sk = cookie_v6_check(sk, skb);
#endif
        return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        return tcp_conn_request(&tcp6_request_sock_ops,
                                &tcp_request_sock_ipv6_ops, sk, skb);

drop:
        tcp_listendrop(sk);
        return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
        /* We need to move the header back to the beginning if
         * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be
         * called again. ip6_datagram_recv_specific_ctl() also expects
         * IP6CB to be there.
         */
        memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
                sizeof(struct inet6_skb_parm));
}

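/* Create the child socket once the handshake completes. The
 * ETH_P_IP branch handles v4-mapped connections accepted on an IPv6
 * listener; the native path clones the listener's IPv6 options and
 * MD5 key onto the new socket.
 */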
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst,
                                         struct request_sock *req_unhash,
                                         bool *own_req)
{
        struct inet_request_sock *ireq;
        struct ipv6_pinfo *newnp;
        const struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_txoptions *opt;
        struct tcp6_sock *newtcp6sk;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif
        struct flowi6 fl6;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped
                 */

                newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
                                             req_unhash, own_req);

                if (!newsk)
                        return NULL;

                newtcp6sk = (struct tcp6_sock *)newsk;
                inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

                newinet = inet_sk(newsk);
                newnp = inet6_sk(newsk);
                newtp = tcp_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                newnp->saddr = newsk->sk_v6_rcv_saddr;

                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                newnp->ipv6_mc_list = NULL;
                newnp->ipv6_ac_list = NULL;
                newnp->ipv6_fl_list = NULL;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = tcp_v6_iif(skb);
                newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
                newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
                if (np->repflow)
                        newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, tcp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* This is a tricky place. Until this moment the IPv4 tcp
                   code worked with the IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        ireq = inet_rsk(req);

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (!dst) {
                dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
                if (!dst)
                        goto out;
        }

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (!newsk)
                goto out_nonewsk;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, tcp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        newsk->sk_gso_type = SKB_GSO_TCPV6;
        ip6_dst_store(newsk, dst, NULL, NULL);
        inet6_sk_rx_dst_set(newsk, skb);

        newtcp6sk = (struct tcp6_sock *)newsk;
        inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

        newtp = tcp_sk(newsk);
        newinet = inet_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
        newnp->saddr = ireq->ir_v6_loc_addr;
        newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if = ireq->ir_iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->inet_opt = NULL;
        newnp->ipv6_mc_list = NULL;
        newnp->ipv6_ac_list = NULL;
        newnp->ipv6_fl_list = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        newnp->pktoptions = NULL;
        newnp->opt        = NULL;
        newnp->mcast_oif  = tcp_v6_iif(skb);
        newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
        newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
        if (np->repflow)
                newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

        /* Clone native IPv6 options from the listening socket (if any).

           Yes, keeping a reference count would be much more clever, but we
           do one more thing here: reattach optmem to newsk.
         */
        opt = ireq->ipv6_opt;
        if (!opt)
                opt = rcu_dereference(np->opt);
        if (opt) {
                opt = ipv6_dup_options(newsk, opt);
                RCU_INIT_POINTER(newnp->opt, opt);
        }
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (opt)
                inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
                                                    opt->opt_flen;

        tcp_ca_openreq_child(newsk, dst);

        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

        tcp_initialize_rcv_mss(newsk);

        newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
        newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
        if (key) {
                /* We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
                               AF_INET6, 128, key->key, key->keylen,
                               sk_gfp_mask(sk, GFP_ATOMIC));
        }
#endif

        if (__inet_inherit_port(sk, newsk) < 0) {
                inet_csk_prepare_forced_close(newsk);
                tcp_done(newsk);
                goto out;
        }
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
        if (*own_req) {
                tcp_move_syn(newtp, req);

                /* Clone pktoptions received with SYN, if we own the req */
                if (ireq->pktopts) {
                        newnp->pktoptions = skb_clone(ireq->pktopts,
                                                      sk_gfp_mask(sk, GFP_ATOMIC));
                        consume_skb(ireq->pktopts);
                        ireq->pktopts = NULL;
                        if (newnp->pktoptions) {
                                tcp_v6_restore_cb(newnp->pktoptions);
                                skb_set_owner_r(newnp->pktoptions, newsk);
                        }
                }
        }

        return newsk;

out_overflow:
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
        dst_release(dst);
out:
        tcp_listendrop(sk);
        return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp;
        struct sk_buff *opt_skb = NULL;

        /* Imagine: the socket is IPv6 and an IPv4 packet arrives,
           goes to the IPv4 receive handler and is backlogged.
           From the backlog it always comes here. Kerboom...
           Fortunately, tcp_rcv_established and rcv_established
           handle them correctly, but that is not the case with
           tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_do_rcv(sk, skb);

        /*
         *      socket locking is here for SMP purposes as backlog rcv
         *      is currently called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code where we
           can do this without affecting IPv4.
           The rest of the code is protocol independent,
           and I do not like the idea of uglifying IPv4.

           Actually, the whole idea behind IPV6_PKTOPTIONS
           does not look very well thought out. For now we latch
           the options received in the last packet enqueued
           by tcp. Feel free to propose a better solution.
                                               --ANK (980728)
         */
        if (np->rxopt.all)
                opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                struct dst_entry *dst = sk->sk_rx_dst;

                sock_rps_save_rxhash(sk, skb);
                sk_mark_napi_id(sk, skb);
                if (dst) {
                        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
                            dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
                                dst_release(dst);
                                sk->sk_rx_dst = NULL;
                        }
                }

                tcp_rcv_established(sk, skb);
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
        }

        if (tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v6_cookie_check(sk, skb);

                if (!nsk)
                        goto discard;

                if (nsk != sk) {
                        if (tcp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        } else
                sock_rps_save_rxhash(sk, skb);

        if (tcp_rcv_state_process(sk, skb))
                goto reset;
        if (opt_skb)
                goto ipv6_pktoptions;
        return 0;

reset:
        tcp_v6_send_reset(sk, skb);
discard:
        if (opt_skb)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
csum_err:
        TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
        TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
        goto discard;

ipv6_pktoptions:
        /* What is going on here?

           1. skb was enqueued by tcp.
           2. skb was added to the tail of the read queue, rather than
              out of order.
           3. The socket is not in a passive state.
           4. Finally, it really contains options that the user wants
              to receive.
         */
        tp = tcp_sk(sk);
        if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
            !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
                if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
                        np->mcast_oif = tcp_v6_iif(opt_skb);
                if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
                        np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
                if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
                        np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
                if (np->repflow)
                        np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
                if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
                        skb_set_owner_r(opt_skb, sk);
                        tcp_v6_restore_cb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, opt_skb);
                } else {
                        __kfree_skb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, NULL);
                }
        }

        kfree_skb(opt_skb);
        return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
                           const struct tcphdr *th)
{
        /* This is tricky: we move IP6CB at its correct location into
         * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
         * _decode_session6() uses IP6CB().
         * barrier() makes sure the compiler won't play aliasing games.
         */
        memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
                sizeof(struct inet6_skb_parm));
        barrier();

        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff * 4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
        TCP_SKB_CB(skb)->tcp_tw_isn = 0;
        TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
        TCP_SKB_CB(skb)->sacked = 0;
        TCP_SKB_CB(skb)->has_rxtstamp =
                        skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

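/* Main receive path for IPv6 TCP: validate the header and checksum,
 * look up the owning socket, and either process the segment in
 * context or defer it via the socket backlog.
 */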
1426 static int tcp_v6_rcv(struct sk_buff *skb)
1427 {
1428         int sdif = inet6_sdif(skb);
1429         const struct tcphdr *th;
1430         const struct ipv6hdr *hdr;
1431         bool refcounted;
1432         struct sock *sk;
1433         int ret;
1434         struct net *net = dev_net(skb->dev);
1435
1436         if (skb->pkt_type != PACKET_HOST)
1437                 goto discard_it;
1438
1439         /*
1440          *      Count it even if it's bad.
1441          */
1442         __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1443
1444         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1445                 goto discard_it;
1446
1447         th = (const struct tcphdr *)skb->data;
1448
1449         if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1450                 goto bad_packet;
1451         if (!pskb_may_pull(skb, th->doff*4))
1452                 goto discard_it;
1453
1454         if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1455                 goto csum_error;
1456
1457         th = (const struct tcphdr *)skb->data;
1458         hdr = ipv6_hdr(skb);
1459
1460 lookup:
1461         sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1462                                 th->source, th->dest, inet6_iif(skb), sdif,
1463                                 &refcounted);
1464         if (!sk)
1465                 goto no_tcp_socket;
1466
1467 process:
1468         if (sk->sk_state == TCP_TIME_WAIT)
1469                 goto do_time_wait;
1470
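        /* TCP_NEW_SYN_RECV means the lookup hit a request socket (the
         * mini-socket created on SYN). The segment is handled under the
         * listener: tcp_check_req() either completes the handshake and
         * returns a full socket, or reports that another CPU stole the
         * request first.
         */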
        if (sk->sk_state == TCP_NEW_SYN_RECV) {
                struct request_sock *req = inet_reqsk(sk);
                bool req_stolen = false;
                struct sock *nsk;

                sk = req->rsk_listener;
                if (tcp_v6_inbound_md5_hash(sk, skb)) {
                        sk_drops_add(sk, skb);
                        reqsk_put(req);
                        goto discard_it;
                }
                if (tcp_checksum_complete(skb)) {
                        reqsk_put(req);
                        goto csum_error;
                }
                if (unlikely(sk->sk_state != TCP_LISTEN)) {
                        inet_csk_reqsk_queue_drop_and_put(sk, req);
                        goto lookup;
                }
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
                if (!tcp_filter(sk, skb)) {
                        th = (const struct tcphdr *)skb->data;
                        hdr = ipv6_hdr(skb);
                        tcp_v6_fill_cb(skb, hdr, th);
                        nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
                }
                if (!nsk) {
                        reqsk_put(req);
                        if (req_stolen) {
                                /* Another CPU got exclusive access to req
                                 * and created a full-blown socket.
                                 * Try to feed this packet to that socket
                                 * instead of discarding it.
                                 */
                                tcp_v6_restore_cb(skb);
                                sock_put(sk);
                                goto lookup;
                        }
                        goto discard_and_relse;
                }
                if (nsk == sk) {
                        reqsk_put(req);
                        tcp_v6_restore_cb(skb);
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v6_send_reset(nsk, skb);
                        goto discard_and_relse;
                } else {
                        sock_put(sk);
                        return 0;
                }
        }
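        /* min_hopcount implements the IPV6_MINHOPCOUNT socket option, a
         * generalized hop-limit security check in the spirit of RFC 5082
         * (GTSM): segments arriving with a hop limit below the configured
         * floor are dropped as likely spoofed.
         */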
        if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
                __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto discard_and_relse;
        }

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        if (tcp_v6_inbound_md5_hash(sk, skb))
                goto discard_and_relse;

        if (tcp_filter(sk, skb))
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        hdr = ipv6_hdr(skb);
        tcp_v6_fill_cb(skb, hdr, th);

        skb->dev = NULL;

        if (sk->sk_state == TCP_LISTEN) {
                ret = tcp_v6_do_rcv(sk, skb);
                goto put_and_return;
        }

        sk_incoming_cpu_update(sk);

        bh_lock_sock_nested(sk);
        tcp_segs_in(tcp_sk(sk), skb);
        ret = 0;
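        /* If a user-space thread currently owns the socket lock, the
         * segment cannot be processed here in softirq context; it is
         * queued on the backlog and handled when the owner releases the
         * lock (see release_sock() and the backlog_rcv callback below).
         */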
        if (!sock_owned_by_user(sk)) {
                ret = tcp_v6_do_rcv(sk, skb);
        } else if (tcp_add_backlog(sk, skb)) {
                goto discard_and_relse;
        }
        bh_unlock_sock(sk);

put_and_return:
        if (refcounted)
                sock_put(sk);
        return ret ? -1 : 0;

no_tcp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;

        tcp_v6_fill_cb(skb, hdr, th);

        if (tcp_checksum_complete(skb)) {
csum_error:
                __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
                __TCP_INC_STATS(net, TCP_MIB_INERRS);
        } else {
                tcp_v6_send_reset(NULL, skb);
        }

discard_it:
        kfree_skb(skb);
        return 0;

discard_and_relse:
        sk_drops_add(sk, skb);
        if (refcounted)
                sock_put(sk);
        goto discard_it;

do_time_wait:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }

        tcp_v6_fill_cb(skb, hdr, th);

        if (tcp_checksum_complete(skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto csum_error;
        }

        switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN:
        {
                struct sock *sk2;

                sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
                                            skb, __tcp_hdrlen(th),
                                            &ipv6_hdr(skb)->saddr, th->source,
                                            &ipv6_hdr(skb)->daddr,
                                            ntohs(th->dest), tcp_v6_iif(skb),
                                            sdif);
                if (sk2) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);

                        inet_twsk_deschedule_put(tw);
                        sk = sk2;
                        tcp_v6_restore_cb(skb);
                        refcounted = false;
                        goto process;
                }
        }
                /* to ACK */
                /* fall through */
        case TCP_TW_ACK:
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
                tcp_v6_send_reset(sk, skb);
                inet_twsk_deschedule_put(inet_twsk(sk));
                goto discard_it;
        case TCP_TW_SUCCESS:
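                /* Acceptable segment, nothing to send in reply:
                 * fall through and silently drop it.
                 */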
                ;
        }
        goto discard_it;
}

static void tcp_v6_early_demux(struct sk_buff *skb)
{
        const struct ipv6hdr *hdr;
        const struct tcphdr *th;
        struct sock *sk;

        if (skb->pkt_type != PACKET_HOST)
                return;

        if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
                return;

        hdr = ipv6_hdr(skb);
        th = tcp_hdr(skb);

        if (th->doff < sizeof(struct tcphdr) / 4)
                return;

        /* Note: we use inet6_iif() here, not tcp_v6_iif() */
        sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
                                        &hdr->saddr, th->source,
                                        &hdr->daddr, ntohs(th->dest),
                                        inet6_iif(skb), inet6_sdif(skb));
        if (sk) {
                skb->sk = sk;
                skb->destructor = sock_edemux;
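                /* Reuse the cached rx dst only if it is still valid: the
                 * cookie guards against stale IPv6 routes, and the ifindex
                 * must match the incoming device.
                 */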
                if (sk_fullsock(sk)) {
                        struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

                        if (dst)
                                dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
                        if (dst &&
                            inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
                                skb_dst_set_noref(skb, dst);
                }
        }
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
        .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
        .twsk_unique    = tcp_twsk_unique,
        .twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = tcp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
        .sk_rx_dst_set     = inet6_sk_rx_dst_set,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
        .net_header_len    = sizeof(struct ipv6hdr),
        .net_frag_header_len = sizeof(struct frag_hdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
        .mtu_reduced       = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
        .md5_lookup     =       tcp_v6_md5_lookup,
        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
        .md5_parse      =       tcp_v6_parse_md5_keys,
};
#endif

/*
 *      TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = tcp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .sk_rx_dst_set     = inet_sk_rx_dst_set,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
        .mtu_reduced       = tcp_v4_mtu_reduced,
};

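/* An AF_INET6 socket that connects to a v4-mapped address
 * (::ffff:a.b.c.d) is switched from ipv6_specific to these mapped ops
 * at connect time, so it transmits plain IPv4 while keeping the IPv6
 * sockaddr API.
 */
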
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
        .md5_lookup     =       tcp_v4_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
        .md5_parse      =       tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        tcp_init_sock(sk);

        icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
        tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

        return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
        tcp_v4_destroy_sock(sk);
        inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
                         const struct request_sock *req, int i)
{
        long ttd = req->rsk_timer.expires - jiffies;
        const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
        const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

        if (ttd < 0)
                ttd = 0;

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
                   inet_rsk(req)->ir_num,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3],
                   ntohs(inet_rsk(req)->ir_rmt_port),
                   TCP_SYN_RECV,
                   0, 0, /* could print option size, but that is af dependent. */
                   1,   /* timers active (only the expire timer) */
                   jiffies_to_clock_t(ttd),
                   req->num_timeout,
                   from_kuid_munged(seq_user_ns(seq),
                                    sock_i_uid(req->rsk_listener)),
                   0,  /* non standard timer */
                   0, /* open_requests have no inode */
                   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
        int timer_active;
        unsigned long timer_expires;
        const struct inet_sock *inet = inet_sk(sp);
        const struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
        const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
        int rx_queue;
        int state;

        dest  = &sp->sk_v6_daddr;
        src   = &sp->sk_v6_rcv_saddr;
        destp = ntohs(inet->inet_dport);
        srcp  = ntohs(inet->inet_sport);

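        /* timer_active encodes which timer is pending, as documented for
         * /proc/net/tcp: 1 retransmit (or loss probe), 2 keepalive,
         * 3 TIME_WAIT, 4 zero-window probe, 0 none.
         */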
        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
                timer_expires   = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active    = 2;
                timer_expires   = sp->sk_timer.expires;
        } else {
                timer_active    = 0;
                timer_expires   = jiffies;
        }

        state = inet_sk_state_load(sp);
        if (state == TCP_LISTEN)
                rx_queue = sp->sk_ack_backlog;
        else
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
                 */
                rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   state,
                   tp->write_seq - tp->snd_una,
                   rx_queue,
                   timer_active,
                   jiffies_delta_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
                   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   refcount_read(&sp->sk_refcnt), sp,
                   jiffies_to_clock_t(icsk->icsk_rto),
                   jiffies_to_clock_t(icsk->icsk_ack.ato),
                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                   tp->snd_cwnd,
                   state == TCP_LISTEN ?
                        fastopenq->max_qlen :
                        (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
                   );
}

static void get_timewait6_sock(struct seq_file *seq,
                               struct inet_timewait_sock *tw, int i)
{
        long delta = tw->tw_timer.expires - jiffies;
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;

        dest = &tw->tw_v6_daddr;
        src  = &tw->tw_v6_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
                   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                   refcount_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st;
        struct sock *sk = v;

        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
                         "  sl  "
                         "local_address                         "
                         "remote_address                        "
                         "st tx_queue rx_queue tr tm->when retrnsmt"
                         "   uid  timeout inode\n");
                goto out;
        }
        st = seq->private;

        if (sk->sk_state == TCP_TIME_WAIT)
                get_timewait6_sock(seq, v, st->num);
        else if (sk->sk_state == TCP_NEW_SYN_RECV)
                get_openreq6(seq, v, st->num);
        else
                get_tcp6_sock(seq, v, st->num);
out:
        return 0;
}
1919
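/* Illustrative /proc/net/tcp6 line for a socket listening on port 22
 * (all values below are made up):
 *
 *   sl  local_address                         remote_address ...
 *    0: 00000000000000000000000000000000:0016 00000000000000000000000000000000:0000
 *       0A 00000000:00000000 00:00000000 00000000     0        0 1234 ...
 *
 * 0A is TCP_LISTEN, 0016 is port 22 in hex; the addresses are the raw
 * s6_addr32 words printed back to back, as in get_tcp6_sock() above.
 */
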
static const struct seq_operations tcp6_seq_ops = {
        .show           = tcp6_seq_show,
        .start          = tcp_seq_start,
        .next           = tcp_seq_next,
        .stop           = tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
        .family         = AF_INET6,
};

int __net_init tcp6_proc_init(struct net *net)
{
        if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
                        sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
                return -ENOMEM;
        return 0;
}

void tcp6_proc_exit(struct net *net)
{
        remove_proc_entry("tcp6", net->proc_net);
}
#endif

struct proto tcpv6_prot = {
        .name                   = "TCPv6",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .pre_connect            = tcp_v6_pre_connect,
        .connect                = tcp_v6_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v6_init_sock,
        .destroy                = tcp_v6_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .keepalive              = tcp_set_keepalive,
        .recvmsg                = tcp_recvmsg,
        .sendmsg                = tcp_sendmsg,
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v6_do_rcv,
        .release_cb             = tcp_release_cb,
        .hash                   = inet6_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .leave_memory_pressure  = tcp_leave_memory_pressure,
        .stream_memory_free     = tcp_stream_memory_free,
        .sockets_allocated      = &tcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
        .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_wmem),
        .sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_rmem),
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp6_sock),
        .slab_flags             = SLAB_TYPESAFE_BY_RCU,
        .twsk_prot              = &tcp6_timewait_sock_ops,
        .rsk_prot               = &tcp6_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
        .no_autobind            = true,
#ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
#endif
        .diag_destroy           = tcp_abort,
};

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol tcpv6_protocol = {
        .early_demux    =       tcp_v6_early_demux,
        .early_demux_handler =  tcp_v6_early_demux,
        .handler        =       tcp_v6_rcv,
        .err_handler    =       tcp_v6_err,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
        .type           =       SOCK_STREAM,
        .protocol       =       IPPROTO_TCP,
        .prot           =       &tcpv6_prot,
        .ops            =       &inet6_stream_ops,
        .flags          =       INET_PROTOSW_PERMANENT |
                                INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
        return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
                                    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
        inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

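/* exit_batch runs once per batch of exiting namespaces; purging the
 * established hash here sweeps out time-wait sockets belonging to the
 * dying namespaces, which would otherwise linger until their timers
 * fire.
 */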
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
        inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
        .init       = tcpv6_net_init,
        .exit       = tcpv6_net_exit,
        .exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
        int ret;

        ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
        if (ret)
                goto out;

        /* register the inet6 protocol switch */
        ret = inet6_register_protosw(&tcpv6_protosw);
        if (ret)
                goto out_tcpv6_protocol;

        ret = register_pernet_subsys(&tcpv6_net_ops);
        if (ret)
                goto out_tcpv6_protosw;
out:
        return ret;

out_tcpv6_protosw:
        inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
        goto out;
}

void tcpv6_exit(void)
{
        unregister_pernet_subsys(&tcpv6_net_ops);
        inet6_unregister_protosw(&tcpv6_protosw);
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}