net/ipv4/inet_hashtables.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
			const __u16 lport, const __be32 faddr,
			const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->port      = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

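/*
 * Associate @sk with bind bucket @tb for local port @snum.  Caller must
 * hold the bind hash bucket lock for @tb's chain.
 */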
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (unlikely(!tb)) {
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

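/* Pick the lhash2 bucket for @sk: hashed by local port *and* address,
 * unlike the primary listening hash, which is hashed by port alone.
 */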
static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	else
#endif
		hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	return inet_lhash2_bucket(h, hash);
}

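/* Add @sk to the secondary (port+addr) listening hash.  IPv6 reuseport
 * listeners go to the tail so that, in a mixed v4/v6 reuseport group,
 * the IPv4 listeners are encountered first during lookup.
 */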
static void inet_hash2(struct inet_hashinfo *h, struct sock *sk)
{
	struct inet_listen_hashbucket *ilb2;

	if (!h->lhash2)
		return;

	ilb2 = inet_lhash2_bucket_sk(h, sk);

	spin_lock(&ilb2->lock);
	if (sk->sk_reuseport && sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
				   &ilb2->head);
	else
		hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
				   &ilb2->head);
	ilb2->count++;
	spin_unlock(&ilb2->lock);
}

static void inet_unhash2(struct inet_hashinfo *h, struct sock *sk)
{
	struct inet_listen_hashbucket *ilb2;

	if (!h->lhash2 ||
	    WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node)))
		return;

	ilb2 = inet_lhash2_bucket_sk(h, sk);

	spin_lock(&ilb2->lock);
	hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
	ilb2->count--;
	spin_unlock(&ilb2->lock);
}

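/*
 * Score a candidate listener for a packet to (daddr, hnum) arriving on
 * device @dif (or slave @sdif).  -1 means "no match"; otherwise an exact
 * rcv_saddr match and a matching bound device each add 4, AF_INET scores
 * one above a v4-mapped AF_INET6 socket, and a listener last active on
 * the current CPU gets +1.
 */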
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif, const int sdif, bool exact_dif)
{
	int score = -1;
	struct inet_sock *inet = inet_sk(sk);

	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
			!ipv6_only_sock(sk)) {
		__be32 rcv_saddr = inet->inet_rcv_saddr;
		score = sk->sk_family == PF_INET ? 2 : 1;
		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if || exact_dif) {
			bool dev_match = (sk->sk_bound_dev_if == dif ||
					  sk->sk_bound_dev_if == sdif);

			if (!dev_match)
				return -1;
			if (sk->sk_bound_dev_if)
				score += 4;
		}
		if (sk->sk_incoming_cpu == raw_smp_processor_id())
			score++;
	}
	return score;
}

/*
 * There are some nice properties to exploit here. The BSD API
 * does not allow a listening sock to specify the remote port or the
 * remote address for the connection, so always assume both are
 * wildcarded during the search; they can never be otherwise.
 */

/* called with rcu_read_lock() : No refcount taken on the socket */
static struct sock *inet_lhash2_lookup(struct net *net,
				struct inet_listen_hashbucket *ilb2,
				struct sk_buff *skb, int doff,
				const __be32 saddr, __be16 sport,
				const __be32 daddr, const unsigned short hnum,
				const int dif, const int sdif)
{
	bool exact_dif = inet_exact_dif_match(net, skb);
	struct inet_connection_sock *icsk;
	struct sock *sk, *result = NULL;
	int score, hiscore = 0;
	u32 phash = 0;

	inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
		sk = (struct sock *)icsk;
		score = compute_score(sk, net, hnum, daddr,
				      dif, sdif, exact_dif);
		if (score > hiscore) {
			if (sk->sk_reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				result = reuseport_select_sock(sk, phash,
							       skb, doff);
				if (result)
					return result;
			}
			result = sk;
			hiscore = score;
		}
	}

	return result;
}

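/*
 * Full listener lookup.  The primary listening hash is keyed by port
 * alone; when its bucket grows past 10 sockets we try the lhash2 table
 * (keyed by port+address) instead, first with the packet's destination
 * address and then with INADDR_ANY, falling back to the port-only walk
 * whenever the lhash2 bucket would be longer.
 */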
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif, const int sdif)
{
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	bool exact_dif = inet_exact_dif_match(net, skb);
	struct inet_listen_hashbucket *ilb2;
	struct sock *sk, *result = NULL;
	int score, hiscore = 0;
	unsigned int hash2;
	u32 phash = 0;

	if (ilb->count <= 10 || !hashinfo->lhash2)
		goto port_lookup;

	/* Too many sk in the ilb bucket (which is hashed by port alone).
	 * Try lhash2 (which is hashed by port and addr) instead.
	 */

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
	if (ilb2->count > ilb->count)
		goto port_lookup;

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with INADDR_ANY */

	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
	if (ilb2->count > ilb->count)
		goto port_lookup;

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	goto done;

port_lookup:
	sk_for_each_rcu(sk, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr,
				      dif, sdif, exact_dif);
		if (score > hiscore) {
			if (sk->sk_reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				result = reuseport_select_sock(sk, phash,
							       skb, doff);
				if (result)
					goto done;
			}
			result = sk;
			hiscore = score;
		}
	}
done:
	if (unlikely(IS_ERR(result)))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

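/*
 * Established lookup: a lockless RCU walk of an hlist_nulls chain.  The
 * match is re-checked after taking a refcount, and the nulls value is
 * compared against the slot so that a socket that moved chains mid-walk
 * forces a restart.
 */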
struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif, const int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif, sdif))) {
			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports,
						 dif, sdif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is not the
	 * expected one, we must restart the lookup.  We probably met an
	 * item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

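/*
 * Verify that the chosen (saddr, lport, daddr, dport) 4-tuple is not
 * already taken by an established socket; a TIME_WAIT occupant may be
 * recycled if twsk_unique() allows it.  On success the socket is hashed
 * with its new identity before the chain lock is dropped.
 */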
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	int sdif = l3mdev_master_ifindex_by_index(net, dif);
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET_MATCH(sk2, net, acookie,
					 saddr, daddr, ports, dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * a socket with a funny identity in the hash table.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	struct inet_ehash_bucket *head;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	}
	if (ret)
		__sk_nulls_add_node_rcu(sk, list);
	spin_unlock(lock);
	return ret;
}

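/* Hash a non-listening socket.  If @osk was supposed to be replaced but
 * had already left the table, the new socket is not inserted; it is
 * marked dead and destroyed instead.
 */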
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
{
	bool ok = inet_ehash_insert(sk, osk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		percpu_counter_inc(sk->sk_prot->orphan_count);
		inet_sk_set_state(sk, TCP_CLOSE);
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);

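/* Join an existing reuseport group in this listen bucket if a compatible
 * listener (same family, bound device, bind bucket, owner uid and bound
 * address) already has one; otherwise allocate a fresh group for @sk.
 */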
static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each_rcu(sk2, &ilb->head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false))
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

int __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		inet_ehash_nolisten(sk, osk);
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb);
		if (err)
			goto unlock;
	}
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
	else
		hlist_add_head_rcu(&sk->sk_node, &ilb->head);
	inet_hash2(hashinfo, sk);
	ilb->count++;
	sock_set_flag(sk, SOCK_RCU_FREE);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		err = __inet_hash(sk, NULL);
		local_bh_enable();
	}

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);

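/* Remove @sk from whichever table it is on: the listening hash (plus
 * lhash2) under the listen bucket lock, or the established hash under
 * the ehash chain lock.  Detaches from any reuseport group as well.
 */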
void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb = NULL;
	spinlock_t *lock;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN) {
		ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
		lock = &ilb->lock;
	} else {
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	}
	spin_lock_bh(lock);
	if (sk_unhashed(sk))
		goto unlock;

	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);
	if (ilb) {
		inet_unhash2(hashinfo, sk);
		__sk_del_node_init(sk);
		ilb->count--;
	} else {
		__sk_nulls_del_node_init_rcu(sk);
	}
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
unlock:
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);

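/*
 * Pick and hash an ephemeral port for a connect().  If the socket is
 * already bound, only the uniqueness check runs.  Otherwise we scan the
 * local port range starting at a per-destination offset, trying one
 * parity first (inet_csk_get_port() prefers the other parity, which
 * keeps connect() and bind() users mostly apart), and claim the first
 * port whose 4-tuple passes @check_established.
 */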
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_timewait_sock *tw = NULL;
	struct inet_bind_hashbucket *head;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	int ret, i, low, high;
	static u32 hint;

	if (port) {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		tb = inet_csk(sk)->icsk_bind_hash;
		spin_lock_bh(&head->lock);
		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
			inet_ehash_nolisten(sk, NULL);
			spin_unlock_bh(&head->lock);
			return 0;
		}
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, port, NULL);
		local_bh_enable();
		return ret;
	}

	inet_get_local_port_range(net, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000[ */
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = (hint + port_offset) % remaining;
	/* In the first pass we try ports of the same parity as @low.
	 * inet_csk_get_port() makes the opposite choice.
	 */
	offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), net) && tb->port == port) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port;
				WARN_ON(hlist_empty(&tb->owners));
				if (!check_established(death_row, sk,
						       port, &tw))
					goto ok;
				goto next_port;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset++;
	if ((offset & 1) && remaining > 1)
		goto other_parity_scan;

	return -EADDRNOTAVAIL;

ok:
	hint += i + 2;

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, port);
	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);
	spin_unlock(&head->lock);
	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u32 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_HEAD(&h->listening_hash[i].head);
		h->listening_hash[i].count = 0;
	}

	h->lhash2 = NULL;
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);

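/* Boot-time allocation of the secondary listening hash (lhash2) via the
 * large-system hash allocator; lhash2_mask is set by the allocator.
 */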
void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
				unsigned long numentries, int scale,
				unsigned long low_limit,
				unsigned long high_limit)
{
	unsigned int i;

	h->lhash2 = alloc_large_system_hash(name,
					    sizeof(*h->lhash2),
					    numentries,
					    scale,
					    0,
					    NULL,
					    &h->lhash2_mask,
					    low_limit,
					    high_limit);

	for (i = 0; i <= h->lhash2_mask; i++) {
		spin_lock_init(&h->lhash2[i].lock);
		INIT_HLIST_HEAD(&h->lhash2[i].head);
		h->lhash2[i].count = 0;
	}
}

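/* Size the ehash lock array: enough spinlocks to fill two cache lines
 * per possible CPU, rounded up to a power of two and capped at one lock
 * per hash bucket.
 */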
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);