/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@netfilter.org> */

#ifndef _IP_SET_HASH_GEN_H
#define _IP_SET_HASH_GEN_H

#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/types.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/ipset/ip_set.h>

#define __ipset_dereference(p)		\
	rcu_dereference_protected(p, 1)
#define ipset_dereference_nfnl(p)	\
	rcu_dereference_protected(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
#define ipset_dereference_set(p, set)	\
	rcu_dereference_protected(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
		lockdep_is_held(&(set)->lock))
#define ipset_dereference_bh_nfnl(p)	\
	rcu_dereference_bh_check(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
/* Hashing which uses arrays to resolve clashing. The hash table is resized
 * (doubled) when searching becomes too long.
 * Internally jhash is used with the assumption that the size of the
 * stored data is a multiple of sizeof(u32).
 *
 * Readers and resizing
 *
 * Resizing can be triggered by userspace command only, and those
 * are serialized by the nfnl mutex. During resizing the set is
 * read-locked, so the only possible concurrent operations are
 * the kernel side readers. Those must be protected by proper RCU locking.
 */
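
/* Illustration only (not part of the upstream file): a kernel-side
 * reader is expected to follow the usual RCU pattern, e.g.
 *
 *	rcu_read_lock_bh();
 *	t = rcu_dereference_bh(h->table);
 *	n = rcu_dereference_bh(hbucket(t, key));
 *	... search the bucket n ...
 *	rcu_read_unlock_bh();
 *
 * exactly as mtype_test() below does; the resizer publishes the new
 * table with rcu_assign_pointer() and waits with synchronize_rcu().
 */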
/* Number of elements to store in an initial array block */
#define AHASH_INIT_SIZE			2
/* Max number of elements to store in an array block */
#define AHASH_MAX_SIZE			(6 * AHASH_INIT_SIZE)
/* Max number of elements in the array block when tuned */
#define AHASH_MAX_TUNED			64

#define AHASH_MAX(h)			((h)->bucketsize)
/* The max number of elements can be tuned */
#ifdef IP_SET_HASH_WITH_MULTI
static u8
tune_bucketsize(u8 curr, u32 multi)
{
	u32 n;

	if (multi < curr)
		return curr;

	n = curr + AHASH_INIT_SIZE;
	/* Currently, when listing, one hash bucket must fit into a message.
	 * Therefore we have a hard limit here.
	 */
	return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
}
#define TUNE_BUCKETSIZE(h, multi)	\
	((h)->bucketsize = tune_bucketsize((h)->bucketsize, multi))
#else
#define TUNE_BUCKETSIZE(h, multi)
#endif
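
/* Illustrative numbers, assuming the defaults above: bucketsize starts
 * at AHASH_MAX_SIZE == 12; every successful tuning step in mtype_add()
 * grows it by AHASH_INIT_SIZE == 2, i.e. 12 -> 14 -> 16 -> ... up to
 * the hard cap AHASH_MAX_TUNED == 64.
 */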
/* A hash bucket */
struct hbucket {
	struct rcu_head rcu;	/* for call_rcu */
	/* Which positions are used in the array */
	DECLARE_BITMAP(used, AHASH_MAX_TUNED);
	u8 size;		/* size of the array */
	u8 pos;			/* position of the first free entry */
	unsigned char value[]	/* the array of the values */
		__aligned(__alignof__(u64));
};
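
/* Illustration only: element i of a bucket lives at
 * n->value + i * set->dsize (see ahash_data() below); bit i of n->used
 * tells whether that slot currently holds a live element, so deleted
 * slots can be reused before the bucket array is grown.
 */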

/* Region size for locking == 2^HTABLE_REGION_BITS */
#define HTABLE_REGION_BITS	10
#define ahash_numof_locks(htable_bits)		\
	((htable_bits) < HTABLE_REGION_BITS ? 1	\
		: jhash_size((htable_bits) - HTABLE_REGION_BITS))
#define ahash_sizeof_regions(htable_bits)		\
	(ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
#define ahash_region(n, htable_bits)		\
	((n) % ahash_numof_locks(htable_bits))
#define ahash_bucket_start(h, htable_bits)	\
	((htable_bits) < HTABLE_REGION_BITS ? 0	\
		: (h) * jhash_size(HTABLE_REGION_BITS))
#define ahash_bucket_end(h, htable_bits)	\
	((htable_bits) < HTABLE_REGION_BITS ? jhash_size(htable_bits)	\
		: ((h) + 1) * jhash_size(HTABLE_REGION_BITS))
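
/* Worked example (illustration only): with htable_bits == 12 and
 * HTABLE_REGION_BITS == 10 there are ahash_numof_locks() ==
 * 2^(12 - 10) == 4 region locks; gc and flush walk the buckets
 * [r * 1024, (r + 1) * 1024) of region r, while add/del map a hashed
 * key to region key % 4. Tables smaller than 2^10 buckets form a
 * single region.
 */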

struct htable_gc {
	struct delayed_work dwork;
	struct ip_set *set;	/* Set the gc belongs to */
	u32 region;		/* Last gc run position */
};
/* The hash table: the table size stored here in order to make resizing easy */
struct htable {
	atomic_t ref;		/* References for resizing */
	atomic_t uref;		/* References for dumping and gc */
	u8 htable_bits;		/* size of hash table == 2^htable_bits */
	u32 maxelem;		/* Maxelem per region */
	struct ip_set_region *hregion;	/* Region locks and ext sizes */
	struct hbucket __rcu *bucket[]; /* hashtable buckets */
};
#define hbucket(h, i)		((h)->bucket[i])
#define ext_size(n, dsize)	\
	(sizeof(struct hbucket) + (n) * (dsize))

#ifndef IPSET_NET_COUNT
#define IPSET_NET_COUNT		1
#endif

/* Book-keeping of the prefixes added to the set */
struct net_prefixes {
	u32 nets[IPSET_NET_COUNT]; /* number of elements for this cidr */
	u8 cidr[IPSET_NET_COUNT];  /* the cidr value */
};

/* Compute the hash table size */
static size_t
htable_size(u8 hbits)
{
	size_t hsize;

	/* We must fit both into u32 in jhash and size_t */
	if (hbits > 31)
		return 0;
	hsize = jhash_size(hbits);
	if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *)
	    < hsize)
		return 0;

	return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
}
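
/* Worked example (illustration only, 8-byte pointers assumed):
 * htable_size(10) == 1024 * sizeof(struct hbucket *) +
 * sizeof(struct htable), i.e. roughly 8 KiB plus the table header.
 * The checks above return 0 on overflow, which the callers treat
 * as "the table cannot grow any further".
 */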

#ifdef IP_SET_HASH_WITH_NETS
#if IPSET_NET_COUNT > 1
#define __CIDR(cidr, i)		(cidr[i])
#else
#define __CIDR(cidr, i)		(cidr)
#endif

/* cidr + 1 is stored in net_prefixes to support /0 */
#define NCIDR_PUT(cidr)		((cidr) + 1)
#define NCIDR_GET(cidr)		((cidr) - 1)

#ifdef IP_SET_HASH_WITH_NETS_PACKED
/* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */
#define DCIDR_PUT(cidr)		((cidr) - 1)
#define DCIDR_GET(cidr, i)	(__CIDR(cidr, i) + 1)
#else
#define DCIDR_PUT(cidr)		(cidr)
#define DCIDR_GET(cidr, i)	__CIDR(cidr, i)
#endif

#define INIT_CIDR(cidr, host_mask)	\
	DCIDR_PUT(((cidr) ? NCIDR_GET(cidr) : host_mask))
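
/* Worked example (illustration only): a /0 network is stored in
 * net_prefixes as NCIDR_PUT(0) == 1, so a zero slot can safely mean
 * "unused"; NCIDR_GET() undoes the shift. In the packed case a data
 * entry stores DCIDR_PUT(24) == 23 and DCIDR_GET() yields 24 again.
 */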

#ifdef IP_SET_HASH_WITH_NET0
/* cidr from 0 to HOST_MASK value and c = cidr + 1 */
#define NLEN			(HOST_MASK + 1)
#define CIDR_POS(c)		((c) - 1)
#else
/* cidr from 1 to HOST_MASK value and c = cidr + 1 */
#define NLEN			HOST_MASK
#define CIDR_POS(c)		((c) - 2)
#endif

#else
#define NLEN			0
#endif /* IP_SET_HASH_WITH_NETS */

#define SET_ELEM_EXPIRED(set, d)	\
	(SET_WITH_TIMEOUT(set) &&	\
	 ip_set_timeout_expired(ext_timeout(d, set)))

#endif /* _IP_SET_HASH_GEN_H */

#ifndef MTYPE
#error "MTYPE is not defined!"
#endif

#ifndef HTYPE
#error "HTYPE is not defined!"
#endif

#ifndef HOST_MASK
#error "HOST_MASK is not defined!"
#endif

/* Family dependent templates */

#undef ahash_data
#undef mtype_data_equal
#undef mtype_do_data_match
#undef mtype_data_set_flags
#undef mtype_data_reset_elem
#undef mtype_data_reset_flags
#undef mtype_data_netmask
#undef mtype_data_list
#undef mtype_data_next
#undef mtype_elem

#undef mtype_ahash_destroy
#undef mtype_ext_cleanup
#undef mtype_add_cidr
#undef mtype_del_cidr
#undef mtype_ahash_memsize
#undef mtype_flush
#undef mtype_destroy
#undef mtype_same_set
#undef mtype_kadt
#undef mtype_uadt

#undef mtype_add
#undef mtype_del
#undef mtype_test_cidrs
#undef mtype_test
#undef mtype_uref
#undef mtype_resize
#undef mtype_ext_size
#undef mtype_resize_ad
#undef mtype_head
#undef mtype_list
#undef mtype_gc_do
#undef mtype_gc
#undef mtype_gc_init
#undef mtype_variant
#undef mtype_data_match

#undef htype
#undef HKEY

#define mtype_data_equal	IPSET_TOKEN(MTYPE, _data_equal)
#ifdef IP_SET_HASH_WITH_NETS
#define mtype_do_data_match	IPSET_TOKEN(MTYPE, _do_data_match)
#else
#define mtype_do_data_match(d)	1
#endif
#define mtype_data_set_flags	IPSET_TOKEN(MTYPE, _data_set_flags)
#define mtype_data_reset_elem	IPSET_TOKEN(MTYPE, _data_reset_elem)
#define mtype_data_reset_flags	IPSET_TOKEN(MTYPE, _data_reset_flags)
#define mtype_data_netmask	IPSET_TOKEN(MTYPE, _data_netmask)
#define mtype_data_list		IPSET_TOKEN(MTYPE, _data_list)
#define mtype_data_next		IPSET_TOKEN(MTYPE, _data_next)
#define mtype_elem		IPSET_TOKEN(MTYPE, _elem)

#define mtype_ahash_destroy	IPSET_TOKEN(MTYPE, _ahash_destroy)
#define mtype_ext_cleanup	IPSET_TOKEN(MTYPE, _ext_cleanup)
#define mtype_add_cidr		IPSET_TOKEN(MTYPE, _add_cidr)
#define mtype_del_cidr		IPSET_TOKEN(MTYPE, _del_cidr)
#define mtype_ahash_memsize	IPSET_TOKEN(MTYPE, _ahash_memsize)
#define mtype_flush		IPSET_TOKEN(MTYPE, _flush)
#define mtype_destroy		IPSET_TOKEN(MTYPE, _destroy)
#define mtype_same_set		IPSET_TOKEN(MTYPE, _same_set)
#define mtype_kadt		IPSET_TOKEN(MTYPE, _kadt)
#define mtype_uadt		IPSET_TOKEN(MTYPE, _uadt)

#define mtype_add		IPSET_TOKEN(MTYPE, _add)
#define mtype_del		IPSET_TOKEN(MTYPE, _del)
#define mtype_test_cidrs	IPSET_TOKEN(MTYPE, _test_cidrs)
#define mtype_test		IPSET_TOKEN(MTYPE, _test)
#define mtype_uref		IPSET_TOKEN(MTYPE, _uref)
#define mtype_resize		IPSET_TOKEN(MTYPE, _resize)
#define mtype_ext_size		IPSET_TOKEN(MTYPE, _ext_size)
#define mtype_resize_ad		IPSET_TOKEN(MTYPE, _resize_ad)
#define mtype_head		IPSET_TOKEN(MTYPE, _head)
#define mtype_list		IPSET_TOKEN(MTYPE, _list)
#define mtype_gc_do		IPSET_TOKEN(MTYPE, _gc_do)
#define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
#define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_variant		IPSET_TOKEN(MTYPE, _variant)
#define mtype_data_match	IPSET_TOKEN(MTYPE, _data_match)

#ifndef HKEY_DATALEN
#define HKEY_DATALEN		sizeof(struct mtype_elem)
#endif

#define htype			MTYPE

#define HKEY(data, initval, htable_bits)			\
({								\
	const u32 *__k = (const u32 *)data;			\
	u32 __l = HKEY_DATALEN / sizeof(u32);			\
								\
	BUILD_BUG_ON(HKEY_DATALEN % sizeof(u32) != 0);		\
								\
	jhash2(__k, __l, initval) & jhash_mask(htable_bits);	\
})
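
/* Worked example (illustration only): for a 12-byte element type,
 * HKEY hashes __l == 3 u32 words with jhash2() and masks the result
 * down to a bucket index, e.g. hash & 1023 for htable_bits == 10.
 * The BUILD_BUG_ON() enforces the documented assumption that the
 * stored data size is a multiple of sizeof(u32).
 */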

/* The generic hash structure */
struct htype {
	struct htable __rcu *table; /* the hash table */
	struct htable_gc gc;	/* gc workqueue */
	u32 maxelem;		/* max elements in the hash */
	u32 initval;		/* random jhash init value */
#ifdef IP_SET_HASH_WITH_MARKMASK
	u32 markmask;		/* markmask value for mark mask to store */
#endif
	u8 bucketsize;		/* max elements in an array block */
#ifdef IP_SET_HASH_WITH_NETMASK
	u8 netmask;		/* netmask value for subnets to store */
#endif
	struct list_head ad;	/* Resize add|del backlist */
	struct mtype_elem next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_NETS
	struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */
#endif
};

/* ADD|DEL entries saved during resize */
struct mtype_resize_ad {
	struct list_head list;
	enum ipset_adt ad;	/* ADD|DEL element */
	struct mtype_elem d;	/* Element value */
	struct ip_set_ext ext;	/* Extensions for ADD */
	struct ip_set_ext mext;	/* Target extensions for ADD */
	u32 flags;		/* Flags for ADD */
};

#ifdef IP_SET_HASH_WITH_NETS
/* Network cidr size book-keeping when the hash stores different
 * sized networks. cidr == real cidr + 1 to support /0.
 */
static void
mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
	int i, j;

	spin_lock_bh(&set->lock);
	/* Add in increasing prefix order, so larger cidr first */
	for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) {
		if (j != -1) {
			continue;
		} else if (h->nets[i].cidr[n] < cidr) {
			j = i;
		} else if (h->nets[i].cidr[n] == cidr) {
			h->nets[CIDR_POS(cidr)].nets[n]++;
			goto unlock;
		}
	}
	if (j != -1) {
		for (; i > j; i--)
			h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
	}
	h->nets[i].cidr[n] = cidr;
	h->nets[CIDR_POS(cidr)].nets[n] = 1;
unlock:
	spin_unlock_bh(&set->lock);
}

static void
mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
	u8 i, j, net_end = NLEN - 1;

	spin_lock_bh(&set->lock);
	for (i = 0; i < NLEN; i++) {
		if (h->nets[i].cidr[n] != cidr)
			continue;
		h->nets[CIDR_POS(cidr)].nets[n]--;
		if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
			goto unlock;
		for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
			h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
		h->nets[j].cidr[n] = 0;
		goto unlock;
	}
unlock:
	spin_unlock_bh(&set->lock);
}
#endif
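
/* Worked example (illustration only, IPv4, IP_SET_HASH_WITH_NET0
 * undefined): after adding one /24 and one /16 element, the cidr[0]
 * slots hold the stored values 25 and 17 (cidr + 1, larger real cidr
 * first) and nets[CIDR_POS(25)].nets[0] == nets[CIDR_POS(17)].nets[0]
 * == 1. mtype_test_cidrs() walks exactly this list of distinct
 * prefix sizes.
 */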

/* Calculate the actual memory size of the set data */
static size_t
mtype_ahash_memsize(const struct htype *h, const struct htable *t)
{
	return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits);
}

/* Get the ith element from the array block n */
#define ahash_data(n, i, dsize)	\
	((struct mtype_elem *)((n)->value + ((i) * (dsize))))

static void
mtype_ext_cleanup(struct ip_set *set, struct hbucket *n)
{
	int i;

	for (i = 0; i < n->pos; i++)
		if (test_bit(i, n->used))
			ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
}

/* Flush a hash type of set: destroy all elements */
static void
mtype_flush(struct ip_set *set)
{
	struct htype *h = set->data;
	struct htable *t;
	struct hbucket *n;
	u32 r, i;

	t = ipset_dereference_nfnl(h->table);
	for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
		spin_lock_bh(&t->hregion[r].lock);
		for (i = ahash_bucket_start(r, t->htable_bits);
		     i < ahash_bucket_end(r, t->htable_bits); i++) {
			n = __ipset_dereference(hbucket(t, i));
			if (!n)
				continue;
			if (set->extensions & IPSET_EXT_DESTROY)
				mtype_ext_cleanup(set, n);
			/* FIXME: use slab cache */
			rcu_assign_pointer(hbucket(t, i), NULL);
			kfree_rcu(n, rcu);
		}
		t->hregion[r].ext_size = 0;
		t->hregion[r].elements = 0;
		spin_unlock_bh(&t->hregion[r].lock);
	}
#ifdef IP_SET_HASH_WITH_NETS
	memset(h->nets, 0, sizeof(h->nets));
#endif
}

/* Destroy the hashtable part of the set */
static void
mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
{
	struct hbucket *n;
	u32 i;

	for (i = 0; i < jhash_size(t->htable_bits); i++) {
		n = __ipset_dereference(hbucket(t, i));
		if (!n)
			continue;
		if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
			mtype_ext_cleanup(set, n);
		/* FIXME: use slab cache */
		kfree(n);
	}

	ip_set_free(t->hregion);
	ip_set_free(t);
}

/* Destroy a hash type of set */
static void
mtype_destroy(struct ip_set *set)
{
	struct htype *h = set->data;
	struct list_head *l, *lt;

	if (SET_WITH_TIMEOUT(set))
		cancel_delayed_work_sync(&h->gc.dwork);

	mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
	list_for_each_safe(l, lt, &h->ad) {
		list_del(l);
		kfree(l);
	}
	kfree(h);

	set->data = NULL;
}

static bool
mtype_same_set(const struct ip_set *a, const struct ip_set *b)
{
	const struct htype *x = a->data;
	const struct htype *y = b->data;

	/* Resizing changes htable_bits, so we ignore it */
	return x->maxelem == y->maxelem &&
	       a->timeout == b->timeout &&
#ifdef IP_SET_HASH_WITH_NETMASK
	       x->netmask == y->netmask &&
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	       x->markmask == y->markmask &&
#endif
	       a->extensions == b->extensions;
}

static void
mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r)
{
	struct hbucket *n, *tmp;
	struct mtype_elem *data;
	u32 i, j, d;
	size_t dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
	u8 k;
#endif
	u8 htable_bits = t->htable_bits;

	spin_lock_bh(&t->hregion[r].lock);
	for (i = ahash_bucket_start(r, htable_bits);
	     i < ahash_bucket_end(r, htable_bits); i++) {
		n = __ipset_dereference(hbucket(t, i));
		if (!n)
			continue;
		for (j = 0, d = 0; j < n->pos; j++) {
			if (!test_bit(j, n->used)) {
				d++;
				continue;
			}
			data = ahash_data(n, j, dsize);
			if (!ip_set_timeout_expired(ext_timeout(data, set)))
				continue;
			pr_debug("expired %u/%u\n", i, j);
			clear_bit(j, n->used);
			smp_mb__after_atomic();
#ifdef IP_SET_HASH_WITH_NETS
			for (k = 0; k < IPSET_NET_COUNT; k++)
				mtype_del_cidr(set, h,
					NCIDR_PUT(DCIDR_GET(data->cidr, k)),
					k);
#endif
			t->hregion[r].elements--;
			ip_set_ext_destroy(set, data);
			d++;
		}
		if (d >= AHASH_INIT_SIZE) {
			if (d >= n->size) {
				t->hregion[r].ext_size -=
					ext_size(n->size, dsize);
				rcu_assign_pointer(hbucket(t, i), NULL);
				kfree_rcu(n, rcu);
				continue;
			}
			tmp = kzalloc(sizeof(*tmp) +
				      (n->size - AHASH_INIT_SIZE) * dsize,
				      GFP_ATOMIC);
			if (!tmp)
				/* Still try to delete expired elements. */
				continue;
			tmp->size = n->size - AHASH_INIT_SIZE;
			for (j = 0, d = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				memcpy(tmp->value + d * dsize,
				       data, dsize);
				set_bit(d, tmp->used);
				d++;
			}
			tmp->pos = d;
			t->hregion[r].ext_size -=
				ext_size(AHASH_INIT_SIZE, dsize);
			rcu_assign_pointer(hbucket(t, i), tmp);
			kfree_rcu(n, rcu);
		}
	}
	spin_unlock_bh(&t->hregion[r].lock);
}

static void
mtype_gc(struct work_struct *work)
{
	struct htable_gc *gc;
	struct ip_set *set;
	struct htype *h;
	struct htable *t;
	u32 r, numof_locks;
	unsigned int next_run;

	gc = container_of(work, struct htable_gc, dwork.work);
	set = gc->set;
	h = set->data;

	spin_lock_bh(&set->lock);
	t = ipset_dereference_set(h->table, set);
	atomic_inc(&t->uref);
	numof_locks = ahash_numof_locks(t->htable_bits);
	r = gc->region++;
	if (r >= numof_locks) {
		r = gc->region = 0;
	}
	next_run = (IPSET_GC_PERIOD(set->timeout) * HZ) / numof_locks;
	if (next_run < HZ/10)
		next_run = HZ/10;
	spin_unlock_bh(&set->lock);

	mtype_gc_do(set, h, t, r);

	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by expire: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}

	queue_delayed_work(system_power_efficient_wq, &gc->dwork, next_run);
}

static void
mtype_gc_init(struct htable_gc *gc)
{
	INIT_DEFERRABLE_WORK(&gc->dwork, mtype_gc);
	queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
}
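
/* Illustration only: each gc run expires a single region and then
 * re-schedules itself, so a full sweep of a table with e.g. four
 * regions takes four runs; next_run divides the gc period by the
 * number of regions (clamped to at least HZ/10), keeping the overall
 * expiry rate independent of the table size.
 */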

static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags);
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags);

/* Resize a hash: create a new hash table with doubled hashsize and
 * insert the elements into it. Repeat until we succeed or fail due
 * to memory pressure.
 */
static int
mtype_resize(struct ip_set *set, bool retried)
{
	struct htype *h = set->data;
	struct htable *t, *orig;
	u8 htable_bits;
	size_t hsize, dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
	u8 flags;
	struct mtype_elem *tmp;
#endif
	struct mtype_elem *data;
	struct mtype_elem *d;
	struct hbucket *n, *m;
	struct list_head *l, *lt;
	struct mtype_resize_ad *x;
	u32 i, j, r, nr, key;
	int ret;

#ifdef IP_SET_HASH_WITH_NETS
	tmp = kmalloc(dsize, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
#endif
	orig = ipset_dereference_bh_nfnl(h->table);
	htable_bits = orig->htable_bits;

retry:
	ret = 0;
	htable_bits++;
	if (!htable_bits)
		goto hbwarn;
	hsize = htable_size(htable_bits);
	if (!hsize)
		goto hbwarn;
	t = ip_set_alloc(hsize);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
	if (!t->hregion) {
		ip_set_free(t);
		ret = -ENOMEM;
		goto out;
	}
	t->htable_bits = htable_bits;
	t->maxelem = h->maxelem / ahash_numof_locks(htable_bits);
	for (i = 0; i < ahash_numof_locks(htable_bits); i++)
		spin_lock_init(&t->hregion[i].lock);

	/* There can't be another parallel resizing,
	 * but dumping, gc, kernel side add/del are possible
	 */
	orig = ipset_dereference_bh_nfnl(h->table);
	atomic_set(&orig->ref, 1);
	atomic_inc(&orig->uref);
	pr_debug("attempt to resize set %s from %u to %u, t %p\n",
		 set->name, orig->htable_bits, htable_bits, orig);
	for (r = 0; r < ahash_numof_locks(orig->htable_bits); r++) {
		/* Expire may replace a hbucket with another one */
		rcu_read_lock_bh();
		for (i = ahash_bucket_start(r, orig->htable_bits);
		     i < ahash_bucket_end(r, orig->htable_bits); i++) {
			n = __ipset_dereference(hbucket(orig, i));
			if (!n)
				continue;
			for (j = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				if (SET_ELEM_EXPIRED(set, data))
					continue;
#ifdef IP_SET_HASH_WITH_NETS
				/* We have readers running parallel with us,
				 * so the live data cannot be modified.
				 */
				flags = 0;
				memcpy(tmp, data, dsize);
				data = tmp;
				mtype_data_reset_flags(data, &flags);
#endif
				key = HKEY(data, h->initval, htable_bits);
				m = __ipset_dereference(hbucket(t, key));
				nr = ahash_region(key, htable_bits);
				if (!m) {
					m = kzalloc(sizeof(*m) +
					    AHASH_INIT_SIZE * dsize,
					    GFP_ATOMIC);
					if (!m) {
						ret = -ENOMEM;
						goto cleanup;
					}
					m->size = AHASH_INIT_SIZE;
					t->hregion[nr].ext_size +=
						ext_size(AHASH_INIT_SIZE,
							 dsize);
					RCU_INIT_POINTER(hbucket(t, key), m);
				} else if (m->pos >= m->size) {
					struct hbucket *ht;

					if (m->size >= AHASH_MAX(h)) {
						ret = -EAGAIN;
					} else {
						ht = kzalloc(sizeof(*ht) +
						(m->size + AHASH_INIT_SIZE)
						* dsize,
						GFP_ATOMIC);
						if (!ht)
							ret = -ENOMEM;
					}
					if (ret < 0)
						goto cleanup;
					memcpy(ht, m, sizeof(struct hbucket) +
					       m->size * dsize);
					ht->size = m->size + AHASH_INIT_SIZE;
					t->hregion[nr].ext_size +=
						ext_size(AHASH_INIT_SIZE,
							 dsize);
					kfree(m);
					m = ht;
					RCU_INIT_POINTER(hbucket(t, key), ht);
				}
				d = ahash_data(m, m->pos, dsize);
				memcpy(d, data, dsize);
				set_bit(m->pos++, m->used);
				t->hregion[nr].elements++;
#ifdef IP_SET_HASH_WITH_NETS
				mtype_data_reset_flags(d, &flags);
#endif
			}
		}
		rcu_read_unlock_bh();
	}

	/* There can't be any other writer. */
	rcu_assign_pointer(h->table, t);

	/* Give time to other readers of the set */
	synchronize_rcu();

	pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
		 orig->htable_bits, orig, t->htable_bits, t);
	/* Add/delete elements processed by the SET target during resize.
	 * Kernel-side add cannot trigger a resize and userspace actions
	 * are serialized by the mutex.
	 */
	list_for_each_safe(l, lt, &h->ad) {
		x = list_entry(l, struct mtype_resize_ad, list);
		if (x->ad == IPSET_ADD) {
			mtype_add(set, &x->d, &x->ext, &x->mext, x->flags);
		} else {
			mtype_del(set, &x->d, NULL, NULL, 0);
		}
		list_del(l);
		kfree(l);
	}
	/* If there's nobody else using the table, destroy it */
	if (atomic_dec_and_test(&orig->uref)) {
		pr_debug("Table destroy by resize %p\n", orig);
		mtype_ahash_destroy(set, orig, false);
	}

out:
#ifdef IP_SET_HASH_WITH_NETS
	kfree(tmp);
#endif
	return ret;

cleanup:
	rcu_read_unlock_bh();
	atomic_set(&orig->ref, 0);
	atomic_dec(&orig->uref);
	mtype_ahash_destroy(set, t, false);
	if (ret == -EAGAIN)
		goto retry;
	goto out;

hbwarn:
	/* In case we have plenty of memory :-) */
	pr_warn("Cannot increase the hashsize of set %s further\n", set->name);
	ret = -IPSET_ERR_HASH_FULL;
	goto out;
}

/* Get the current number of elements and ext_size in the set */
static void
mtype_ext_size(struct ip_set *set, u32 *elements, size_t *ext_size)
{
	struct htype *h = set->data;
	const struct htable *t;
	u32 i, j, r;
	struct hbucket *n;
	struct mtype_elem *data;

	t = rcu_dereference_bh(h->table);
	for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
		for (i = ahash_bucket_start(r, t->htable_bits);
		     i < ahash_bucket_end(r, t->htable_bits); i++) {
			n = rcu_dereference_bh(hbucket(t, i));
			if (!n)
				continue;
			for (j = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, set->dsize);
				if (!SET_ELEM_EXPIRED(set, data))
					(*elements)++;
			}
		}
		*ext_size += t->hregion[r].ext_size;
	}
}

/* Add an element to a hash and update the internal counters when it
 * succeeds, otherwise report the proper error code.
 */
static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	const struct mtype_elem *d = value;
	struct mtype_elem *data;
	struct hbucket *n, *old = ERR_PTR(-ENOENT);
	int i, j = -1, ret;
	bool flag_exist = flags & IPSET_FLAG_EXIST;
	bool deleted = false, forceadd = false, reuse = false;
	u32 r, key, multi = 0, elements, maxelem;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	key = HKEY(value, h->initval, t->htable_bits);
	r = ahash_region(key, t->htable_bits);
	atomic_inc(&t->uref);
	elements = t->hregion[r].elements;
	maxelem = t->maxelem;
	if (elements >= maxelem) {
		u32 e;
		if (SET_WITH_TIMEOUT(set)) {
			rcu_read_unlock_bh();
			mtype_gc_do(set, h, t, r);
			rcu_read_lock_bh();
		}
		maxelem = h->maxelem;
		elements = 0;
		for (e = 0; e < ahash_numof_locks(t->htable_bits); e++)
			elements += t->hregion[e].elements;
		if (elements >= maxelem && SET_WITH_FORCEADD(set))
			forceadd = true;
	}
	rcu_read_unlock_bh();

	spin_lock_bh(&t->hregion[r].lock);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n) {
		if (forceadd || elements >= maxelem)
			goto set_full;
		old = NULL;
		n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
			    GFP_ATOMIC);
		if (!n) {
			ret = -ENOMEM;
			goto unlock;
		}
		n->size = AHASH_INIT_SIZE;
		t->hregion[r].ext_size +=
			ext_size(AHASH_INIT_SIZE, set->dsize);
		goto copy_elem;
	}
	for (i = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used)) {
			/* Reuse first deleted entry */
			if (j == -1) {
				deleted = reuse = true;
				j = i;
			}
			continue;
		}
		data = ahash_data(n, i, set->dsize);
		if (mtype_data_equal(data, d, &multi)) {
			if (flag_exist || SET_ELEM_EXPIRED(set, data)) {
				/* Just the extensions could be overwritten */
				j = i;
				goto overwrite_extensions;
			}
			ret = -IPSET_ERR_EXIST;
			goto unlock;
		}
		/* Reuse first timed out entry */
		if (SET_ELEM_EXPIRED(set, data) && j == -1) {
			j = i;
			reuse = true;
		}
	}
	if (reuse || forceadd) {
		if (j == -1)
			j = 0;
		data = ahash_data(n, j, set->dsize);
		if (!deleted) {
#ifdef IP_SET_HASH_WITH_NETS
			for (i = 0; i < IPSET_NET_COUNT; i++)
				mtype_del_cidr(set, h,
					NCIDR_PUT(DCIDR_GET(data->cidr, i)),
					i);
#endif
			ip_set_ext_destroy(set, data);
			t->hregion[r].elements--;
		}
		goto copy_data;
	}
	if (elements >= maxelem)
		goto set_full;
	/* Create a new slot */
	if (n->pos >= n->size) {
		TUNE_BUCKETSIZE(h, multi);
		if (n->size >= AHASH_MAX(h)) {
			/* Trigger rehashing */
			mtype_data_next(&h->next, d);
			ret = -EAGAIN;
			goto resize;
		}
		old = n;
		n = kzalloc(sizeof(*n) +
			    (old->size + AHASH_INIT_SIZE) * set->dsize,
			    GFP_ATOMIC);
		if (!n) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(n, old, sizeof(struct hbucket) +
		       old->size * set->dsize);
		n->size = old->size + AHASH_INIT_SIZE;
		t->hregion[r].ext_size +=
			ext_size(AHASH_INIT_SIZE, set->dsize);
	}

copy_elem:
	j = n->pos++;
	data = ahash_data(n, j, set->dsize);
copy_data:
	t->hregion[r].elements++;
#ifdef IP_SET_HASH_WITH_NETS
	for (i = 0; i < IPSET_NET_COUNT; i++)
		mtype_add_cidr(set, h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
#endif
	memcpy(data, d, sizeof(struct mtype_elem));
overwrite_extensions:
#ifdef IP_SET_HASH_WITH_NETS
	mtype_data_set_flags(data, flags);
#endif
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(data, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(set, ext_comment(data, set), ext);
	if (SET_WITH_SKBINFO(set))
		ip_set_init_skbinfo(ext_skbinfo(data, set), ext);
	/* Must come last for the case when timed out entry is reused */
	if (SET_WITH_TIMEOUT(set))
		ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
	smp_mb__before_atomic();
	set_bit(j, n->used);
	if (old != ERR_PTR(-ENOENT)) {
		rcu_assign_pointer(hbucket(t, key), n);
		if (old)
			kfree_rcu(old, rcu);
	}
	ret = 0;
resize:
	spin_unlock_bh(&t->hregion[r].lock);
	if (atomic_read(&t->ref) && ext->target) {
		/* Resize is in process and kernel side add, save values */
		struct mtype_resize_ad *x;

		x = kzalloc(sizeof(struct mtype_resize_ad), GFP_ATOMIC);
		if (!x)
			/* Don't bother */
			goto out;
		x->ad = IPSET_ADD;
		memcpy(&x->d, value, sizeof(struct mtype_elem));
		memcpy(&x->ext, ext, sizeof(struct ip_set_ext));
		memcpy(&x->mext, mext, sizeof(struct ip_set_ext));
		x->flags = flags;
		spin_lock_bh(&set->lock);
		list_add_tail(&x->list, &h->ad);
		spin_unlock_bh(&set->lock);
	}
	goto out;

set_full:
	if (net_ratelimit())
		pr_warn("Set %s is full, maxelem %u reached\n",
			set->name, maxelem);
	ret = -IPSET_ERR_HASH_FULL;
unlock:
	spin_unlock_bh(&t->hregion[r].lock);
out:
	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by add: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}
	return ret;
}

/* Delete an element from the hash and free up space if possible.
 */
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	const struct mtype_elem *d = value;
	struct mtype_elem *data;
	struct hbucket *n;
	struct mtype_resize_ad *x = NULL;
	int i, j, k, r, ret = -IPSET_ERR_EXIST;
	u32 key, multi = 0;
	size_t dsize = set->dsize;

	/* Userspace add and resize are excluded by the mutex.
	 * Kernelspace add does not trigger a resize.
	 */
	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	key = HKEY(value, h->initval, t->htable_bits);
	r = ahash_region(key, t->htable_bits);
	atomic_inc(&t->uref);
	rcu_read_unlock_bh();

	spin_lock_bh(&t->hregion[r].lock);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n)
		goto out;
	for (i = 0, k = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used)) {
			k++;
			continue;
		}
		data = ahash_data(n, i, dsize);
		if (!mtype_data_equal(data, d, &multi))
			continue;
		if (SET_ELEM_EXPIRED(set, data))
			goto out;

		ret = 0;
		clear_bit(i, n->used);
		smp_mb__after_atomic();
		if (i + 1 == n->pos)
			n->pos--;
		t->hregion[r].elements--;
#ifdef IP_SET_HASH_WITH_NETS
		for (j = 0; j < IPSET_NET_COUNT; j++)
			mtype_del_cidr(set, h,
				       NCIDR_PUT(DCIDR_GET(d->cidr, j)), j);
#endif
		ip_set_ext_destroy(set, data);

		if (atomic_read(&t->ref) && ext->target) {
			/* Resize is in process and kernel side del,
			 * save values
			 */
			x = kzalloc(sizeof(struct mtype_resize_ad),
				    GFP_ATOMIC);
			if (x) {
				x->ad = IPSET_DEL;
				memcpy(&x->d, value,
				       sizeof(struct mtype_elem));
				x->flags = flags;
			}
		}
		for (; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				k++;
		}
		if (n->pos == 0 && k == 0) {
			t->hregion[r].ext_size -= ext_size(n->size, dsize);
			rcu_assign_pointer(hbucket(t, key), NULL);
			kfree_rcu(n, rcu);
		} else if (k >= AHASH_INIT_SIZE) {
			struct hbucket *tmp = kzalloc(sizeof(*tmp) +
					(n->size - AHASH_INIT_SIZE) * dsize,
					GFP_ATOMIC);
			if (!tmp)
				goto out;
			tmp->size = n->size - AHASH_INIT_SIZE;
			for (j = 0, k = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				memcpy(tmp->value + k * dsize, data, dsize);
				set_bit(k, tmp->used);
				k++;
			}
			tmp->pos = k;
			t->hregion[r].ext_size -=
				ext_size(AHASH_INIT_SIZE, dsize);
			rcu_assign_pointer(hbucket(t, key), tmp);
			kfree_rcu(n, rcu);
		}
		goto out;
	}

out:
	spin_unlock_bh(&t->hregion[r].lock);
	if (x) {
		spin_lock_bh(&set->lock);
		list_add(&x->list, &h->ad);
		spin_unlock_bh(&set->lock);
	}
	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by del: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}
	return ret;
}

static int
mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
		 struct ip_set_ext *mext, struct ip_set *set, u32 flags)
{
	if (!ip_set_match_extensions(set, ext, mext, flags, data))
		return 0;
	/* nomatch entries return -ENOTEMPTY */
	return mtype_do_data_match(data);
}
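
/* Illustration only: the test path uses the convention that 0 means
 * "no match", a positive value means "match" and -ENOTEMPTY means a
 * nomatch entry was hit; mtype_test() and mtype_test_cidrs() below
 * propagate this value so that the core can treat a matched nomatch
 * element as "not in the set".
 */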

#ifdef IP_SET_HASH_WITH_NETS
/* Special test function which takes into account the different network
 * sizes added to the set
 */
static int
mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
		 const struct ip_set_ext *ext,
		 struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t = rcu_dereference_bh(h->table);
	struct hbucket *n;
	struct mtype_elem *data;
#if IPSET_NET_COUNT == 2
	struct mtype_elem orig = *d;
	int ret, i, j = 0, k;
#else
	int ret, i, j = 0;
#endif
	u32 key, multi = 0;

	pr_debug("test by nets\n");
	for (; j < NLEN && h->nets[j].cidr[0] && !multi; j++) {
#if IPSET_NET_COUNT == 2
		mtype_data_reset_elem(d, &orig);
		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]), false);
		for (k = 0; k < NLEN && h->nets[k].cidr[1] && !multi;
		     k++) {
			mtype_data_netmask(d, NCIDR_GET(h->nets[k].cidr[1]),
					   true);
#else
		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]));
#endif
		key = HKEY(d, h->initval, t->htable_bits);
		n = rcu_dereference_bh(hbucket(t, key));
		if (!n)
			continue;
		for (i = 0; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				continue;
			data = ahash_data(n, i, set->dsize);
			if (!mtype_data_equal(data, d, &multi))
				continue;
			ret = mtype_data_match(data, ext, mext, set, flags);
			if (ret != 0)
				return ret;
#ifdef IP_SET_HASH_WITH_MULTI
			/* No match, reset multiple match flag */
			multi = 0;
#endif
		}
#if IPSET_NET_COUNT == 2
		}
#endif
	}
	return 0;
}
#endif

/* Test whether the element is added to the set */
static int
mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	   struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	struct mtype_elem *d = value;
	struct hbucket *n;
	struct mtype_elem *data;
	int i, ret = 0;
	u32 key, multi = 0;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
#ifdef IP_SET_HASH_WITH_NETS
	/* If we test an IP address and not a network address,
	 * try all possible network sizes
	 */
	for (i = 0; i < IPSET_NET_COUNT; i++)
		if (DCIDR_GET(d->cidr, i) != HOST_MASK)
			break;
	if (i == IPSET_NET_COUNT) {
		ret = mtype_test_cidrs(set, d, ext, mext, flags);
		goto out;
	}
#endif

	key = HKEY(d, h->initval, t->htable_bits);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used))
			continue;
		data = ahash_data(n, i, set->dsize);
		if (!mtype_data_equal(data, d, &multi))
			continue;
		ret = mtype_data_match(data, ext, mext, set, flags);
		if (ret != 0)
			goto out;
	}
out:
	rcu_read_unlock_bh();
	return ret;
}

/* Reply a HEADER request: fill out the header part of the set */
static int
mtype_head(struct ip_set *set, struct sk_buff *skb)
{
	struct htype *h = set->data;
	const struct htable *t;
	struct nlattr *nested;
	size_t memsize;
	u32 elements = 0;
	size_t ext_size = 0;
	u8 htable_bits;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	mtype_ext_size(set, &elements, &ext_size);
	memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size;
	htable_bits = t->htable_bits;
	rcu_read_unlock_bh();

	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
	if (!nested)
		goto nla_put_failure;
	if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
			  htonl(jhash_size(htable_bits))) ||
	    nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
		goto nla_put_failure;
#ifdef IP_SET_HASH_WITH_NETMASK
	if (h->netmask != HOST_MASK &&
	    nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
		goto nla_put_failure;
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
		goto nla_put_failure;
#endif
	if (set->flags & IPSET_CREATE_FLAG_BUCKETSIZE) {
		if (nla_put_u8(skb, IPSET_ATTR_BUCKETSIZE, h->bucketsize) ||
		    nla_put_net32(skb, IPSET_ATTR_INITVAL, htonl(h->initval)))
			goto nla_put_failure;
	}
	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
		goto nla_put_failure;
	if (unlikely(ip_set_put_flags(skb, set)))
		goto nla_put_failure;
	nla_nest_end(skb, nested);

	return 0;
nla_put_failure:
	return -EMSGSIZE;
}

/* Make it possible to run dumping in parallel with resizing */
static void
mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
{
	struct htype *h = set->data;
	struct htable *t;

	if (start) {
		rcu_read_lock_bh();
		t = ipset_dereference_bh_nfnl(h->table);
		atomic_inc(&t->uref);
		cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
		rcu_read_unlock_bh();
	} else if (cb->args[IPSET_CB_PRIVATE]) {
		t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
		if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
			pr_debug("Table destroy after resize by dump: %p\n",
				 t);
			mtype_ahash_destroy(set, t, false);
		}
		cb->args[IPSET_CB_PRIVATE] = 0;
	}
}

/* Reply a LIST/SAVE request: dump the elements of the specified set */
static int
mtype_list(const struct ip_set *set,
	   struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct htable *t;
	struct nlattr *atd, *nested;
	const struct hbucket *n;
	const struct mtype_elem *e;
	u32 first = cb->args[IPSET_CB_ARG0];
	/* We assume that one hash bucket fits into one page */
	void *incomplete;
	int i, ret = 0;

	atd = nla_nest_start(skb, IPSET_ATTR_ADT);
	if (!atd)
		return -EMSGSIZE;

	pr_debug("list hash set %s\n", set->name);
	t = (const struct htable *)cb->args[IPSET_CB_PRIVATE];
	/* Expire may replace a hbucket with another one */
	rcu_read_lock();
	for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
	     cb->args[IPSET_CB_ARG0]++) {
		cond_resched_rcu();
		incomplete = skb_tail_pointer(skb);
		n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
		pr_debug("cb->arg bucket: %lu, t %p n %p\n",
			 cb->args[IPSET_CB_ARG0], t, n);
		if (!n)
			continue;
		for (i = 0; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				continue;
			e = ahash_data(n, i, set->dsize);
			if (SET_ELEM_EXPIRED(set, e))
				continue;
			pr_debug("list hash %lu hbucket %p i %u, data %p\n",
				 cb->args[IPSET_CB_ARG0], n, i, e);
			nested = nla_nest_start(skb, IPSET_ATTR_DATA);
			if (!nested) {
				if (cb->args[IPSET_CB_ARG0] == first) {
					nla_nest_cancel(skb, atd);
					ret = -EMSGSIZE;
					goto out;
				}
				goto nla_put_failure;
			}
			if (mtype_data_list(skb, e))
				goto nla_put_failure;
			if (ip_set_put_extensions(skb, set, e, true))
				goto nla_put_failure;
			nla_nest_end(skb, nested);
		}
	}
	nla_nest_end(skb, atd);
	/* Set listing finished */
	cb->args[IPSET_CB_ARG0] = 0;

	goto out;

nla_put_failure:
	nlmsg_trim(skb, incomplete);
	if (unlikely(first == cb->args[IPSET_CB_ARG0])) {
		pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n",
			set->name);
		cb->args[IPSET_CB_ARG0] = 0;
		ret = -EMSGSIZE;
	} else {
		nla_nest_end(skb, atd);
	}
out:
	rcu_read_unlock();
	return ret;
}

static int
IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
			  const struct xt_action_param *par,
			  enum ipset_adt adt, struct ip_set_adt_opt *opt);

static int
IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
			  enum ipset_adt adt, u32 *lineno, u32 flags,
			  bool retried);

static const struct ip_set_type_variant mtype_variant = {
	.kadt	= mtype_kadt,
	.uadt	= mtype_uadt,
	.adt	= {
		[IPSET_ADD] = mtype_add,
		[IPSET_DEL] = mtype_del,
		[IPSET_TEST] = mtype_test,
	},
	.destroy = mtype_destroy,
	.flush	= mtype_flush,
	.head	= mtype_head,
	.list	= mtype_list,
	.uref	= mtype_uref,
	.resize	= mtype_resize,
	.same_set = mtype_same_set,
	.region_lock = true,
};

#ifdef IP_SET_EMIT_CREATE
static int
IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
			    struct nlattr *tb[], u32 flags)
{
	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
#ifdef IP_SET_HASH_WITH_MARKMASK
	u32 markmask;
#endif
	u8 hbits;
#ifdef IP_SET_HASH_WITH_NETMASK
	u8 netmask;
#endif
	size_t hsize;
	struct htype *h;
	struct htable *t;
	u32 i;

	pr_debug("Create set %s with family %s\n",
		 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");

#ifdef IP_SET_PROTO_UNDEF
	if (set->family != NFPROTO_UNSPEC)
		return -IPSET_ERR_INVALID_FAMILY;
#else
	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
		return -IPSET_ERR_INVALID_FAMILY;
#endif

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

#ifdef IP_SET_HASH_WITH_MARKMASK
	/* Separated condition in order to avoid directive in argument list */
	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK)))
		return -IPSET_ERR_PROTOCOL;

	markmask = 0xffffffff;
	if (tb[IPSET_ATTR_MARKMASK]) {
		markmask = ntohl(nla_get_be32(tb[IPSET_ATTR_MARKMASK]));
		if (markmask == 0)
			return -IPSET_ERR_INVALID_MARKMASK;
	}
#endif

#ifdef IP_SET_HASH_WITH_NETMASK
	netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
	if (tb[IPSET_ATTR_NETMASK]) {
		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);

		if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
		    (set->family == NFPROTO_IPV6 && netmask > 128) ||
		    netmask == 0)
			return -IPSET_ERR_INVALID_NETMASK;
	}
#endif

	if (tb[IPSET_ATTR_HASHSIZE]) {
		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
		if (hashsize < IPSET_MIMINAL_HASHSIZE)
			hashsize = IPSET_MIMINAL_HASHSIZE;
	}

	if (tb[IPSET_ATTR_MAXELEM])
		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);

	hsize = sizeof(*h);
	h = kzalloc(hsize, GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	/* Compute htable_bits from the user input parameter hashsize.
	 * Assume that hashsize == 2^htable_bits,
	 * otherwise round up to the first 2^n value.
	 */
	hbits = fls(hashsize - 1);
	hsize = htable_size(hbits);
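	/* Worked example (illustration only): hashsize == 1000 gives
	 * hbits == fls(999) == 10, i.e. the table is rounded up to
	 * jhash_size(10) == 1024 buckets; a power of two such as 4096
	 * maps exactly to hbits == 12.
	 */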
	if (!hsize) {
		kfree(h);
		return -ENOMEM;
	}
	t = ip_set_alloc(hsize);
	if (!t) {
		kfree(h);
		return -ENOMEM;
	}
	t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
	if (!t->hregion) {
		ip_set_free(t);
		kfree(h);
		return -ENOMEM;
	}
	h->gc.set = set;
	for (i = 0; i < ahash_numof_locks(hbits); i++)
		spin_lock_init(&t->hregion[i].lock);
	h->maxelem = maxelem;
#ifdef IP_SET_HASH_WITH_NETMASK
	h->netmask = netmask;
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	h->markmask = markmask;
#endif
	if (tb[IPSET_ATTR_INITVAL])
		h->initval = ntohl(nla_get_be32(tb[IPSET_ATTR_INITVAL]));
	else
		get_random_bytes(&h->initval, sizeof(h->initval));
	h->bucketsize = AHASH_MAX_SIZE;
	if (tb[IPSET_ATTR_BUCKETSIZE]) {
		h->bucketsize = nla_get_u8(tb[IPSET_ATTR_BUCKETSIZE]);
		if (h->bucketsize < AHASH_INIT_SIZE)
			h->bucketsize = AHASH_INIT_SIZE;
		else if (h->bucketsize > AHASH_MAX_SIZE)
			h->bucketsize = AHASH_MAX_SIZE;
		else if (h->bucketsize % 2)
			h->bucketsize += 1;
	}
	t->htable_bits = hbits;
	t->maxelem = h->maxelem / ahash_numof_locks(hbits);
	RCU_INIT_POINTER(h->table, t);

	INIT_LIST_HEAD(&h->ad);
	set->data = h;
#ifndef IP_SET_PROTO_UNDEF
	if (set->family == NFPROTO_IPV4) {
#endif
		set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
		set->dsize = ip_set_elem_len(set, tb,
			sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
			__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
#ifndef IP_SET_PROTO_UNDEF
	} else {
		set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
		set->dsize = ip_set_elem_len(set, tb,
			sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
			__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
	}
#endif
	set->timeout = IPSET_NO_TIMEOUT;
	if (tb[IPSET_ATTR_TIMEOUT]) {
		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
#ifndef IP_SET_PROTO_UNDEF
		if (set->family == NFPROTO_IPV4)
#endif
			IPSET_TOKEN(HTYPE, 4_gc_init)(&h->gc);
#ifndef IP_SET_PROTO_UNDEF
		else
			IPSET_TOKEN(HTYPE, 6_gc_init)(&h->gc);
#endif
	}
	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
		 set->name, jhash_size(t->htable_bits),
		 t->htable_bits, h->maxelem, set->data, t);

	return 0;
}
#endif /* IP_SET_EMIT_CREATE */