2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
/* Kernel module metadata: license, author, and description strings. */
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
/* Compile-time debug switches: dprintf/duprintf expand to printk when the
 * corresponding DEBUG_* macro is defined, otherwise to nothing.
 * NOTE(review): this extract is missing lines (e.g. the #else/#endif pairs);
 * code kept verbatim. */
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
/* Returns nonzero/true when nexthdr is one of the IPv6 extension-header
 * protocol numbers (hop-by-hop, routing, fragment, ESP, AH, no-next-header,
 * destination options).
 * NOTE(review): the return-type line and braces are missing from this extract. */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
/* Masked interface-name comparison done word-at-a-time: XORs the two names
 * and ANDs with the mask; result is 0 iff the names agree on all masked
 * bytes. The BUILD_BUG_ON guarantees IFNAMSIZ fits in 4 unsigned longs.
 * NOTE(review): declaration of `ret` and the return statement are missing
 * from this extract; code kept verbatim. */
92 static unsigned long ifname_compare(const char *_a, const char *_b,
93 const unsigned char *_mask)
95 const unsigned long *a = (const unsigned long *)_a;
96 const unsigned long *b = (const unsigned long *)_b;
97 const unsigned long *mask = (const unsigned long *)_mask;
100 ret = (a[0] ^ b[0]) & mask[0];
101 if (IFNAMSIZ > sizeof(unsigned long))
102 ret |= (a[1] ^ b[1]) & mask[1];
103 if (IFNAMSIZ > 2 * sizeof(unsigned long))
104 ret |= (a[2] ^ b[2]) & mask[2];
105 if (IFNAMSIZ > 3 * sizeof(unsigned long))
106 ret |= (a[3] ^ b[3]) & mask[3];
107 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
111 /* Returns whether matches rule or not. */
112 /* Performance critical - called for every packet */
/* Matches a packet against the IP6-level part of a rule: source/destination
 * address (masked), in/out interface names, and protocol, each optionally
 * inverted via ip6info->invflags (FWINV).
 * NOTE(review): this extract is missing many lines (indev/outdev parameters,
 * return statements, closing braces); code kept verbatim. */
114 ip6_packet_match(const struct sk_buff *skb,
117 const struct ip6t_ip6 *ip6info,
118 unsigned int *protoff,
119 int *fragoff, bool *hotdrop)
122 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
124 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
/* Address check: masked compare of saddr/daddr, XORed with the INV flags. */
126 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
127 &ip6info->src), IP6T_INV_SRCIP)
128 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
129 &ip6info->dst), IP6T_INV_DSTIP)) {
130 dprintf("Source or dest mismatch.\n");
132 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
133 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
134 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
135 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
136 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
137 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Interface checks: nonzero ifname_compare() result means mismatch. */
141 ret = ifname_compare(indev, ip6info->iniface, ip6info->iniface_mask);
143 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
144 dprintf("VIA in mismatch (%s vs %s).%s\n",
145 indev, ip6info->iniface,
146 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
150 ret = ifname_compare(outdev, ip6info->outiface, ip6info->outiface_mask);
152 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
153 dprintf("VIA out mismatch (%s vs %s).%s\n",
154 outdev, ip6info->outiface,
155 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
159 /* ... might want to do something with class and flowlabel here ... */
161 /* look for the desired protocol header */
162 if((ip6info->flags & IP6T_F_PROTO)) {
164 unsigned short _frag_off;
/* Walks the extension-header chain to find the upper-layer protocol
 * and fragment offset. */
166 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
172 *fragoff = _frag_off;
174 dprintf("Packet protocol %hi ?= %s%hi.\n",
176 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
179 if (ip6info->proto == protohdr) {
180 if(ip6info->invflags & IP6T_INV_PROTO) {
186 /* We need match for the '-p all', too! */
187 if ((ip6info->proto != 0) &&
188 !(ip6info->invflags & IP6T_INV_PROTO))
194 /* should be ip6 safe */
/* Validates the user-supplied ip6t_ip6: rejects unknown flag or invflag
 * bits outside IP6T_F_MASK / IP6T_INV_MASK.
 * NOTE(review): return statements and braces are missing from this extract. */
196 ip6_checkentry(const struct ip6t_ip6 *ipv6)
198 if (ipv6->flags & ~IP6T_F_MASK) {
199 duprintf("Unknown flag bits set: %08X\n",
200 ipv6->flags & ~IP6T_F_MASK);
203 if (ipv6->invflags & ~IP6T_INV_MASK) {
204 duprintf("Unknown invflag bits set: %08X\n",
205 ipv6->invflags & ~IP6T_INV_MASK);
/* Target of the built-in ERROR rule: logs the error-target's name stored in
 * targinfo. Presumably returns NF_DROP — TODO confirm; the return line is
 * missing from this extract. */
212 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
215 printk("ip6_tables: error: `%s'\n",
216 (const char *)par->targinfo);
221 /* Performance critical - called for every packet */
/* Runs one match extension against the packet: fills par with the match
 * and its private data, then invokes the match callback.
 * NOTE(review): return statements are missing from this extract. */
223 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
224 struct xt_match_param *par)
226 par->match = m->u.kernel.match;
227 par->matchinfo = m->data;
229 /* Stop iteration if it doesn't match */
230 if (!m->u.kernel.match->match(skb, par))
/* Returns the rule entry located `offset` bytes into the table blob. */
236 static inline struct ip6t_entry *
237 get_entry(void *base, unsigned int offset)
239 return (struct ip6t_entry *)(base + offset);
242 /* All zeroes == unconditional rule. */
243 /* Mildly perf critical (only if packet tracing is on) */
/* True when every byte of the ip6t_ip6 match part is zero, i.e. the rule
 * matches any packet. */
245 unconditional(const struct ip6t_ip6 *ipv6)
249 for (i = 0; i < sizeof(*ipv6); i++)
250 if (((char *)ipv6)[i])
253 return (i == sizeof(*ipv6));
/* Tables used only when the TRACE target is built: human-readable hook
 * names, trace-comment strings, and the nf_log configuration for TRACE
 * output. NOTE(review): closing braces/semicolons of these initializers
 * are missing from this extract; code kept verbatim. */
256 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
257 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
258 /* This cries for unification! */
259 static const char *const hooknames[] = {
260 [NF_INET_PRE_ROUTING] = "PREROUTING",
261 [NF_INET_LOCAL_IN] = "INPUT",
262 [NF_INET_FORWARD] = "FORWARD",
263 [NF_INET_LOCAL_OUT] = "OUTPUT",
264 [NF_INET_POST_ROUTING] = "POSTROUTING",
267 enum nf_ip_trace_comments {
268 NF_IP6_TRACE_COMMENT_RULE,
269 NF_IP6_TRACE_COMMENT_RETURN,
270 NF_IP6_TRACE_COMMENT_POLICY,
273 static const char *const comments[] = {
274 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
275 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
276 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
279 static struct nf_loginfo trace_loginfo = {
280 .type = NF_LOG_TYPE_LOG,
284 .logflags = NF_LOG_MASK,
289 /* Mildly perf critical (only if packet tracing is on) */
/* Iterator callback for TRACE: walks entries from s toward the matched
 * entry e, tracking the current chain name (set by ERROR targets that head
 * user chains) and whether the end-of-chain entry is a policy or a return.
 * NOTE(review): rulenum bookkeeping and return lines are missing from this
 * extract; code kept verbatim. */
291 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
292 char *hookname, char **chainname,
293 char **comment, unsigned int *rulenum)
295 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
297 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
298 /* Head of user chain: ERROR target with chainname */
299 *chainname = t->target.data;
304 if (s->target_offset == sizeof(struct ip6t_entry)
305 && strcmp(t->target.u.kernel.target->name,
306 IP6T_STANDARD_TARGET) == 0
308 && unconditional(&s->ipv6)) {
309 /* Tail of chains: STANDARD target (return/policy) */
310 *comment = *chainname == hookname
311 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
312 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
/* Emits a "TRACE: table:chain:comment:rulenum" log line for a traced packet
 * by scanning this CPU's copy of the table from the hook entry point to the
 * matched entry e.
 * NOTE(review): the `hook` parameter and table_base declaration lines are
 * missing from this extract; code kept verbatim. */
321 static void trace_packet(struct sk_buff *skb,
323 const struct net_device *in,
324 const struct net_device *out,
325 const char *tablename,
326 struct xt_table_info *private,
327 struct ip6t_entry *e)
330 const struct ip6t_entry *root;
331 char *hookname, *chainname, *comment;
332 unsigned int rulenum = 0;
334 table_base = (void *)private->entries[smp_processor_id()];
335 root = get_entry(table_base, private->hook_entry[hook]);
337 hookname = chainname = (char *)hooknames[hook];
338 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
340 IP6T_ENTRY_ITERATE(root,
341 private->size - private->hook_entry[hook],
342 get_chainname_rulenum,
343 e, hookname, &chainname, &comment, &rulenum);
345 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
346 "TRACE: %s:%s:%s:%u ",
347 tablename, chainname, comment, rulenum);
351 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main rule-traversal engine: walks this CPU's copy of the table from the
 * hook's entry point, matching each rule (ip6_packet_match + match
 * extensions), updating per-rule byte/packet counters, and executing the
 * target. Standard targets implement jump/return via a back-pointer stack
 * stored in the entries themselves; extension targets are called through
 * t->u.kernel.target->target().
 * NOTE(review): this extract is missing many lines (the `hook` parameter,
 * rcu_read_lock/unlock, the main do/while loop braces, final return);
 * code kept verbatim. */
353 ip6t_do_table(struct sk_buff *skb,
355 const struct net_device *in,
356 const struct net_device *out,
357 struct xt_table *table)
359 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
360 bool hotdrop = false;
361 /* Initializing verdict to NF_DROP keeps gcc happy. */
362 unsigned int verdict = NF_DROP;
363 const char *indev, *outdev;
365 struct ip6t_entry *e, *back;
366 struct xt_table_info *private;
367 struct xt_match_param mtpar;
368 struct xt_target_param tgpar;
371 indev = in ? in->name : nulldevname;
372 outdev = out ? out->name : nulldevname;
373 /* We handle fragments by dealing with the first fragment as
374 * if it was a normal packet. All other fragments are treated
375 * normally, except that they will NEVER match rules that ask
376 * things we don't know, ie. tcp syn flag or ports). If the
377 * rule is also a fragment-specific rule, non-fragments won't
379 mtpar.hotdrop = &hotdrop;
380 mtpar.in = tgpar.in = in;
381 mtpar.out = tgpar.out = out;
382 mtpar.family = tgpar.family = NFPROTO_IPV6;
383 tgpar.hooknum = hook;
385 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Read-side table access is RCU-protected; each CPU has its own copy. */
388 private = rcu_dereference(table->private);
389 table_base = rcu_dereference(private->entries[smp_processor_id()]);
391 e = get_entry(table_base, private->hook_entry[hook]);
393 /* For return from builtin chain */
394 back = get_entry(table_base, private->underflow[hook]);
399 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
400 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
401 struct ip6t_entry_target *t;
403 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
/* Account IPv6 header + payload bytes and one packet to this rule. */
406 ADD_COUNTER(e->counters,
407 ntohs(ipv6_hdr(skb)->payload_len) +
408 sizeof(struct ipv6hdr), 1);
410 t = ip6t_get_target(e);
411 IP_NF_ASSERT(t->u.kernel.target);
413 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
414 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
415 /* The packet is traced: log it */
416 if (unlikely(skb->nf_trace))
417 trace_packet(skb, hook, in, out,
418 table->name, private, e);
420 /* Standard target? */
421 if (!t->u.kernel.target->target) {
424 v = ((struct ip6t_standard_target *)t)->verdict;
426 /* Pop from stack? */
427 if (v != IP6T_RETURN) {
/* Negative verdict encodes NF_* verdict as -(verdict) - 1. */
428 verdict = (unsigned)(-v) - 1;
432 back = get_entry(table_base,
436 if (table_base + v != (void *)e + e->next_offset
437 && !(e->ipv6.flags & IP6T_F_GOTO)) {
438 /* Save old back ptr in next entry */
439 struct ip6t_entry *next
440 = (void *)e + e->next_offset;
442 = (void *)back - table_base;
443 /* set back pointer to next entry */
447 e = get_entry(table_base, v);
449 /* Targets which reenter must return
451 tgpar.target = t->u.kernel.target;
452 tgpar.targinfo = t->data;
454 #ifdef CONFIG_NETFILTER_DEBUG
455 ((struct ip6t_entry *)table_base)->comefrom
458 verdict = t->u.kernel.target->target(skb,
461 #ifdef CONFIG_NETFILTER_DEBUG
462 if (((struct ip6t_entry *)table_base)->comefrom
464 && verdict == IP6T_CONTINUE) {
465 printk("Target %s reentered!\n",
466 t->u.kernel.target->name);
469 ((struct ip6t_entry *)table_base)->comefrom
472 if (verdict == IP6T_CONTINUE)
473 e = (void *)e + e->next_offset;
481 e = (void *)e + e->next_offset;
485 #ifdef CONFIG_NETFILTER_DEBUG
486 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
490 #ifdef DEBUG_ALLOW_ALL
499 /* Figures out from what hook each rule can be called: returns 0 if
500 there are loops. Puts hook bitmask in comefrom. */
/* Depth-first walk over the rule graph, one pass per valid hook. Instead of
 * recursion it reuses e->counters.pcnt as a back-pointer stack and sets bit
 * NF_INET_NUMHOOKS in comefrom to mark entries on the current path (seeing
 * that bit again means a loop). Also validates standard-target verdicts and
 * jump destinations.
 * NOTE(review): this extract has large gaps (loop braces, `continue`s,
 * several returns); code kept verbatim. */
502 mark_source_chains(struct xt_table_info *newinfo,
503 unsigned int valid_hooks, void *entry0)
507 /* No recursion; use packet counter to save back ptrs (reset
508 to 0 as we leave), and comefrom to save source hook bitmask */
509 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
510 unsigned int pos = newinfo->hook_entry[hook];
511 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
513 if (!(valid_hooks & (1 << hook)))
516 /* Set initial back pointer. */
517 e->counters.pcnt = pos;
520 struct ip6t_standard_target *t
521 = (void *)ip6t_get_target(e);
522 int visited = e->comefrom & (1 << hook);
/* Bit NF_INET_NUMHOOKS set means this entry is on the current path:
 * revisiting it is a rule loop. */
524 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
525 printk("iptables: loop hook %u pos %u %08X.\n",
526 hook, pos, e->comefrom);
529 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
531 /* Unconditional return/END. */
532 if ((e->target_offset == sizeof(struct ip6t_entry)
533 && (strcmp(t->target.u.user.name,
534 IP6T_STANDARD_TARGET) == 0)
536 && unconditional(&e->ipv6)) || visited) {
537 unsigned int oldpos, size;
539 if (t->verdict < -NF_MAX_VERDICT - 1) {
540 duprintf("mark_source_chains: bad "
541 "negative verdict (%i)\n",
546 /* Return: backtrack through the last
549 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
550 #ifdef DEBUG_IP_FIREWALL_USER
552 & (1 << NF_INET_NUMHOOKS)) {
553 duprintf("Back unset "
/* Pop the saved back-pointer and clear it as we unwind. */
560 pos = e->counters.pcnt;
561 e->counters.pcnt = 0;
563 /* We're at the start. */
567 e = (struct ip6t_entry *)
569 } while (oldpos == pos + e->next_offset);
572 size = e->next_offset;
573 e = (struct ip6t_entry *)
574 (entry0 + pos + size);
575 e->counters.pcnt = pos;
578 int newpos = t->verdict;
580 if (strcmp(t->target.u.user.name,
581 IP6T_STANDARD_TARGET) == 0
583 if (newpos > newinfo->size -
584 sizeof(struct ip6t_entry)) {
585 duprintf("mark_source_chains: "
586 "bad verdict (%i)\n",
590 /* This a jump; chase it. */
591 duprintf("Jump rule %u -> %u\n",
594 /* ... this is a fallthru */
595 newpos = pos + e->next_offset;
597 e = (struct ip6t_entry *)
599 e->counters.pcnt = pos;
604 duprintf("Finished chain %u\n", hook);
/* Iterator callback: destroys one match extension (calls its destructor if
 * any) and drops its module reference. When i is non-NULL, stops after *i
 * matches have been cleaned (used to unwind partial setup).
 * NOTE(review): return lines and braces are missing from this extract. */
610 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
612 struct xt_mtdtor_param par;
614 if (i && (*i)-- == 0)
617 par.match = m->u.kernel.match;
618 par.matchinfo = m->data;
619 par.family = NFPROTO_IPV6;
620 if (par.match->destroy != NULL)
621 par.match->destroy(&par);
622 module_put(par.match->me);
/* Basic structural validation of one entry: the ip6t_ip6 part must pass
 * ip6_checkentry, and the target must fit between target_offset and
 * next_offset. NOTE(review): error returns are missing from this extract. */
627 check_entry(struct ip6t_entry *e, const char *name)
629 struct ip6t_entry_target *t;
631 if (!ip6_checkentry(&e->ipv6)) {
632 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
636 if (e->target_offset + sizeof(struct ip6t_entry_target) >
640 t = ip6t_get_target(e);
641 if (e->target_offset + t->u.target_size > e->next_offset)
/* Runs the x_tables core checks (xt_check_match) for one match extension,
 * passing the rule's protocol and proto-inversion flag.
 * NOTE(review): a parameter and the return path are missing from this
 * extract; code kept verbatim. */
647 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
650 const struct ip6t_ip6 *ipv6 = par->entryinfo;
653 par->match = m->u.kernel.match;
654 par->matchinfo = m->data;
656 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
657 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
659 duprintf("ip_tables: check failed for `%s'.\n",
/* Looks up (auto-loading "ip6t_<name>" if needed) the match extension named
 * in the rule, takes a module reference, then validates it via check_match;
 * drops the reference on failure.
 * NOTE(review): lines are missing from this extract; code kept verbatim. */
668 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
671 struct xt_match *match;
674 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
676 "ip6t_%s", m->u.user.name);
677 if (IS_ERR(match) || !match) {
678 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
679 return match ? PTR_ERR(match) : -ENOENT;
681 m->u.kernel.match = match;
683 ret = check_match(m, par, i);
689 module_put(m->u.kernel.match->me);
/* Runs the x_tables core checks (xt_check_target) for the entry's target,
 * passing the hook mask recorded in e->comefrom by mark_source_chains.
 * NOTE(review): some initializer fields and the return path are missing
 * from this extract; code kept verbatim. */
693 static int check_target(struct ip6t_entry *e, const char *name)
695 struct ip6t_entry_target *t = ip6t_get_target(e);
696 struct xt_tgchk_param par = {
699 .target = t->u.kernel.target,
701 .hook_mask = e->comefrom,
702 .family = NFPROTO_IPV6,
706 t = ip6t_get_target(e);
707 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
708 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
710 duprintf("ip_tables: check failed for `%s'.\n",
711 t->u.kernel.target->name);
/* Full per-entry setup: structural check, then find+check every match
 * extension, then find+check the target. On any failure, unwinds with
 * cleanup_match over the j matches already set up and drops module refs.
 * NOTE(review): gaps in this extract hide the `j` counter init and some
 * error paths; code kept verbatim. */
718 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
721 struct ip6t_entry_target *t;
722 struct xt_target *target;
725 struct xt_mtchk_param mtpar;
727 ret = check_entry(e, name);
733 mtpar.entryinfo = &e->ipv6;
734 mtpar.hook_mask = e->comefrom;
735 mtpar.family = NFPROTO_IPV6;
736 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
738 goto cleanup_matches;
740 t = ip6t_get_target(e);
741 target = try_then_request_module(xt_find_target(AF_INET6,
744 "ip6t_%s", t->u.user.name);
745 if (IS_ERR(target) || !target) {
746 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
747 ret = target ? PTR_ERR(target) : -ENOENT;
748 goto cleanup_matches;
750 t->u.kernel.target = target;
752 ret = check_target(e, name);
759 module_put(t->u.kernel.target->me);
761 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/* First-pass validation while walking the user-supplied blob: checks entry
 * alignment and bounds, records which offsets correspond to the declared
 * hook entry points and underflows, and zeroes the counters/comefrom fields.
 * NOTE(review): gaps in this extract hide the `base` parameter, size checks
 * and returns; code kept verbatim. */
766 check_entry_size_and_hooks(struct ip6t_entry *e,
767 struct xt_table_info *newinfo,
769 unsigned char *limit,
770 const unsigned int *hook_entries,
771 const unsigned int *underflows,
776 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
777 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
778 duprintf("Bad offset %p\n", e);
783 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
784 duprintf("checking: element %p size %u\n",
789 /* Check hooks & underflows */
790 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
791 if ((unsigned char *)e - base == hook_entries[h])
792 newinfo->hook_entry[h] = hook_entries[h];
793 if ((unsigned char *)e - base == underflows[h])
794 newinfo->underflow[h] = underflows[h];
797 /* FIXME: underflows must be unconditional, standard verdicts
798 < 0 (not IP6T_RETURN). --RR */
800 /* Clear counters and comefrom */
801 e->counters = ((struct xt_counters) { 0, 0 });
/* Tears down one fully-initialized entry: cleans all matches, then runs the
 * target's destructor (if any) and releases its module reference. When i is
 * non-NULL, stops after *i entries (partial-unwind helper).
 * NOTE(review): return lines are missing from this extract. */
809 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
811 struct xt_tgdtor_param par;
812 struct ip6t_entry_target *t;
814 if (i && (*i)-- == 0)
817 /* Cleanup all matches */
818 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
819 t = ip6t_get_target(e);
821 par.target = t->u.kernel.target;
822 par.targinfo = t->data;
823 par.family = NFPROTO_IPV6;
824 if (par.target->destroy != NULL)
825 par.target->destroy(&par);
826 module_put(par.target->me);
830 /* Checks and translates the user-supplied table segment (held in
/* Full table build pipeline: size/offset walk (check_entry_size_and_hooks),
 * verify every valid hook got an entry point and underflow, detect loops
 * (mark_source_chains), run per-entry extension checks (find_check_entry,
 * with cleanup_entry unwind on failure), then replicate the blob to every
 * other CPU's copy.
 * NOTE(review): gaps hide parameters (entry0, size, number), error returns
 * and braces; code kept verbatim. */
833 translate_table(const char *name,
834 unsigned int valid_hooks,
835 struct xt_table_info *newinfo,
839 const unsigned int *hook_entries,
840 const unsigned int *underflows)
845 newinfo->size = size;
846 newinfo->number = number;
848 /* Init all hooks to impossible value. */
849 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
850 newinfo->hook_entry[i] = 0xFFFFFFFF;
851 newinfo->underflow[i] = 0xFFFFFFFF;
854 duprintf("translate_table: size %u\n", newinfo->size);
856 /* Walk through entries, checking offsets. */
857 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
858 check_entry_size_and_hooks,
862 hook_entries, underflows, &i);
867 duprintf("translate_table: %u not %u entries\n",
872 /* Check hooks all assigned */
873 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
874 /* Only hooks which are valid */
875 if (!(valid_hooks & (1 << i)))
877 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
878 duprintf("Invalid hook entry %u %u\n",
882 if (newinfo->underflow[i] == 0xFFFFFFFF) {
883 duprintf("Invalid underflow %u %u\n",
889 if (!mark_source_chains(newinfo, valid_hooks, entry0))
892 /* Finally, each sanity check must pass */
894 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
895 find_check_entry, name, size, &i);
898 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
903 /* And one copy for every other CPU */
904 for_each_possible_cpu(i) {
905 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
906 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Iterator callback: accumulates one entry's byte/packet counters into
 * total[*i]. NOTE(review): the index parameter and increment are missing
 * from this extract. */
914 add_entry_to_counter(const struct ip6t_entry *e,
915 struct xt_counters total[],
918 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Iterator callback: overwrites total[*i] with one entry's counters (used
 * for the first CPU instead of memset + add). */
925 set_entry_to_counter(const struct ip6t_entry *e,
926 struct ip6t_counters total[],
929 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Sums per-CPU rule counters into the counters[] array: SET from the
 * current CPU's copy first, then ADD from every other CPU's copy.
 * NOTE(review): the skip-current-cpu check and index resets are missing
 * from this extract; code kept verbatim. */
936 get_counters(const struct xt_table_info *t,
937 struct xt_counters counters[])
943 /* Instead of clearing (by a previous call to memset())
944 * the counters and using adds, we set the counters
945 * with data used by 'current' CPU
946 * We dont care about preemption here.
948 curcpu = raw_smp_processor_id();
951 IP6T_ENTRY_ITERATE(t->entries[curcpu],
953 set_entry_to_counter,
957 for_each_possible_cpu(cpu) {
961 IP6T_ENTRY_ITERATE(t->entries[cpu],
963 add_entry_to_counter,
969 /* We're lazy, and add to the first CPU; overflow works its fey magic
970 * and everything is OK. */
/* Iterator callback: adds the user-supplied counters addme[*i] onto one
 * entry's counters. */
972 add_counter_to_entry(struct ip6t_entry *e,
973 const struct xt_counters addme[],
976 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
982 /* Take values from counters and add them back onto the current cpu */
/* Adds the snapshot counters[] back onto the current CPU's table copy
 * (used after the RCU swap in alloc_counters so counts are not lost). */
983 static void put_counters(struct xt_table_info *t,
984 const struct xt_counters counters[])
989 cpu = smp_processor_id();
991 IP6T_ENTRY_ITERATE(t->entries[cpu],
993 add_counter_to_entry,
/* Iterator callback: resets one entry's byte and packet counters to zero. */
1000 zero_entry_counter(struct ip6t_entry *e, void *arg)
1002 e->counters.bcnt = 0;
1003 e->counters.pcnt = 0;
/* Builds a zero-counter clone of a table: copies the header fields, copies
 * this CPU's entry blob into every CPU slot of newinfo, then zeroes all
 * counters in each copy. */
1008 clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
1011 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
1013 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1014 for_each_possible_cpu(cpu) {
1015 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
1016 IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
1017 zero_entry_counter, NULL);
/* Produces an atomic snapshot of all rule counters: allocates the result
 * array, swaps in a zeroed clone of the table under table->lock (RCU),
 * waits for readers, harvests the old counters and adds them back onto the
 * live table. Returns the vmalloc'ed array, or ERR_PTR(-ENOMEM).
 * NOTE(review): some allocation-failure branches and the success return are
 * missing from this extract; code kept verbatim. */
1021 static struct xt_counters *alloc_counters(struct xt_table *table)
1023 unsigned int countersize;
1024 struct xt_counters *counters;
1025 struct xt_table_info *private = table->private;
1026 struct xt_table_info *info;
1028 /* We need atomic snapshot of counters: rest doesn't change
1029 (other than comefrom, which userspace doesn't care
1031 countersize = sizeof(struct xt_counters) * private->number;
1032 counters = vmalloc_node(countersize, numa_node_id());
1034 if (counters == NULL)
1037 info = xt_alloc_table_info(private->size);
1041 clone_counters(info, private);
1043 mutex_lock(&table->lock);
1044 xt_table_entry_swap_rcu(private, info);
1045 synchronize_net(); /* Wait until smoke has cleared */
1047 get_counters(info, counters);
1048 put_counters(private, counters);
1049 mutex_unlock(&table->lock);
1051 xt_free_table_info(info);
1056 return ERR_PTR(-ENOMEM);
/* Copies the whole table blob to userspace, then patches each entry in the
 * user buffer: fills in the snapshotted counters and replaces kernel match/
 * target pointers with their user-visible names.
 * NOTE(review): error-handling lines (ret, free paths) are missing from
 * this extract; code kept verbatim. */
1060 copy_entries_to_user(unsigned int total_size,
1061 struct xt_table *table,
1062 void __user *userptr)
1064 unsigned int off, num;
1065 struct ip6t_entry *e;
1066 struct xt_counters *counters;
1067 const struct xt_table_info *private = table->private;
1069 const void *loc_cpu_entry;
1071 counters = alloc_counters(table);
1072 if (IS_ERR(counters))
1073 return PTR_ERR(counters);
1075 /* choose the copy that is on our node/cpu, ...
1076 * This choice is lazy (because current thread is
1077 * allowed to migrate to another cpu)
1079 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1080 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1085 /* FIXME: use iterator macros --RR */
1086 /* ... then go back and fix counters and names */
1087 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1089 const struct ip6t_entry_match *m;
1090 const struct ip6t_entry_target *t;
1092 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1093 if (copy_to_user(userptr + off
1094 + offsetof(struct ip6t_entry, counters),
1096 sizeof(counters[num])) != 0) {
1101 for (i = sizeof(struct ip6t_entry);
1102 i < e->target_offset;
1103 i += m->u.match_size) {
1106 if (copy_to_user(userptr + off + i
1107 + offsetof(struct ip6t_entry_match,
1109 m->u.kernel.match->name,
1110 strlen(m->u.kernel.match->name)+1)
1117 t = ip6t_get_target(e);
1118 if (copy_to_user(userptr + off + e->target_offset
1119 + offsetof(struct ip6t_entry_target,
1121 t->u.kernel.target->name,
1122 strlen(t->u.kernel.target->name)+1) != 0) {
1133 #ifdef CONFIG_COMPAT
/* 32-bit compat: translates a standard-target verdict from the compat
 * layout, adjusting jump offsets by the accumulated size delta. */
1134 static void compat_standard_from_user(void *dst, void *src)
1136 int v = *(compat_int_t *)src;
1139 v += xt_compat_calc_jump(AF_INET6, v);
1140 memcpy(dst, &v, sizeof(v));
/* 32-bit compat: converts a standard-target verdict back to the compat
 * layout (subtracting the jump adjustment) and copies it to userspace.
 * Returns -EFAULT on copy failure, 0 on success. */
1143 static int compat_standard_to_user(void __user *dst, void *src)
1145 compat_int_t cv = *(int *)src;
1148 cv -= xt_compat_calc_jump(AF_INET6, cv);
1149 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* Iterator callback: adds this match's native-vs-compat size difference
 * to the running offset. */
1153 compat_calc_match(struct ip6t_entry_match *m, int *size)
1155 *size += xt_compat_match_offset(m->u.kernel.match);
/* Computes how much smaller one entry is in the 32-bit compat layout
 * (entry header + every match + target), records the per-entry offset for
 * later jump fixups, and shrinks newinfo's size and any hook entry /
 * underflow offsets that lie past this entry.
 * NOTE(review): declarations of off/ret/i and the return are missing from
 * this extract; code kept verbatim. */
1159 static int compat_calc_entry(struct ip6t_entry *e,
1160 const struct xt_table_info *info,
1161 void *base, struct xt_table_info *newinfo)
1163 struct ip6t_entry_target *t;
1164 unsigned int entry_offset;
1167 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1168 entry_offset = (void *)e - base;
1169 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1170 t = ip6t_get_target(e);
1171 off += xt_compat_target_offset(t->u.kernel.target);
1172 newinfo->size -= off;
1173 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1177 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1178 if (info->hook_entry[i] &&
1179 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1180 newinfo->hook_entry[i] -= off;
1181 if (info->underflow[i] &&
1182 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1183 newinfo->underflow[i] -= off;
/* Fills newinfo with the compat-layout metadata of a table: copies the
 * header, then walks this CPU's entries with compat_calc_entry to shrink
 * sizes/offsets to their 32-bit values. */
1188 static int compat_table_info(const struct xt_table_info *info,
1189 struct xt_table_info *newinfo)
1191 void *loc_cpu_entry;
1193 if (!newinfo || !info)
1196 /* we dont care about newinfo->entries[] */
1197 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1198 newinfo->initial_entries = 0;
1199 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1200 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1201 compat_calc_entry, info, loc_cpu_entry,
/* IP6T_SO_GET_INFO handler: copies the table name from userspace, looks the
 * table up (auto-loading "ip6table_<name>"), and returns its hook entry
 * points, underflows, entry count and size. Under compat, sizes/offsets are
 * first converted via compat_table_info.
 * NOTE(review): length-check returns, unlock/module_put paths and braces
 * are missing from this extract; code kept verbatim. */
1206 static int get_info(struct net *net, void __user *user, int *len, int compat)
1208 char name[IP6T_TABLE_MAXNAMELEN];
1212 if (*len != sizeof(struct ip6t_getinfo)) {
1213 duprintf("length %u != %zu\n", *len,
1214 sizeof(struct ip6t_getinfo));
1218 if (copy_from_user(name, user, sizeof(name)) != 0)
1221 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1222 #ifdef CONFIG_COMPAT
1224 xt_compat_lock(AF_INET6);
1226 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1227 "ip6table_%s", name);
1228 if (t && !IS_ERR(t)) {
1229 struct ip6t_getinfo info;
1230 const struct xt_table_info *private = t->private;
1232 #ifdef CONFIG_COMPAT
1234 struct xt_table_info tmp;
1235 ret = compat_table_info(private, &tmp);
1236 xt_compat_flush_offsets(AF_INET6);
1240 info.valid_hooks = t->valid_hooks;
1241 memcpy(info.hook_entry, private->hook_entry,
1242 sizeof(info.hook_entry));
1243 memcpy(info.underflow, private->underflow,
1244 sizeof(info.underflow));
1245 info.num_entries = private->number;
1246 info.size = private->size;
1247 strcpy(info.name, name);
1249 if (copy_to_user(user, &info, *len) != 0)
1257 ret = t ? PTR_ERR(t) : -ENOENT;
1258 #ifdef CONFIG_COMPAT
1260 xt_compat_unlock(AF_INET6);
/* IP6T_SO_GET_ENTRIES handler: validates the requested size against the
 * live table and copies all entries (with counters and extension names) to
 * userspace via copy_entries_to_user.
 * NOTE(review): error returns, unlock and module_put lines are missing from
 * this extract; code kept verbatim. */
1266 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1269 struct ip6t_get_entries get;
1272 if (*len < sizeof(get)) {
1273 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1276 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1278 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1279 duprintf("get_entries: %u != %zu\n",
1280 *len, sizeof(get) + get.size);
1284 t = xt_find_table_lock(net, AF_INET6, get.name);
1285 if (t && !IS_ERR(t)) {
1286 struct xt_table_info *private = t->private;
1287 duprintf("t->private->number = %u\n", private->number);
1288 if (get.size == private->size)
1289 ret = copy_entries_to_user(private->size,
1290 t, uptr->entrytable);
1292 duprintf("get_entries: I've got %u not %u!\n",
1293 private->size, get.size);
1299 ret = t ? PTR_ERR(t) : -ENOENT;
/* Core of table replacement (shared by native and compat paths): looks up
 * the table, swaps in newinfo via xt_replace_table, adjusts the module use
 * count, harvests the old table's counters into userspace, and tears down
 * the old entries.
 * NOTE(review): several declarations, error labels and returns are missing
 * from this extract; code kept verbatim. */
1305 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1306 struct xt_table_info *newinfo, unsigned int num_counters,
1307 void __user *counters_ptr)
1311 struct xt_table_info *oldinfo;
1312 struct xt_counters *counters;
1313 const void *loc_cpu_old_entry;
1316 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1323 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1324 "ip6table_%s", name);
1325 if (!t || IS_ERR(t)) {
1326 ret = t ? PTR_ERR(t) : -ENOENT;
1327 goto free_newinfo_counters_untrans;
1331 if (valid_hooks != t->valid_hooks) {
1332 duprintf("Valid hook crap: %08X vs %08X\n",
1333 valid_hooks, t->valid_hooks);
1338 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1342 /* Update module usage count based on number of rules */
1343 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1344 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1345 if ((oldinfo->number > oldinfo->initial_entries) ||
1346 (newinfo->number <= oldinfo->initial_entries))
1348 if ((oldinfo->number > oldinfo->initial_entries) &&
1349 (newinfo->number <= oldinfo->initial_entries))
1352 /* Get the old counters. */
1353 get_counters(oldinfo, counters);
1354 /* Decrease module usage counts and free resource */
1355 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1356 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1358 xt_free_table_info(oldinfo);
1359 if (copy_to_user(counters_ptr, counters,
1360 sizeof(struct xt_counters) * num_counters) != 0)
1369 free_newinfo_counters_untrans:
/* IP6T_SO_SET_REPLACE handler: copies the ip6t_replace header and entry
 * blob from userspace, validates/translates it (translate_table), then
 * installs it via __do_replace; on failure, entries are cleaned up and the
 * new table info freed.
 * NOTE(review): some error branches and the final return are missing from
 * this extract; code kept verbatim. */
1376 do_replace(struct net *net, void __user *user, unsigned int len)
1379 struct ip6t_replace tmp;
1380 struct xt_table_info *newinfo;
1381 void *loc_cpu_entry;
1383 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1386 /* overflow check */
1387 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1390 newinfo = xt_alloc_table_info(tmp.size);
1394 /* choose the copy that is on our node/cpu */
1395 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1396 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1402 ret = translate_table(tmp.name, tmp.valid_hooks,
1403 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1404 tmp.hook_entry, tmp.underflow);
1408 duprintf("ip_tables: Translated table\n");
1410 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1411 tmp.num_counters, tmp.counters);
1413 goto free_newinfo_untrans;
1416 free_newinfo_untrans:
1417 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1419 xt_free_table_info(newinfo);
/* IP6T_SO_SET_ADD_COUNTERS handler: reads a (native or 32-bit compat)
 * xt_counters_info header plus counter array from userspace, and under
 * t->lock adds each counter pair onto the matching rule of the current
 * CPU's table copy.
 * NOTE(review): compat branches, size checks and cleanup paths are partly
 * missing from this extract; code kept verbatim. */
1424 do_add_counters(struct net *net, void __user *user, unsigned int len,
1428 struct xt_counters_info tmp;
1429 struct xt_counters *paddc;
1430 unsigned int num_counters;
1435 const struct xt_table_info *private;
1437 const void *loc_cpu_entry;
1438 #ifdef CONFIG_COMPAT
1439 struct compat_xt_counters_info compat_tmp;
1443 size = sizeof(struct compat_xt_counters_info);
1448 size = sizeof(struct xt_counters_info);
1451 if (copy_from_user(ptmp, user, size) != 0)
1454 #ifdef CONFIG_COMPAT
1456 num_counters = compat_tmp.num_counters;
1457 name = compat_tmp.name;
1461 num_counters = tmp.num_counters;
1465 if (len != size + num_counters * sizeof(struct xt_counters))
1468 paddc = vmalloc_node(len - size, numa_node_id());
1472 if (copy_from_user(paddc, user + size, len - size) != 0) {
1477 t = xt_find_table_lock(net, AF_INET6, name);
1478 if (!t || IS_ERR(t)) {
1479 ret = t ? PTR_ERR(t) : -ENOENT;
1483 mutex_lock(&t->lock);
1484 private = t->private;
1485 if (private->number != num_counters) {
1487 goto unlock_up_free;
1492 /* Choose the copy that is on our node */
1493 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1494 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1496 add_counter_to_entry,
1501 mutex_unlock(&t->lock);
1510 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace: same fields but with
 * compat-sized pointer (counters) and trailing compat entries.
 * NOTE(review): several fields and the closing brace are missing from this
 * extract; code kept verbatim. */
1511 struct compat_ip6t_replace {
1512 char name[IP6T_TABLE_MAXNAMELEN];
1516 u32 hook_entry[NF_INET_NUMHOOKS];
1517 u32 underflow[NF_INET_NUMHOOKS];
1519 compat_uptr_t counters; /* struct ip6t_counters * */
1520 struct compat_ip6t_entry entries[0];
/*
 * compat_copy_entry_to_user - serialise one kernel ip6t_entry (plus its
 * matches and target) into the 32-bit compat layout at *dstptr, copying
 * this rule's counters from counters[*i], and rewriting target_offset /
 * next_offset to account for the native-vs-compat size difference.
 */
1524 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1525 unsigned int *size, struct xt_counters *counters,
1528 struct ip6t_entry_target *t;
1529 struct compat_ip6t_entry __user *ce;
1530 u_int16_t target_offset, next_offset;
1531 compat_uint_t origsize;
1536 ce = (struct compat_ip6t_entry __user *)*dstptr;
1537 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1540 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
/* Advance by the compat entry size; *size tracks how much we shrank. */
1543 *dstptr += sizeof(struct compat_ip6t_entry);
1544 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1546 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
/* (origsize - *size) is the cumulative shrink so far; shift offsets by it. */
1547 target_offset = e->target_offset - (origsize - *size);
1550 t = ip6t_get_target(e);
1551 ret = xt_compat_target_to_user(t, dstptr, size);
1555 next_offset = e->next_offset - (origsize - *size);
1556 if (put_user(target_offset, &ce->target_offset))
1558 if (put_user(next_offset, &ce->next_offset))
/*
 * compat_find_calc_match - resolve the match named in @m (auto-loading
 * the ip6t_<name> module if necessary), stash it in m->u.kernel.match,
 * and accumulate its native-vs-compat size delta into *size.
 * Returns 0 on success or a negative errno.
 */
1568 compat_find_calc_match(struct ip6t_entry_match *m,
1570 const struct ip6t_ip6 *ipv6,
1571 unsigned int hookmask,
1572 int *size, unsigned int *i)
1574 struct xt_match *match;
1576 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1577 m->u.user.revision),
1578 "ip6t_%s", m->u.user.name);
1579 if (IS_ERR(match) || !match) {
1580 duprintf("compat_check_calc_match: `%s' not found\n",
1582 return match ? PTR_ERR(match) : -ENOENT;
1584 m->u.kernel.match = match;
1585 *size += xt_compat_match_offset(match);
/*
 * compat_release_match - drop the module reference held on one match.
 * When used as an iterator callback, a non-NULL *i bounds how many
 * matches are released (stops once the count is exhausted).
 */
1592 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1594 if (i && (*i)-- == 0)
1597 module_put(m->u.kernel.match->me);
/*
 * compat_release_entry - release everything one compat entry pinned:
 * all of its matches' module refs, then its target's module ref.
 * A non-NULL *i bounds how many entries are released in an iteration.
 */
1602 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1604 struct ip6t_entry_target *t;
1606 if (i && (*i)-- == 0)
1609 /* Cleanup all matches */
1610 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1611 t = compat_ip6t_get_target(e);
1612 module_put(t->u.kernel.target->me);
/*
 * check_compat_entry_size_and_hooks - first-pass validation of one
 * compat-layout entry: check alignment, bounds and minimum size, run
 * the generic check_entry() sanity checks, resolve all matches and the
 * target (taking module references and summing their compat size
 * deltas into off), record the entry's compat->native offset via
 * xt_compat_add_offset(), and note any hook entry / underflow offsets
 * that land exactly on this entry.  On failure the error path drops
 * whatever references were already taken.
 */
1617 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1618 struct xt_table_info *newinfo,
1620 unsigned char *base,
1621 unsigned char *limit,
1622 unsigned int *hook_entries,
1623 unsigned int *underflows,
1627 struct ip6t_entry_target *t;
1628 struct xt_target *target;
1629 unsigned int entry_offset;
1633 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be properly aligned and lie fully inside the blob. */
1634 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1635 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1636 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* Must at least hold the entry header plus a target header. */
1640 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1641 sizeof(struct compat_xt_entry_target)) {
1642 duprintf("checking: element %p size %u\n",
1647 /* For purposes of check_entry casting the compat entry is fine */
1648 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the native entry will be. */
1652 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1653 entry_offset = (void *)e - (void *)base;
1655 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1656 &e->ipv6, e->comefrom, &j);
1658 goto release_matches;
1660 t = compat_ip6t_get_target(e);
1661 target = try_then_request_module(xt_find_target(AF_INET6,
1663 t->u.user.revision),
1664 "ip6t_%s", t->u.user.name);
1665 if (IS_ERR(target) || !target) {
1666 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1668 ret = target ? PTR_ERR(target) : -ENOENT;
1669 goto release_matches;
1671 t->u.kernel.target = target;
1673 off += xt_compat_target_offset(target);
/* Remember this entry's size delta for the second (copy) pass. */
1675 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1679 /* Check hooks & underflows */
1680 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1681 if ((unsigned char *)e - base == hook_entries[h])
1682 newinfo->hook_entry[h] = hook_entries[h];
1683 if ((unsigned char *)e - base == underflows[h])
1684 newinfo->underflow[h] = underflows[h];
1687 /* Clear counters and comefrom */
1688 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop the target ref, then the j match refs taken above. */
1695 module_put(t->u.kernel.target->me);
1697 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * compat_copy_entry_from_user - second-pass conversion: expand one
 * compat entry into native ip6t_entry layout at *dstptr, converting
 * each match and the target with the xt_compat_*_from_user helpers,
 * then shift target_offset/next_offset and the recorded hook entry /
 * underflow positions by the amount the entry grew.
 */
1702 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1703 unsigned int *size, const char *name,
1704 struct xt_table_info *newinfo, unsigned char *base)
1706 struct ip6t_entry_target *t;
1707 struct xt_target *target;
1708 struct ip6t_entry *de;
1709 unsigned int origsize;
1714 de = (struct ip6t_entry *)*dstptr;
1715 memcpy(de, e, sizeof(struct ip6t_entry));
1716 memcpy(&de->counters, &e->counters, sizeof(e->counters));
/* Native entry is larger than the compat one; *size grows accordingly. */
1718 *dstptr += sizeof(struct ip6t_entry);
1719 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1721 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
/* (origsize - *size) is negative growth; offsets move outward by it. */
1725 de->target_offset = e->target_offset - (origsize - *size);
1726 t = compat_ip6t_get_target(e);
1727 target = t->u.kernel.target;
1728 xt_compat_target_from_user(t, dstptr, size);
1730 de->next_offset = e->next_offset - (origsize - *size);
/* Hooks recorded past this entry must shift by the same growth. */
1731 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1732 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1733 newinfo->hook_entry[h] -= origsize - *size;
1734 if ((unsigned char *)de - base < newinfo->underflow[h])
1735 newinfo->underflow[h] -= origsize - *size;
/*
 * compat_check_entry - run the native checkentry hooks on an entry that
 * has already been converted from compat layout: each match via
 * check_match() (j counts successes for unwind), then the target via
 * check_target().  On failure, already-checked matches are cleaned up.
 */
1740 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1745 struct xt_mtchk_param mtpar;
1749 mtpar.entryinfo = &e->ipv6;
1750 mtpar.hook_mask = e->comefrom;
1751 mtpar.family = NFPROTO_IPV6;
1752 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1754 goto cleanup_matches;
1756 ret = check_target(e, name);
1758 goto cleanup_matches;
/* Unwind: release only the j matches that passed check_match. */
1764 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * translate_compat_table - convert a whole 32-bit-userland ruleset into
 * native kernel layout.  Two passes under the AF_INET6 compat lock:
 *
 *   1) check_compat_entry_size_and_hooks() walks the compat blob,
 *      validating every entry, resolving matches/targets, summing size
 *      deltas (so the native table size is known) and locating hooks.
 *   2) compat_copy_entry_from_user() copies each entry into a freshly
 *      allocated native xt_table_info, growing offsets as it goes.
 *
 * Afterwards the chains are verified (mark_source_chains) and each
 * entry re-checked natively, the result is replicated to every CPU's
 * copy, and *pinfo/*pentry0 are swapped to the new table.  All error
 * paths release module references and free the partial table.
 */
1769 translate_compat_table(const char *name,
1770 unsigned int valid_hooks,
1771 struct xt_table_info **pinfo,
1773 unsigned int total_size,
1774 unsigned int number,
1775 unsigned int *hook_entries,
1776 unsigned int *underflows)
1779 struct xt_table_info *newinfo, *info;
1780 void *pos, *entry0, *entry1;
1787 info->number = number;
1789 /* Init all hooks to impossible value. */
1790 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1791 info->hook_entry[i] = 0xFFFFFFFF;
1792 info->underflow[i] = 0xFFFFFFFF;
1795 duprintf("translate_compat_table: size %u\n", info->size);
1797 xt_compat_lock(AF_INET6);
1798 /* Walk through entries, checking offsets. */
1799 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1800 check_compat_entry_size_and_hooks,
1801 info, &size, entry0,
1802 entry0 + total_size,
1803 hook_entries, underflows, &j, name);
1809 duprintf("translate_compat_table: %u not %u entries\n",
1814 /* Check hooks all assigned */
1815 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1816 /* Only hooks which are valid */
1817 if (!(valid_hooks & (1 << i)))
1819 if (info->hook_entry[i] == 0xFFFFFFFF) {
1820 duprintf("Invalid hook entry %u %u\n",
1821 i, hook_entries[i]);
1824 if (info->underflow[i] == 0xFFFFFFFF) {
1825 duprintf("Invalid underflow %u %u\n",
/* size now includes every entry's compat->native growth. */
1832 newinfo = xt_alloc_table_info(size);
1836 newinfo->number = number;
1837 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1838 newinfo->hook_entry[i] = info->hook_entry[i];
1839 newinfo->underflow[i] = info->underflow[i];
1841 entry1 = newinfo->entries[raw_smp_processor_id()];
1844 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1845 compat_copy_entry_from_user,
1846 &pos, &size, name, newinfo, entry1);
/* Offset table is only needed during conversion; drop it and unlock. */
1847 xt_compat_flush_offsets(AF_INET6);
1848 xt_compat_unlock(AF_INET6);
1853 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1857 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* On partial failure: release the unchecked compat entries, then
 * clean up the i native entries that did pass compat_check_entry. */
1861 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1862 compat_release_entry, &j);
1863 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1864 xt_free_table_info(newinfo);
1868 /* And one copy for every other CPU */
1869 for_each_possible_cpu(i)
1870 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1871 memcpy(newinfo->entries[i], entry1, newinfo->size);
1875 xt_free_table_info(info);
1879 xt_free_table_info(newinfo);
1881 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1884 xt_compat_flush_offsets(AF_INET6);
1885 xt_compat_unlock(AF_INET6);
/*
 * compat_do_replace - IP6T_SO_SET_REPLACE from 32-bit userland: copy in
 * the compat replace header and rule blob, convert the ruleset with
 * translate_compat_table(), then swap it in via __do_replace().  The
 * overflow checks reject sizes that could wrap the per-CPU/counter
 * allocations.  On any failure after translation the converted entries
 * are cleaned up and the table info freed.
 */
1890 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1893 struct compat_ip6t_replace tmp;
1894 struct xt_table_info *newinfo;
1895 void *loc_cpu_entry;
1897 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1900 /* overflow check */
1901 if (tmp.size >= INT_MAX / num_possible_cpus())
1903 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1906 newinfo = xt_alloc_table_info(tmp.size);
1910 /* choose the copy that is on our node/cpu */
1911 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1912 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1918 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1919 &newinfo, &loc_cpu_entry, tmp.size,
1920 tmp.num_entries, tmp.hook_entry,
1925 duprintf("compat_do_replace: Translated table\n");
/* counters pointer arrives as a compat_uptr_t; widen it here. */
1927 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1928 tmp.num_counters, compat_ptr(tmp.counters));
1930 goto free_newinfo_untrans;
1933 free_newinfo_untrans:
1934 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1936 xt_free_table_info(newinfo);
/*
 * compat_do_ip6t_set_ctl - setsockopt entry point for 32-bit userland.
 * Requires CAP_NET_ADMIN; dispatches REPLACE to the compat converter
 * and ADD_COUNTERS to do_add_counters() with compat flag set.
 */
1941 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1946 if (!capable(CAP_NET_ADMIN))
1950 case IP6T_SO_SET_REPLACE:
1951 ret = compat_do_replace(sock_net(sk), user, len);
1954 case IP6T_SO_SET_ADD_COUNTERS:
1955 ret = do_add_counters(sock_net(sk), user, len, 1);
1959 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit userland view of the GET_ENTRIES request/reply: table name in,
 * trailing compat-layout rule dump out.
 */
1966 struct compat_ip6t_get_entries {
1967 char name[IP6T_TABLE_MAXNAMELEN];
1969 struct compat_ip6t_entry entrytable[0];
/*
 * compat_copy_entries_to_user - dump a table's rules to 32-bit userland:
 * snapshot the counters, then walk this CPU's entry copy converting each
 * rule with compat_copy_entry_to_user().
 */
1973 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1974 void __user *userptr)
1976 struct xt_counters *counters;
1977 const struct xt_table_info *private = table->private;
1981 const void *loc_cpu_entry;
1984 counters = alloc_counters(table);
1985 if (IS_ERR(counters))
1986 return PTR_ERR(counters);
1988 /* choose the copy that is on our node/cpu, ...
1989 * This choice is lazy (because current thread is
1990 * allowed to migrate to another cpu)
1992 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1995 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1996 compat_copy_entry_to_user,
1997 &pos, &size, counters, &i);
/*
 * compat_get_entries - IP6T_SO_GET_ENTRIES for 32-bit userland: validate
 * the request length against the compat table size (computed via
 * compat_table_info()), then dump the rules with
 * compat_copy_entries_to_user().  Runs under the AF_INET6 compat lock
 * because offset bookkeeping is used during the size computation.
 */
2004 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
2008 struct compat_ip6t_get_entries get;
2011 if (*len < sizeof(get)) {
2012 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
2016 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
2019 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
2020 duprintf("compat_get_entries: %u != %zu\n",
2021 *len, sizeof(get) + get.size);
2025 xt_compat_lock(AF_INET6);
2026 t = xt_find_table_lock(net, AF_INET6, get.name);
2027 if (t && !IS_ERR(t)) {
2028 const struct xt_table_info *private = t->private;
2029 struct xt_table_info info;
2030 duprintf("t->private->number = %u\n", private->number);
/* User's buffer must match the table's *compat* size exactly. */
2031 ret = compat_table_info(private, &info);
2032 if (!ret && get.size == info.size) {
2033 ret = compat_copy_entries_to_user(private->size,
2034 t, uptr->entrytable);
2036 duprintf("compat_get_entries: I've got %u not %u!\n",
2037 private->size, get.size);
2040 xt_compat_flush_offsets(AF_INET6);
2044 ret = t ? PTR_ERR(t) : -ENOENT;
2046 xt_compat_unlock(AF_INET6);
2050 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * compat_do_ip6t_get_ctl - getsockopt entry point for 32-bit userland.
 * Requires CAP_NET_ADMIN; GET_INFO and GET_ENTRIES use compat-aware
 * handlers, everything else falls through to the native handler.
 */
2053 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2057 if (!capable(CAP_NET_ADMIN))
2061 case IP6T_SO_GET_INFO:
2062 ret = get_info(sock_net(sk), user, len, 1);
2064 case IP6T_SO_GET_ENTRIES:
2065 ret = compat_get_entries(sock_net(sk), user, len);
2068 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * do_ip6t_set_ctl - native setsockopt entry point.  Requires
 * CAP_NET_ADMIN; dispatches table replacement and counter addition.
 */
2075 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2079 if (!capable(CAP_NET_ADMIN))
2083 case IP6T_SO_SET_REPLACE:
2084 ret = do_replace(sock_net(sk), user, len);
2087 case IP6T_SO_SET_ADD_COUNTERS:
2088 ret = do_add_counters(sock_net(sk), user, len, 0);
2092 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * do_ip6t_get_ctl - native getsockopt entry point.  Requires
 * CAP_NET_ADMIN; serves table info, full entry dumps, and match/target
 * revision queries (which may auto-load the relevant ip6t_* module).
 */
2100 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2104 if (!capable(CAP_NET_ADMIN))
2108 case IP6T_SO_GET_INFO:
2109 ret = get_info(sock_net(sk), user, len, 0);
2112 case IP6T_SO_GET_ENTRIES:
2113 ret = get_entries(sock_net(sk), user, len);
2116 case IP6T_SO_GET_REVISION_MATCH:
2117 case IP6T_SO_GET_REVISION_TARGET: {
2118 struct ip6t_get_revision rev;
2121 if (*len != sizeof(rev)) {
2125 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2130 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2135 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2138 "ip6t_%s", rev.name);
2143 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * ip6t_register_table - register a new ip6tables table populated from
 * the initial ruleset in @repl.  Allocates the per-CPU table info,
 * copies in the bootstrap entries, translates them, and hands the
 * result to xt_register_table().  Returns the registered table or an
 * ERR_PTR; on failure the allocated table info is freed.
 */
2150 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2151 const struct ip6t_replace *repl)
2154 struct xt_table_info *newinfo;
/* Placeholder "old table" so xt_register_table has something to swap. */
2155 struct xt_table_info bootstrap
2156 = { 0, 0, 0, { 0 }, { 0 }, { } };
2157 void *loc_cpu_entry;
2158 struct xt_table *new_table;
2160 newinfo = xt_alloc_table_info(repl->size);
2166 /* choose the copy on our node/cpu, but dont care about preemption */
2167 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2168 memcpy(loc_cpu_entry, repl->entries, repl->size);
2170 ret = translate_table(table->name, table->valid_hooks,
2171 newinfo, loc_cpu_entry, repl->size,
2178 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2179 if (IS_ERR(new_table)) {
2180 ret = PTR_ERR(new_table);
2186 xt_free_table_info(newinfo);
2188 return ERR_PTR(ret);
/*
 * ip6t_unregister_table - tear down a registered table: unregister it,
 * run cleanup_entry over every rule (releasing match/target module
 * refs), drop the owner module's extra reference if user rules were
 * loaded beyond the built-in ones, and free the per-CPU table info.
 */
2191 void ip6t_unregister_table(struct xt_table *table)
2193 struct xt_table_info *private;
2194 void *loc_cpu_entry;
2195 struct module *table_owner = table->me;
2197 private = xt_unregister_table(table);
2199 /* Decrease module usage counts and free resources */
2200 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2201 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2202 if (private->number > private->initial_entries)
2203 module_put(table_owner);
2204 xt_free_table_info(private);
2207 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2209 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2210 u_int8_t type, u_int8_t code,
/* True when type matches and code lies in [min_code, max_code]. */
2213 return (type == test_type && code >= min_code && code <= max_code)
/*
 * icmp6_match - xt match callback for "-p icmpv6 --icmpv6-type".
 * Rejects fragments outright; if the ICMPv6 header cannot be pulled
 * from the packet, sets hotdrop so the packet is dropped rather than
 * mis-matched.  Otherwise compares type/code against the rule's range,
 * honoring the IP6T_ICMP_INV invert flag.
 */
2218 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2220 const struct icmp6hdr *ic;
2221 struct icmp6hdr _icmph;
2222 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2224 /* Must not be a fragment. */
2225 if (par->fragoff != 0)
2228 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2230 /* We've been asked to examine this packet, and we
2231 * can't. Hence, no choice but to drop.
2233 duprintf("Dropping evil ICMP tinygram.\n");
2234 *par->hotdrop = true;
2238 return icmp6_type_code_match(icmpinfo->type,
2241 ic->icmp6_type, ic->icmp6_code,
2242 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2245 /* Called when user tries to insert an entry of this type. */
2246 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2248 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2250 /* Must specify no unknown invflags */
2251 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2254 /* The built-in targets: standard (NULL) and error. */
/*
 * Standard target: its targetsize-int payload is a verdict/jump offset;
 * compat hooks translate the int between 32- and 64-bit layouts.
 */
2255 static struct xt_target ip6t_standard_target __read_mostly = {
2256 .name = IP6T_STANDARD_TARGET,
2257 .targetsize = sizeof(int),
2259 #ifdef CONFIG_COMPAT
2260 .compatsize = sizeof(compat_int_t),
2261 .compat_from_user = compat_standard_from_user,
2262 .compat_to_user = compat_standard_to_user,
/* Error target: placed at chain ends; its handler is ip6t_error. */
2266 static struct xt_target ip6t_error_target __read_mostly = {
2267 .name = IP6T_ERROR_TARGET,
2268 .target = ip6t_error,
2269 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
/*
 * Socket-option registration: wires the IP6T_SO_SET_*/IP6T_SO_GET_*
 * ranges to the native handlers, with compat variants for 32-bit
 * userland when CONFIG_COMPAT is enabled.
 */
2273 static struct nf_sockopt_ops ip6t_sockopts = {
2275 .set_optmin = IP6T_BASE_CTL,
2276 .set_optmax = IP6T_SO_SET_MAX+1,
2277 .set = do_ip6t_set_ctl,
2278 #ifdef CONFIG_COMPAT
2279 .compat_set = compat_do_ip6t_set_ctl,
2281 .get_optmin = IP6T_BASE_CTL,
2282 .get_optmax = IP6T_SO_GET_MAX+1,
2283 .get = do_ip6t_get_ctl,
2284 #ifdef CONFIG_COMPAT
2285 .compat_get = compat_do_ip6t_get_ctl,
2287 .owner = THIS_MODULE,
/* Built-in ICMPv6 match registration (see icmp6_match above). */
2290 static struct xt_match icmp6_matchstruct __read_mostly = {
2292 .match = icmp6_match,
2293 .matchsize = sizeof(struct ip6t_icmp),
2294 .checkentry = icmp6_checkentry,
2295 .proto = IPPROTO_ICMPV6,
/* Per-netns setup: register the AF_INET6 xtables proc entries/state. */
2299 static int __net_init ip6_tables_net_init(struct net *net)
2301 return xt_proto_init(net, AF_INET6);
/* Per-netns teardown: mirror of ip6_tables_net_init. */
2304 static void __net_exit ip6_tables_net_exit(struct net *net)
2306 xt_proto_fini(net, AF_INET6);
/* Hook the per-netns init/exit pair into the pernet subsystem. */
2309 static struct pernet_operations ip6_tables_net_ops = {
2310 .init = ip6_tables_net_init,
2311 .exit = ip6_tables_net_exit,
/*
 * ip6_tables_init - module init: register the pernet subsystem, the two
 * built-in targets, the ICMPv6 match, and finally the sockopt interface.
 * Each failure path unwinds everything registered before it, in reverse
 * order.
 */
2314 static int __init ip6_tables_init(void)
2318 ret = register_pernet_subsys(&ip6_tables_net_ops);
2322 /* Noone else will be downing sem now, so we won't sleep */
2323 ret = xt_register_target(&ip6t_standard_target);
2326 ret = xt_register_target(&ip6t_error_target);
2329 ret = xt_register_match(&icmp6_matchstruct);
2333 /* Register setsockopt */
2334 ret = nf_register_sockopt(&ip6t_sockopts);
2338 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind labels: undo registrations in reverse order. */
2342 xt_unregister_match(&icmp6_matchstruct);
2344 xt_unregister_target(&ip6t_error_target);
2346 xt_unregister_target(&ip6t_standard_target);
2348 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything ip6_tables_init registered,
 * in reverse order of registration. */
2353 static void __exit ip6_tables_fini(void)
2355 nf_unregister_sockopt(&ip6t_sockopts);
2357 xt_unregister_match(&icmp6_matchstruct);
2358 xt_unregister_target(&ip6t_error_target);
2359 xt_unregister_target(&ip6t_standard_target);
2361 unregister_pernet_subsys(&ip6_tables_net_ops);
2365 * find the offset to specified header or the protocol number of last header
2366 * if target < 0. "last header" is transport protocol header, ESP, or
2369 * If target header is found, its offset is set in *offset and return protocol
2370 * number. Otherwise, return -1.
2372 * If the first fragment doesn't contain the final protocol header or
2373 * NEXTHDR_NONE it is considered invalid.
2375 * Note that non-1st fragment is special case that "the protocol number
2376 * of last header" is "next header" field in Fragment header. In this case,
2377 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2381 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2382 int target, unsigned short *fragoff)
/* Start parsing right after the fixed IPv6 header. */
2384 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2385 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2386 unsigned int len = skb->len - start;
/* Walk the extension-header chain until @target (or a terminator). */
2391 while (nexthdr != target) {
2392 struct ipv6_opt_hdr _hdr, *hp;
2393 unsigned int hdrlen;
2395 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2401 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2404 if (nexthdr == NEXTHDR_FRAGMENT) {
2405 unsigned short _frag_off;
2407 fp = skb_header_pointer(skb,
2408 start+offsetof(struct frag_hdr,
/* Low 3 bits of the frag field are flags; mask to get the offset. */
2415 _frag_off = ntohs(*fp) & ~0x7;
2418 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2419 hp->nexthdr == NEXTHDR_NONE)) {
2421 *fragoff = _frag_off;
/* AUTH header length is counted in 4-byte units (RFC 4302),
 * unlike other extension headers (8-byte units via ipv6_optlen). */
2427 } else if (nexthdr == NEXTHDR_AUTH)
2428 hdrlen = (hp->hdrlen + 2) << 2;
2430 hdrlen = ipv6_optlen(hp);
2432 nexthdr = hp->nexthdr;
2441 EXPORT_SYMBOL(ip6t_register_table);
2442 EXPORT_SYMBOL(ip6t_unregister_table);
2443 EXPORT_SYMBOL(ip6t_do_table);
2444 EXPORT_SYMBOL(ip6t_ext_hdr);
2445 EXPORT_SYMBOL(ipv6_find_hdr);
2447 module_init(ip6_tables_init);
2448 module_exit(ip6_tables_fini);