net/ipv4/netfilter/ip_tables.c
1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12  *      - increase module usage count as soon as we have rules inside
13  *        a table
14  * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15  *      - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
16  */
17 #include <linux/cache.h>
18 #include <linux/capability.h>
19 #include <linux/skbuff.h>
20 #include <linux/kmod.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netdevice.h>
23 #include <linux/module.h>
24 #include <linux/icmp.h>
25 #include <net/ip.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
35
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
39
40 /*#define DEBUG_IP_FIREWALL*/
41 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42 /*#define DEBUG_IP_FIREWALL_USER*/
43
44 #ifdef DEBUG_IP_FIREWALL
45 #define dprintf(format, args...)  printk(format , ## args)
46 #else
47 #define dprintf(format, args...)
48 #endif
49
50 #ifdef DEBUG_IP_FIREWALL_USER
51 #define duprintf(format, args...) printk(format , ## args)
52 #else
53 #define duprintf(format, args...)
54 #endif
55
56 #ifdef CONFIG_NETFILTER_DEBUG
57 #define IP_NF_ASSERT(x)                                         \
58 do {                                                            \
59         if (!(x))                                               \
60                 printk("IP_NF_ASSERT: %s:%s:%u\n",              \
61                        __FUNCTION__, __FILE__, __LINE__);       \
62 } while(0)
63 #else
64 #define IP_NF_ASSERT(x)
65 #endif
66
67 #if 0
68 /* All the better to debug you with... */
69 #define static
70 #define inline
71 #endif
72
73 /*
74    We keep a set of rules for each CPU, so we can avoid write-locking
75    them in the softirq when updating the counters and therefore
76    only need to read-lock in the softirq; doing a write_lock_bh() in user
77    context stops packets coming through and allows user context to read
78    the counters or update the rules.
79
80    Hence the start of any chain is given by get_entry() on the per-cpu table copy below.  */
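81
/*
 * Added note (not from the original authors): the per-cpu copies live in
 * struct xt_table_info->entries[cpu], so the per-packet fast path below
 * boils down to
 *
 *     read_lock_bh(&table->lock);
 *     e = get_entry(private->entries[smp_processor_id()], ...);
 *     ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
 *     read_unlock_bh(&table->lock);
 *
 * while user context takes write_lock_bh() (see alloc_counters()) and sums
 * every CPU's copy, so no atomic counter operations are needed per packet.
 */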
81
82 /* Returns whether the packet matches the rule or not. */
83 static inline int
84 ip_packet_match(const struct iphdr *ip,
85                 const char *indev,
86                 const char *outdev,
87                 const struct ipt_ip *ipinfo,
88                 int isfrag)
89 {
90         size_t i;
91         unsigned long ret;
92
93 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
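/*
 * Worked example (added comment): FWINV(x, IPT_INV_SRCIP) XORs the condition
 * with the "invert source" flag.  Without the flag a source/mask mismatch
 * (x true) makes this check fail the rule; with IPT_INV_SRCIP set the same
 * mismatch yields true ^ true == false, so only packets whose source *does*
 * match src/smsk are rejected here.
 */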
94
95         if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
96                   IPT_INV_SRCIP)
97             || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
98                      IPT_INV_DSTIP)) {
99                 dprintf("Source or dest mismatch.\n");
100
101                 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
102                         NIPQUAD(ip->saddr),
103                         NIPQUAD(ipinfo->smsk.s_addr),
104                         NIPQUAD(ipinfo->src.s_addr),
105                         ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
106                 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
107                         NIPQUAD(ip->daddr),
108                         NIPQUAD(ipinfo->dmsk.s_addr),
109                         NIPQUAD(ipinfo->dst.s_addr),
110                         ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
111                 return 0;
112         }
113
114         /* Look for ifname matches; this should unroll nicely. */
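        /*
         * Added explanation: names are compared IFNAMSIZ bytes at a time as
         * unsigned longs and masked with iniface_mask, so after the loop
         * ret == 0 means indev agrees with iniface on every byte userspace
         * cared about (a trailing '+' wildcard leaves the rest of the mask 0).
         */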
115         for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
116                 ret |= (((const unsigned long *)indev)[i]
117                         ^ ((const unsigned long *)ipinfo->iniface)[i])
118                         & ((const unsigned long *)ipinfo->iniface_mask)[i];
119         }
120
121         if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
122                 dprintf("VIA in mismatch (%s vs %s).%s\n",
123                         indev, ipinfo->iniface,
124                         ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
125                 return 0;
126         }
127
128         for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
129                 ret |= (((const unsigned long *)outdev)[i]
130                         ^ ((const unsigned long *)ipinfo->outiface)[i])
131                         & ((const unsigned long *)ipinfo->outiface_mask)[i];
132         }
133
134         if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
135                 dprintf("VIA out mismatch (%s vs %s).%s\n",
136                         outdev, ipinfo->outiface,
137                         ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
138                 return 0;
139         }
140
141         /* Check specific protocol */
142         if (ipinfo->proto
143             && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
144                 dprintf("Packet protocol %hi does not match %hi.%s\n",
145                         ip->protocol, ipinfo->proto,
146                         ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
147                 return 0;
148         }
149
150         /* If we have a fragment rule but the packet is not a fragment
151          * then we return zero */
152         if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
153                 dprintf("Fragment rule but not fragment.%s\n",
154                         ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
155                 return 0;
156         }
157
158         return 1;
159 }
160
161 static inline int
162 ip_checkentry(const struct ipt_ip *ip)
163 {
164         if (ip->flags & ~IPT_F_MASK) {
165                 duprintf("Unknown flag bits set: %08X\n",
166                          ip->flags & ~IPT_F_MASK);
167                 return 0;
168         }
169         if (ip->invflags & ~IPT_INV_MASK) {
170                 duprintf("Unknown invflag bits set: %08X\n",
171                          ip->invflags & ~IPT_INV_MASK);
172                 return 0;
173         }
174         return 1;
175 }
176
177 static unsigned int
178 ipt_error(struct sk_buff **pskb,
179           const struct net_device *in,
180           const struct net_device *out,
181           unsigned int hooknum,
182           const struct xt_target *target,
183           const void *targinfo)
184 {
185         if (net_ratelimit())
186                 printk("ip_tables: error: `%s'\n", (char *)targinfo);
187
188         return NF_DROP;
189 }
190
191 static inline
192 int do_match(struct ipt_entry_match *m,
193              const struct sk_buff *skb,
194              const struct net_device *in,
195              const struct net_device *out,
196              int offset,
197              int *hotdrop)
198 {
199         /* Stop iteration if it doesn't match */
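        /*
         * Note (added): IPT_MATCH_ITERATE() aborts as soon as a callback
         * returns non-zero, so returning 1 here on a failed match is what
         * makes ipt_do_table() jump to its no_match label.
         */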
200         if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
201                                       offset, skb->nh.iph->ihl*4, hotdrop))
202                 return 1;
203         else
204                 return 0;
205 }
206
207 static inline struct ipt_entry *
208 get_entry(void *base, unsigned int offset)
209 {
210         return (struct ipt_entry *)(base + offset);
211 }
212
213 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
214 unsigned int
215 ipt_do_table(struct sk_buff **pskb,
216              unsigned int hook,
217              const struct net_device *in,
218              const struct net_device *out,
219              struct xt_table *table)
220 {
221         static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
222         u_int16_t offset;
223         struct iphdr *ip;
224         u_int16_t datalen;
225         int hotdrop = 0;
226         /* Initializing verdict to NF_DROP keeps gcc happy. */
227         unsigned int verdict = NF_DROP;
228         const char *indev, *outdev;
229         void *table_base;
230         struct ipt_entry *e, *back;
231         struct xt_table_info *private;
232
233         /* Initialization */
234         ip = (*pskb)->nh.iph;
235         datalen = (*pskb)->len - ip->ihl * 4;
236         indev = in ? in->name : nulldevname;
237         outdev = out ? out->name : nulldevname;
238         /* We handle fragments by dealing with the first fragment as
239          * if it was a normal packet.  All other fragments are treated
240          * normally, except that they will NEVER match rules that ask
241          * about things we don't know, e.g. tcp syn flag or ports.  If the
242          * rule is also a fragment-specific rule, non-fragments won't
243          * match it. */
244         offset = ntohs(ip->frag_off) & IP_OFFSET;
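        /*
         * Added note: the fragment offset field is zero for non-fragments and
         * for the first fragment, so only second-and-later fragments get a
         * non-zero 'offset' here and are kept away from port/flag matches.
         */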
245
246         read_lock_bh(&table->lock);
247         IP_NF_ASSERT(table->valid_hooks & (1 << hook));
248         private = table->private;
249         table_base = (void *)private->entries[smp_processor_id()];
250         e = get_entry(table_base, private->hook_entry[hook]);
251
252         /* For return from builtin chain */
253         back = get_entry(table_base, private->underflow[hook]);
254
255         do {
256                 IP_NF_ASSERT(e);
257                 IP_NF_ASSERT(back);
258                 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
259                         struct ipt_entry_target *t;
260
261                         if (IPT_MATCH_ITERATE(e, do_match,
262                                               *pskb, in, out,
263                                               offset, &hotdrop) != 0)
264                                 goto no_match;
265
266                         ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
267
268                         t = ipt_get_target(e);
269                         IP_NF_ASSERT(t->u.kernel.target);
270                         /* Standard target? */
271                         if (!t->u.kernel.target->target) {
272                                 int v;
273
274                                 v = ((struct ipt_standard_target *)t)->verdict;
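                                /*
                                 * Added note: userspace encodes an absolute
                                 * verdict V as -V - 1 (hence v < 0), while
                                 * v >= 0 is a byte offset to jump to; the
                                 * code below undoes that encoding.
                                 */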
275                                 if (v < 0) {
276                                         /* Pop from stack? */
277                                         if (v != IPT_RETURN) {
278                                                 verdict = (unsigned)(-v) - 1;
279                                                 break;
280                                         }
281                                         e = back;
282                                         back = get_entry(table_base,
283                                                          back->comefrom);
284                                         continue;
285                                 }
286                                 if (table_base + v != (void *)e + e->next_offset
287                                     && !(e->ip.flags & IPT_F_GOTO)) {
288                                         /* Save old back ptr in next entry */
289                                         struct ipt_entry *next
290                                                 = (void *)e + e->next_offset;
291                                         next->comefrom
292                                                 = (void *)back - table_base;
293                                         /* set back pointer to next entry */
294                                         back = next;
295                                 }
296
297                                 e = get_entry(table_base, v);
298                         } else {
299                                 /* Targets which reenter must return
300                                    abs. verdicts */
301 #ifdef CONFIG_NETFILTER_DEBUG
302                                 ((struct ipt_entry *)table_base)->comefrom
303                                         = 0xeeeeeeec;
304 #endif
305                                 verdict = t->u.kernel.target->target(pskb,
306                                                                      in, out,
307                                                                      hook,
308                                                                      t->u.kernel.target,
309                                                                      t->data);
310
311 #ifdef CONFIG_NETFILTER_DEBUG
312                                 if (((struct ipt_entry *)table_base)->comefrom
313                                     != 0xeeeeeeec
314                                     && verdict == IPT_CONTINUE) {
315                                         printk("Target %s reentered!\n",
316                                                t->u.kernel.target->name);
317                                         verdict = NF_DROP;
318                                 }
319                                 ((struct ipt_entry *)table_base)->comefrom
320                                         = 0x57acc001;
321 #endif
322                                 /* Target might have changed stuff. */
323                                 ip = (*pskb)->nh.iph;
324                                 datalen = (*pskb)->len - ip->ihl * 4;
325
326                                 if (verdict == IPT_CONTINUE)
327                                         e = (void *)e + e->next_offset;
328                                 else
329                                         /* Verdict */
330                                         break;
331                         }
332                 } else {
333
334                 no_match:
335                         e = (void *)e + e->next_offset;
336                 }
337         } while (!hotdrop);
338
339         read_unlock_bh(&table->lock);
340
341 #ifdef DEBUG_ALLOW_ALL
342         return NF_ACCEPT;
343 #else
344         if (hotdrop)
345                 return NF_DROP;
346         else return verdict;
347 #endif
348 }
349
350 /* All zeroes == unconditional rule. */
351 static inline int
352 unconditional(const struct ipt_ip *ip)
353 {
354         unsigned int i;
355
356         for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
357                 if (((__u32 *)ip)[i])
358                         return 0;
359
360         return 1;
361 }
362
363 /* Figures out from what hook each rule can be called: returns 0 if
364    there are loops.  Puts hook bitmask in comefrom. */
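/*
 * Added summary: this walks every chain reachable from each hook entry point,
 * reusing e->counters.pcnt as a temporary back pointer (restored to 0 on the
 * way back) and bit NF_IP_NUMHOOKS in e->comefrom as an "on the current path"
 * marker; reaching an entry that still carries that bit means a rule loop,
 * which makes translate_table() fail with -ELOOP.
 */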
365 static int
366 mark_source_chains(struct xt_table_info *newinfo,
367                    unsigned int valid_hooks, void *entry0)
368 {
369         unsigned int hook;
370
371         /* No recursion; use packet counter to save back ptrs (reset
372            to 0 as we leave), and comefrom to save source hook bitmask */
373         for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
374                 unsigned int pos = newinfo->hook_entry[hook];
375                 struct ipt_entry *e
376                         = (struct ipt_entry *)(entry0 + pos);
377
378                 if (!(valid_hooks & (1 << hook)))
379                         continue;
380
381                 /* Set initial back pointer. */
382                 e->counters.pcnt = pos;
383
384                 for (;;) {
385                         struct ipt_standard_target *t
386                                 = (void *)ipt_get_target(e);
387                         int visited = e->comefrom & (1 << hook);
388
389                         if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
390                                 printk("iptables: loop hook %u pos %u %08X.\n",
391                                        hook, pos, e->comefrom);
392                                 return 0;
393                         }
394                         e->comefrom
395                                 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
396
397                         /* Unconditional return/END. */
398                         if ((e->target_offset == sizeof(struct ipt_entry)
399                             && (strcmp(t->target.u.user.name,
400                                        IPT_STANDARD_TARGET) == 0)
401                             && t->verdict < 0
402                             && unconditional(&e->ip)) || visited) {
403                                 unsigned int oldpos, size;
404
405                                 if (t->verdict < -NF_MAX_VERDICT - 1) {
406                                         duprintf("mark_source_chains: bad "
407                                                 "negative verdict (%i)\n",
408                                                                 t->verdict);
409                                         return 0;
410                                 }
411
412                                 /* Return: backtrack through the last
413                                    big jump. */
414                                 do {
415                                         e->comefrom ^= (1<<NF_IP_NUMHOOKS);
416 #ifdef DEBUG_IP_FIREWALL_USER
417                                         if (e->comefrom
418                                             & (1 << NF_IP_NUMHOOKS)) {
419                                                 duprintf("Back unset "
420                                                          "on hook %u "
421                                                          "rule %u\n",
422                                                          hook, pos);
423                                         }
424 #endif
425                                         oldpos = pos;
426                                         pos = e->counters.pcnt;
427                                         e->counters.pcnt = 0;
428
429                                         /* We're at the start. */
430                                         if (pos == oldpos)
431                                                 goto next;
432
433                                         e = (struct ipt_entry *)
434                                                 (entry0 + pos);
435                                 } while (oldpos == pos + e->next_offset);
436
437                                 /* Move along one */
438                                 size = e->next_offset;
439                                 e = (struct ipt_entry *)
440                                         (entry0 + pos + size);
441                                 e->counters.pcnt = pos;
442                                 pos += size;
443                         } else {
444                                 int newpos = t->verdict;
445
446                                 if (strcmp(t->target.u.user.name,
447                                            IPT_STANDARD_TARGET) == 0
448                                     && newpos >= 0) {
449                                         if (newpos > newinfo->size -
450                                                 sizeof(struct ipt_entry)) {
451                                                 duprintf("mark_source_chains: "
452                                                         "bad verdict (%i)\n",
453                                                                 newpos);
454                                                 return 0;
455                                         }
456                                         /* This a jump; chase it. */
457                                         duprintf("Jump rule %u -> %u\n",
458                                                  pos, newpos);
459                                 } else {
460                                         /* ... this is a fallthru */
461                                         newpos = pos + e->next_offset;
462                                 }
463                                 e = (struct ipt_entry *)
464                                         (entry0 + newpos);
465                                 e->counters.pcnt = pos;
466                                 pos = newpos;
467                         }
468                 }
469                 next:
470                 duprintf("Finished chain %u\n", hook);
471         }
472         return 1;
473 }
474
475 static inline int
476 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
477 {
478         if (i && (*i)-- == 0)
479                 return 1;
480
481         if (m->u.kernel.match->destroy)
482                 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
483         module_put(m->u.kernel.match->me);
484         return 0;
485 }
486
487 static inline int
488 check_entry(struct ipt_entry *e, const char *name)
489 {
490         struct ipt_entry_target *t;
491
492         if (!ip_checkentry(&e->ip)) {
493                 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
494                 return -EINVAL;
495         }
496
497         if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
498                 return -EINVAL;
499
500         t = ipt_get_target(e);
501         if (e->target_offset + t->u.target_size > e->next_offset)
502                 return -EINVAL;
503
504         return 0;
505 }
506
507 static inline int check_match(struct ipt_entry_match *m, const char *name,
508                                 const struct ipt_ip *ip, unsigned int hookmask)
509 {
510         struct xt_match *match;
511         int ret;
512
513         match = m->u.kernel.match;
514         ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
515                              name, hookmask, ip->proto,
516                              ip->invflags & IPT_INV_PROTO);
517         if (!ret && m->u.kernel.match->checkentry
518             && !m->u.kernel.match->checkentry(name, ip, match, m->data,
519                                               hookmask)) {
520                 duprintf("ip_tables: check failed for `%s'.\n",
521                          m->u.kernel.match->name);
522                 ret = -EINVAL;
523         }
524         return ret;
525 }
526
527 static inline int
528 find_check_match(struct ipt_entry_match *m,
529             const char *name,
530             const struct ipt_ip *ip,
531             unsigned int hookmask,
532             unsigned int *i)
533 {
534         struct xt_match *match;
535         int ret;
536
537         match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
538                                                    m->u.user.revision),
539                                         "ipt_%s", m->u.user.name);
540         if (IS_ERR(match) || !match) {
541                 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
542                 return match ? PTR_ERR(match) : -ENOENT;
543         }
544         m->u.kernel.match = match;
545
546         ret = check_match(m, name, ip, hookmask);
547         if (ret)
548                 goto err;
549
550         (*i)++;
551         return 0;
552 err:
553         module_put(m->u.kernel.match->me);
554         return ret;
555 }
556
557 static inline int check_target(struct ipt_entry *e, const char *name)
558 {
559         struct ipt_entry_target *t;
560         struct xt_target *target;
561         int ret;
562
563         t = ipt_get_target(e);
564         target = t->u.kernel.target;
565         ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
566                               name, e->comefrom, e->ip.proto,
567                               e->ip.invflags & IPT_INV_PROTO);
568         if (!ret && t->u.kernel.target->checkentry
569                    && !t->u.kernel.target->checkentry(name, e, target,
570                                                       t->data, e->comefrom)) {
571                 duprintf("ip_tables: check failed for `%s'.\n",
572                          t->u.kernel.target->name);
573                 ret = -EINVAL;
574         }
575         return ret;
576 }
577
578 static inline int
579 find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
580             unsigned int *i)
581 {
582         struct ipt_entry_target *t;
583         struct xt_target *target;
584         int ret;
585         unsigned int j;
586
587         ret = check_entry(e, name);
588         if (ret)
589                 return ret;
590
591         j = 0;
592         ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
593                                                         e->comefrom, &j);
594         if (ret != 0)
595                 goto cleanup_matches;
596
597         t = ipt_get_target(e);
598         target = try_then_request_module(xt_find_target(AF_INET,
599                                                      t->u.user.name,
600                                                      t->u.user.revision),
601                                          "ipt_%s", t->u.user.name);
602         if (IS_ERR(target) || !target) {
603                 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
604                 ret = target ? PTR_ERR(target) : -ENOENT;
605                 goto cleanup_matches;
606         }
607         t->u.kernel.target = target;
608
609         ret = check_target(e, name);
610         if (ret)
611                 goto err;
612
613         (*i)++;
614         return 0;
615  err:
616         module_put(t->u.kernel.target->me);
617  cleanup_matches:
618         IPT_MATCH_ITERATE(e, cleanup_match, &j);
619         return ret;
620 }
621
622 static inline int
623 check_entry_size_and_hooks(struct ipt_entry *e,
624                            struct xt_table_info *newinfo,
625                            unsigned char *base,
626                            unsigned char *limit,
627                            const unsigned int *hook_entries,
628                            const unsigned int *underflows,
629                            unsigned int *i)
630 {
631         unsigned int h;
632
633         if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
634             || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
635                 duprintf("Bad offset %p\n", e);
636                 return -EINVAL;
637         }
638
639         if (e->next_offset
640             < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
641                 duprintf("checking: element %p size %u\n",
642                          e, e->next_offset);
643                 return -EINVAL;
644         }
645
646         /* Check hooks & underflows */
647         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
648                 if ((unsigned char *)e - base == hook_entries[h])
649                         newinfo->hook_entry[h] = hook_entries[h];
650                 if ((unsigned char *)e - base == underflows[h])
651                         newinfo->underflow[h] = underflows[h];
652         }
653
654         /* FIXME: underflows must be unconditional, standard verdicts
655            < 0 (not IPT_RETURN). --RR */
656
657         /* Clear counters and comefrom */
658         e->counters = ((struct xt_counters) { 0, 0 });
659         e->comefrom = 0;
660
661         (*i)++;
662         return 0;
663 }
664
665 static inline int
666 cleanup_entry(struct ipt_entry *e, unsigned int *i)
667 {
668         struct ipt_entry_target *t;
669
670         if (i && (*i)-- == 0)
671                 return 1;
672
673         /* Cleanup all matches */
674         IPT_MATCH_ITERATE(e, cleanup_match, NULL);
675         t = ipt_get_target(e);
676         if (t->u.kernel.target->destroy)
677                 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
678         module_put(t->u.kernel.target->me);
679         return 0;
680 }
681
682 /* Checks and translates the user-supplied table segment (held in
683    newinfo) */
684 static int
685 translate_table(const char *name,
686                 unsigned int valid_hooks,
687                 struct xt_table_info *newinfo,
688                 void *entry0,
689                 unsigned int size,
690                 unsigned int number,
691                 const unsigned int *hook_entries,
692                 const unsigned int *underflows)
693 {
694         unsigned int i;
695         int ret;
696
697         newinfo->size = size;
698         newinfo->number = number;
699
700         /* Init all hooks to impossible value. */
701         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
702                 newinfo->hook_entry[i] = 0xFFFFFFFF;
703                 newinfo->underflow[i] = 0xFFFFFFFF;
704         }
705
706         duprintf("translate_table: size %u\n", newinfo->size);
707         i = 0;
708         /* Walk through entries, checking offsets. */
709         ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
710                                 check_entry_size_and_hooks,
711                                 newinfo,
712                                 entry0,
713                                 entry0 + size,
714                                 hook_entries, underflows, &i);
715         if (ret != 0)
716                 return ret;
717
718         if (i != number) {
719                 duprintf("translate_table: %u not %u entries\n",
720                          i, number);
721                 return -EINVAL;
722         }
723
724         /* Check hooks all assigned */
725         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
726                 /* Only hooks which are valid */
727                 if (!(valid_hooks & (1 << i)))
728                         continue;
729                 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
730                         duprintf("Invalid hook entry %u %u\n",
731                                  i, hook_entries[i]);
732                         return -EINVAL;
733                 }
734                 if (newinfo->underflow[i] == 0xFFFFFFFF) {
735                         duprintf("Invalid underflow %u %u\n",
736                                  i, underflows[i]);
737                         return -EINVAL;
738                 }
739         }
740
741         if (!mark_source_chains(newinfo, valid_hooks, entry0))
742                 return -ELOOP;
743
744         /* Finally, each sanity check must pass */
745         i = 0;
746         ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
747                                 find_check_entry, name, size, &i);
748
749         if (ret != 0) {
750                 IPT_ENTRY_ITERATE(entry0, newinfo->size,
751                                 cleanup_entry, &i);
752                 return ret;
753         }
754
755         /* And one copy for every other CPU */
756         for_each_possible_cpu(i) {
757                 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
758                         memcpy(newinfo->entries[i], entry0, newinfo->size);
759         }
760
761         return ret;
762 }
763
764 /* Gets counters. */
765 static inline int
766 add_entry_to_counter(const struct ipt_entry *e,
767                      struct xt_counters total[],
768                      unsigned int *i)
769 {
770         ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
771
772         (*i)++;
773         return 0;
774 }
775
776 static inline int
777 set_entry_to_counter(const struct ipt_entry *e,
778                      struct ipt_counters total[],
779                      unsigned int *i)
780 {
781         SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
782
783         (*i)++;
784         return 0;
785 }
786
787 static void
788 get_counters(const struct xt_table_info *t,
789              struct xt_counters counters[])
790 {
791         unsigned int cpu;
792         unsigned int i;
793         unsigned int curcpu;
794
795         /* Instead of clearing (by a previous call to memset())
796          * the counters and using adds, we set the counters
797          * with data used by the 'current' CPU.
798          * We don't care about preemption here.
799          */
800         curcpu = raw_smp_processor_id();
801
802         i = 0;
803         IPT_ENTRY_ITERATE(t->entries[curcpu],
804                           t->size,
805                           set_entry_to_counter,
806                           counters,
807                           &i);
808
809         for_each_possible_cpu(cpu) {
810                 if (cpu == curcpu)
811                         continue;
812                 i = 0;
813                 IPT_ENTRY_ITERATE(t->entries[cpu],
814                                   t->size,
815                                   add_entry_to_counter,
816                                   counters,
817                                   &i);
818         }
819 }
820
821 static inline struct xt_counters * alloc_counters(struct xt_table *table)
822 {
823         unsigned int countersize;
824         struct xt_counters *counters;
825         struct xt_table_info *private = table->private;
826
827         /* We need atomic snapshot of counters: rest doesn't change
828            (other than comefrom, which userspace doesn't care
829            about). */
830         countersize = sizeof(struct xt_counters) * private->number;
831         counters = vmalloc_node(countersize, numa_node_id());
832
833         if (counters == NULL)
834                 return ERR_PTR(-ENOMEM);
835
836         /* First, sum counters... */
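        /*
         * Added note: write_lock_bh() excludes the read-locked packet path in
         * ipt_do_table(), so the per-cpu counters cannot change while
         * get_counters() sums them into this snapshot.
         */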
837         write_lock_bh(&table->lock);
838         get_counters(private, counters);
839         write_unlock_bh(&table->lock);
840
841         return counters;
842 }
843
844 static int
845 copy_entries_to_user(unsigned int total_size,
846                      struct xt_table *table,
847                      void __user *userptr)
848 {
849         unsigned int off, num;
850         struct ipt_entry *e;
851         struct xt_counters *counters;
852         struct xt_table_info *private = table->private;
853         int ret = 0;
854         void *loc_cpu_entry;
855
856         counters = alloc_counters(table);
857         if (IS_ERR(counters))
858                 return PTR_ERR(counters);
859
860         /* choose the copy that is on our node/cpu, ...
861          * This choice is lazy (because current thread is
862          * allowed to migrate to another cpu)
863          */
864         loc_cpu_entry = private->entries[raw_smp_processor_id()];
865         /* ... then copy entire thing ... */
866         if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
867                 ret = -EFAULT;
868                 goto free_counters;
869         }
870
871         /* FIXME: use iterator macros --RR */
872         /* ... then go back and fix counters and names */
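        /* Added note: the kernel copy holds kernel-side match/target pointers
         * in the u.kernel union members, so this loop rewrites u.user.name in
         * the user buffer and patches in the counters summed above. */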
873         for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
874                 unsigned int i;
875                 struct ipt_entry_match *m;
876                 struct ipt_entry_target *t;
877
878                 e = (struct ipt_entry *)(loc_cpu_entry + off);
879                 if (copy_to_user(userptr + off
880                                  + offsetof(struct ipt_entry, counters),
881                                  &counters[num],
882                                  sizeof(counters[num])) != 0) {
883                         ret = -EFAULT;
884                         goto free_counters;
885                 }
886
887                 for (i = sizeof(struct ipt_entry);
888                      i < e->target_offset;
889                      i += m->u.match_size) {
890                         m = (void *)e + i;
891
892                         if (copy_to_user(userptr + off + i
893                                          + offsetof(struct ipt_entry_match,
894                                                     u.user.name),
895                                          m->u.kernel.match->name,
896                                          strlen(m->u.kernel.match->name)+1)
897                             != 0) {
898                                 ret = -EFAULT;
899                                 goto free_counters;
900                         }
901                 }
902
903                 t = ipt_get_target(e);
904                 if (copy_to_user(userptr + off + e->target_offset
905                                  + offsetof(struct ipt_entry_target,
906                                             u.user.name),
907                                  t->u.kernel.target->name,
908                                  strlen(t->u.kernel.target->name)+1) != 0) {
909                         ret = -EFAULT;
910                         goto free_counters;
911                 }
912         }
913
914  free_counters:
915         vfree(counters);
916         return ret;
917 }
918
919 #ifdef CONFIG_COMPAT
920 struct compat_delta {
921         struct compat_delta *next;
922         unsigned int offset;
923         short delta;
924 };
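/*
 * Added note: each compat_delta records how many bytes a native 64-bit entry
 * at 'offset' is larger than its 32-bit counterpart; compat_calc_jump() sums
 * the deltas of all entries in front of a given offset so that standard-target
 * jump offsets can be translated between the two layouts.
 */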
925
926 static struct compat_delta *compat_offsets = NULL;
927
928 static int compat_add_offset(unsigned int offset, short delta)
929 {
930         struct compat_delta *tmp;
931
932         tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
933         if (!tmp)
934                 return -ENOMEM;
935         tmp->offset = offset;
936         tmp->delta = delta;
937         if (compat_offsets) {
938                 tmp->next = compat_offsets->next;
939                 compat_offsets->next = tmp;
940         } else {
941                 compat_offsets = tmp;
942                 tmp->next = NULL;
943         }
944         return 0;
945 }
946
947 static void compat_flush_offsets(void)
948 {
949         struct compat_delta *tmp, *next;
950
951         if (compat_offsets) {
952                 for (tmp = compat_offsets; tmp; tmp = next) {
953                         next = tmp->next;
954                         kfree(tmp);
955                 }
956                 compat_offsets = NULL;
957         }
958 }
959
960 static short compat_calc_jump(unsigned int offset)
961 {
962         struct compat_delta *tmp;
963         short delta;
964
965         for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
966                 if (tmp->offset < offset)
967                         delta += tmp->delta;
968         return delta;
969 }
970
971 static void compat_standard_from_user(void *dst, void *src)
972 {
973         int v = *(compat_int_t *)src;
974
975         if (v > 0)
976                 v += compat_calc_jump(v);
977         memcpy(dst, &v, sizeof(v));
978 }
979
980 static int compat_standard_to_user(void __user *dst, void *src)
981 {
982         compat_int_t cv = *(int *)src;
983
984         if (cv > 0)
985                 cv -= compat_calc_jump(cv);
986         return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
987 }
988
989 static inline int
990 compat_calc_match(struct ipt_entry_match *m, int * size)
991 {
992         *size += xt_compat_match_offset(m->u.kernel.match);
993         return 0;
994 }
995
996 static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
997                 void *base, struct xt_table_info *newinfo)
998 {
999         struct ipt_entry_target *t;
1000         unsigned int entry_offset;
1001         int off, i, ret;
1002
1003         off = 0;
1004         entry_offset = (void *)e - base;
1005         IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1006         t = ipt_get_target(e);
1007         off += xt_compat_target_offset(t->u.kernel.target);
1008         newinfo->size -= off;
1009         ret = compat_add_offset(entry_offset, off);
1010         if (ret)
1011                 return ret;
1012
1013         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1014                 if (info->hook_entry[i] && (e < (struct ipt_entry *)
1015                                 (base + info->hook_entry[i])))
1016                         newinfo->hook_entry[i] -= off;
1017                 if (info->underflow[i] && (e < (struct ipt_entry *)
1018                                 (base + info->underflow[i])))
1019                         newinfo->underflow[i] -= off;
1020         }
1021         return 0;
1022 }
1023
1024 static int compat_table_info(struct xt_table_info *info,
1025                 struct xt_table_info *newinfo)
1026 {
1027         void *loc_cpu_entry;
1028         int i;
1029
1030         if (!newinfo || !info)
1031                 return -EINVAL;
1032
1033         memset(newinfo, 0, sizeof(struct xt_table_info));
1034         newinfo->size = info->size;
1035         newinfo->number = info->number;
1036         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1037                 newinfo->hook_entry[i] = info->hook_entry[i];
1038                 newinfo->underflow[i] = info->underflow[i];
1039         }
1040         loc_cpu_entry = info->entries[raw_smp_processor_id()];
1041         return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1042                         compat_calc_entry, info, loc_cpu_entry, newinfo);
1043 }
1044 #endif
1045
1046 static int get_info(void __user *user, int *len, int compat)
1047 {
1048         char name[IPT_TABLE_MAXNAMELEN];
1049         struct xt_table *t;
1050         int ret;
1051
1052         if (*len != sizeof(struct ipt_getinfo)) {
1053                 duprintf("length %u != %u\n", *len,
1054                         (unsigned int)sizeof(struct ipt_getinfo));
1055                 return -EINVAL;
1056         }
1057
1058         if (copy_from_user(name, user, sizeof(name)) != 0)
1059                 return -EFAULT;
1060
1061         name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1062 #ifdef CONFIG_COMPAT
1063         if (compat)
1064                 xt_compat_lock(AF_INET);
1065 #endif
1066         t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1067                         "iptable_%s", name);
1068         if (t && !IS_ERR(t)) {
1069                 struct ipt_getinfo info;
1070                 struct xt_table_info *private = t->private;
1071
1072 #ifdef CONFIG_COMPAT
1073                 if (compat) {
1074                         struct xt_table_info tmp;
1075                         ret = compat_table_info(private, &tmp);
1076                         compat_flush_offsets();
1077                         private =  &tmp;
1078                 }
1079 #endif
1080                 info.valid_hooks = t->valid_hooks;
1081                 memcpy(info.hook_entry, private->hook_entry,
1082                                 sizeof(info.hook_entry));
1083                 memcpy(info.underflow, private->underflow,
1084                                 sizeof(info.underflow));
1085                 info.num_entries = private->number;
1086                 info.size = private->size;
1087                 strcpy(info.name, name);
1088
1089                 if (copy_to_user(user, &info, *len) != 0)
1090                         ret = -EFAULT;
1091                 else
1092                         ret = 0;
1093
1094                 xt_table_unlock(t);
1095                 module_put(t->me);
1096         } else
1097                 ret = t ? PTR_ERR(t) : -ENOENT;
1098 #ifdef CONFIG_COMPAT
1099         if (compat)
1100                 xt_compat_unlock(AF_INET);
1101 #endif
1102         return ret;
1103 }
1104
1105 static int
1106 get_entries(struct ipt_get_entries __user *uptr, int *len)
1107 {
1108         int ret;
1109         struct ipt_get_entries get;
1110         struct xt_table *t;
1111
1112         if (*len < sizeof(get)) {
1113                 duprintf("get_entries: %u < %d\n", *len,
1114                                 (unsigned int)sizeof(get));
1115                 return -EINVAL;
1116         }
1117         if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1118                 return -EFAULT;
1119         if (*len != sizeof(struct ipt_get_entries) + get.size) {
1120                 duprintf("get_entries: %u != %u\n", *len,
1121                                 (unsigned int)(sizeof(struct ipt_get_entries) +
1122                                 get.size));
1123                 return -EINVAL;
1124         }
1125
1126         t = xt_find_table_lock(AF_INET, get.name);
1127         if (t && !IS_ERR(t)) {
1128                 struct xt_table_info *private = t->private;
1129                 duprintf("t->private->number = %u\n",
1130                          private->number);
1131                 if (get.size == private->size)
1132                         ret = copy_entries_to_user(private->size,
1133                                                    t, uptr->entrytable);
1134                 else {
1135                         duprintf("get_entries: I've got %u not %u!\n",
1136                                  private->size,
1137                                  get.size);
1138                         ret = -EINVAL;
1139                 }
1140                 module_put(t->me);
1141                 xt_table_unlock(t);
1142         } else
1143                 ret = t ? PTR_ERR(t) : -ENOENT;
1144
1145         return ret;
1146 }
1147
1148 static int
1149 __do_replace(const char *name, unsigned int valid_hooks,
1150                 struct xt_table_info *newinfo, unsigned int num_counters,
1151                 void __user *counters_ptr)
1152 {
1153         int ret;
1154         struct xt_table *t;
1155         struct xt_table_info *oldinfo;
1156         struct xt_counters *counters;
1157         void *loc_cpu_old_entry;
1158
1159         ret = 0;
1160         counters = vmalloc(num_counters * sizeof(struct xt_counters));
1161         if (!counters) {
1162                 ret = -ENOMEM;
1163                 goto out;
1164         }
1165
1166         t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1167                                     "iptable_%s", name);
1168         if (!t || IS_ERR(t)) {
1169                 ret = t ? PTR_ERR(t) : -ENOENT;
1170                 goto free_newinfo_counters_untrans;
1171         }
1172
1173         /* You lied! */
1174         if (valid_hooks != t->valid_hooks) {
1175                 duprintf("Valid hook crap: %08X vs %08X\n",
1176                          valid_hooks, t->valid_hooks);
1177                 ret = -EINVAL;
1178                 goto put_module;
1179         }
1180
1181         oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1182         if (!oldinfo)
1183                 goto put_module;
1184
1185         /* Update module usage count based on number of rules */
1186         duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1187                 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1188         if ((oldinfo->number > oldinfo->initial_entries) ||
1189             (newinfo->number <= oldinfo->initial_entries))
1190                 module_put(t->me);
1191         if ((oldinfo->number > oldinfo->initial_entries) &&
1192             (newinfo->number <= oldinfo->initial_entries))
1193                 module_put(t->me);
1194
1195         /* Get the old counters. */
1196         get_counters(oldinfo, counters);
1197         /* Decrease module usage counts and free resource */
1198         loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1199         IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1200         xt_free_table_info(oldinfo);
1201         if (copy_to_user(counters_ptr, counters,
1202                          sizeof(struct xt_counters) * num_counters) != 0)
1203                 ret = -EFAULT;
1204         vfree(counters);
1205         xt_table_unlock(t);
1206         return ret;
1207
1208  put_module:
1209         module_put(t->me);
1210         xt_table_unlock(t);
1211  free_newinfo_counters_untrans:
1212         vfree(counters);
1213  out:
1214         return ret;
1215 }
1216
1217 static int
1218 do_replace(void __user *user, unsigned int len)
1219 {
1220         int ret;
1221         struct ipt_replace tmp;
1222         struct xt_table_info *newinfo;
1223         void *loc_cpu_entry;
1224
1225         if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1226                 return -EFAULT;
1227
1228         /* Hack: Causes ipchains to give correct error msg --RR */
1229         if (len != sizeof(tmp) + tmp.size)
1230                 return -ENOPROTOOPT;
1231
1232         /* overflow check */
1233         if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1234                         SMP_CACHE_BYTES)
1235                 return -ENOMEM;
1236         if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1237                 return -ENOMEM;
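        /*
         * Added note: xt_alloc_table_info() sets aside roughly tmp.size bytes
         * per possible CPU plus the xt_table_info header, and __do_replace()
         * later vmallocs num_counters * sizeof(struct xt_counters), so both
         * values are bounded above to keep those size calculations from
         * overflowing.
         */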
1238
1239         newinfo = xt_alloc_table_info(tmp.size);
1240         if (!newinfo)
1241                 return -ENOMEM;
1242
1243         /* choose the copy that is on our node/cpu */
1244         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1245         if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1246                            tmp.size) != 0) {
1247                 ret = -EFAULT;
1248                 goto free_newinfo;
1249         }
1250
1251         ret = translate_table(tmp.name, tmp.valid_hooks,
1252                               newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1253                               tmp.hook_entry, tmp.underflow);
1254         if (ret != 0)
1255                 goto free_newinfo;
1256
1257         duprintf("ip_tables: Translated table\n");
1258
1259         ret = __do_replace(tmp.name, tmp.valid_hooks,
1260                               newinfo, tmp.num_counters,
1261                               tmp.counters);
1262         if (ret)
1263                 goto free_newinfo_untrans;
1264         return 0;
1265
1266  free_newinfo_untrans:
1267         IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1268  free_newinfo:
1269         xt_free_table_info(newinfo);
1270         return ret;
1271 }
1272
1273 /* We're lazy, and add to the first CPU; overflow works its fey magic
1274  * and everything is OK. */
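/*
 * Added note: the increments land only in the current CPU's copy of the
 * ruleset; that is fine because get_counters() always sums every CPU's copy,
 * so the per-rule totals still come out right.
 */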
1275 static inline int
1276 add_counter_to_entry(struct ipt_entry *e,
1277                      const struct xt_counters addme[],
1278                      unsigned int *i)
1279 {
1280 #if 0
1281         duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1282                  *i,
1283                  (long unsigned int)e->counters.pcnt,
1284                  (long unsigned int)e->counters.bcnt,
1285                  (long unsigned int)addme[*i].pcnt,
1286                  (long unsigned int)addme[*i].bcnt);
1287 #endif
1288
1289         ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1290
1291         (*i)++;
1292         return 0;
1293 }
1294
1295 static int
1296 do_add_counters(void __user *user, unsigned int len, int compat)
1297 {
1298         unsigned int i;
1299         struct xt_counters_info tmp;
1300         struct xt_counters *paddc;
1301         unsigned int num_counters;
1302         char *name;
1303         int size;
1304         void *ptmp;
1305         struct xt_table *t;
1306         struct xt_table_info *private;
1307         int ret = 0;
1308         void *loc_cpu_entry;
1309 #ifdef CONFIG_COMPAT
1310         struct compat_xt_counters_info compat_tmp;
1311
1312         if (compat) {
1313                 ptmp = &compat_tmp;
1314                 size = sizeof(struct compat_xt_counters_info);
1315         } else
1316 #endif
1317         {
1318                 ptmp = &tmp;
1319                 size = sizeof(struct xt_counters_info);
1320         }
1321
1322         if (copy_from_user(ptmp, user, size) != 0)
1323                 return -EFAULT;
1324
1325 #ifdef CONFIG_COMPAT
1326         if (compat) {
1327                 num_counters = compat_tmp.num_counters;
1328                 name = compat_tmp.name;
1329         } else
1330 #endif
1331         {
1332                 num_counters = tmp.num_counters;
1333                 name = tmp.name;
1334         }
1335
1336         if (len != size + num_counters * sizeof(struct xt_counters))
1337                 return -EINVAL;
1338
1339         paddc = vmalloc_node(len - size, numa_node_id());
1340         if (!paddc)
1341                 return -ENOMEM;
1342
1343         if (copy_from_user(paddc, user + size, len - size) != 0) {
1344                 ret = -EFAULT;
1345                 goto free;
1346         }
1347
1348         t = xt_find_table_lock(AF_INET, name);
1349         if (!t || IS_ERR(t)) {
1350                 ret = t ? PTR_ERR(t) : -ENOENT;
1351                 goto free;
1352         }
1353
1354         write_lock_bh(&t->lock);
1355         private = t->private;
1356         if (private->number != num_counters) {
1357                 ret = -EINVAL;
1358                 goto unlock_up_free;
1359         }
1360
1361         i = 0;
1362         /* Choose the copy that is on our node */
1363         loc_cpu_entry = private->entries[raw_smp_processor_id()];
1364         IPT_ENTRY_ITERATE(loc_cpu_entry,
1365                           private->size,
1366                           add_counter_to_entry,
1367                           paddc,
1368                           &i);
1369  unlock_up_free:
1370         write_unlock_bh(&t->lock);
1371         xt_table_unlock(t);
1372         module_put(t->me);
1373  free:
1374         vfree(paddc);
1375
1376         return ret;
1377 }
1378
1379 #ifdef CONFIG_COMPAT
1380 struct compat_ipt_replace {
1381         char                    name[IPT_TABLE_MAXNAMELEN];
1382         u32                     valid_hooks;
1383         u32                     num_entries;
1384         u32                     size;
1385         u32                     hook_entry[NF_IP_NUMHOOKS];
1386         u32                     underflow[NF_IP_NUMHOOKS];
1387         u32                     num_counters;
1388         compat_uptr_t           counters;       /* struct ipt_counters * */
1389         struct compat_ipt_entry entries[0];
1390 };
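/*
 * Added note: this mirrors struct ipt_replace but with 32-bit sizes and a
 * compat_uptr_t counters pointer; entries[] carries the 32-bit entry blob,
 * whose jump offsets are in compat layout and therefore need the
 * compat_calc_jump() adjustment when converted to or from the native form.
 */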
1391
1392 static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1393                 void __user **dstptr, compat_uint_t *size)
1394 {
1395         return xt_compat_match_to_user(m, dstptr, size);
1396 }
1397
1398 static int compat_copy_entry_to_user(struct ipt_entry *e,
1399                 void __user **dstptr, compat_uint_t *size)
1400 {
1401         struct ipt_entry_target *t;
1402         struct compat_ipt_entry __user *ce;
1403         u_int16_t target_offset, next_offset;
1404         compat_uint_t origsize;
1405         int ret;
1406
1407         ret = -EFAULT;
1408         origsize = *size;
1409         ce = (struct compat_ipt_entry __user *)*dstptr;
1410         if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1411                 goto out;
1412
1413         *dstptr += sizeof(struct compat_ipt_entry);
1414         ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1415         target_offset = e->target_offset - (origsize - *size);
1416         if (ret)
1417                 goto out;
1418         t = ipt_get_target(e);
1419         ret = xt_compat_target_to_user(t, dstptr, size);
1420         if (ret)
1421                 goto out;
1422         ret = -EFAULT;
1423         next_offset = e->next_offset - (origsize - *size);
1424         if (put_user(target_offset, &ce->target_offset))
1425                 goto out;
1426         if (put_user(next_offset, &ce->next_offset))
1427                 goto out;
1428         return 0;
1429 out:
1430         return ret;
1431 }
1432
1433 static inline int
1434 compat_check_calc_match(struct ipt_entry_match *m,
1435             const char *name,
1436             const struct ipt_ip *ip,
1437             unsigned int hookmask,
1438             int *size, int *i)
1439 {
1440         struct xt_match *match;
1441
1442         match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1443                                                    m->u.user.revision),
1444                                         "ipt_%s", m->u.user.name);
1445         if (IS_ERR(match) || !match) {
1446                 duprintf("compat_check_calc_match: `%s' not found\n",
1447                                 m->u.user.name);
1448                 return match ? PTR_ERR(match) : -ENOENT;
1449         }
1450         m->u.kernel.match = match;
1451         *size += xt_compat_match_offset(match);
1452
1453         (*i)++;
1454         return 0;
1455 }
1456
1457 static inline int
1458 check_compat_entry_size_and_hooks(struct ipt_entry *e,
1459                            struct xt_table_info *newinfo,
1460                            unsigned int *size,
1461                            unsigned char *base,
1462                            unsigned char *limit,
1463                            unsigned int *hook_entries,
1464                            unsigned int *underflows,
1465                            unsigned int *i,
1466                            const char *name)
1467 {
1468         struct ipt_entry_target *t;
1469         struct xt_target *target;
1470         unsigned int entry_offset;
1471         int ret, off, h, j;
1472
1473         duprintf("check_compat_entry_size_and_hooks %p\n", e);
1474         if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1475             || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1476                 duprintf("Bad offset %p, limit = %p\n", e, limit);
1477                 return -EINVAL;
1478         }
1479
1480         if (e->next_offset < sizeof(struct compat_ipt_entry) +
1481                         sizeof(struct compat_xt_entry_target)) {
1482                 duprintf("checking: element %p size %u\n",
1483                          e, e->next_offset);
1484                 return -EINVAL;
1485         }
1486
1487         ret = check_entry(e, name);
1488         if (ret)
1489                 return ret;
1490
1491         off = 0;
1492         entry_offset = (void *)e - (void *)base;
1493         j = 0;
1494         ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
1495                         e->comefrom, &off, &j);
1496         if (ret != 0)
1497                 goto cleanup_matches;
1498
1499         t = ipt_get_target(e);
1500         target = try_then_request_module(xt_find_target(AF_INET,
1501                                                      t->u.user.name,
1502                                                      t->u.user.revision),
1503                                          "ipt_%s", t->u.user.name);
1504         if (IS_ERR(target) || !target) {
1505                 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1506                                                         t->u.user.name);
1507                 ret = target ? PTR_ERR(target) : -ENOENT;
1508                 goto cleanup_matches;
1509         }
1510         t->u.kernel.target = target;
1511
1512         off += xt_compat_target_offset(target);
1513         *size += off;
1514         ret = compat_add_offset(entry_offset, off);
1515         if (ret)
1516                 goto out;
1517
1518         /* Check hooks & underflows */
1519         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1520                 if ((unsigned char *)e - base == hook_entries[h])
1521                         newinfo->hook_entry[h] = hook_entries[h];
1522                 if ((unsigned char *)e - base == underflows[h])
1523                         newinfo->underflow[h] = underflows[h];
1524         }
1525
1526         /* Clear counters and comefrom */
1527         e->counters = ((struct ipt_counters) { 0, 0 });
1528         e->comefrom = 0;
1529
1530         (*i)++;
1531         return 0;
1532
1533 out:
1534         module_put(t->u.kernel.target->me);
1535 cleanup_matches:
1536         IPT_MATCH_ITERATE(e, cleanup_match, &j);
1537         return ret;
1538 }
1539
1540 static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1541         void **dstptr, compat_uint_t *size, const char *name,
1542         const struct ipt_ip *ip, unsigned int hookmask)
1543 {
1544         xt_compat_match_from_user(m, dstptr, size);
1545         return 0;
1546 }
1547
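/*
 * Second pass: expand one compat entry into native form at *dstptr.
 * Matches and the target are widened by the xt compat helpers,
 * target_offset/next_offset grow by the same amount, and hook
 * entries/underflows that lie beyond this entry are shifted
 * accordingly.
 */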
1548 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1549         unsigned int *size, const char *name,
1550         struct xt_table_info *newinfo, unsigned char *base)
1551 {
1552         struct ipt_entry_target *t;
1553         struct xt_target *target;
1554         struct ipt_entry *de;
1555         unsigned int origsize;
1556         int ret, h;
1557
1558         ret = 0;
1559         origsize = *size;
1560         de = (struct ipt_entry *)*dstptr;
1561         memcpy(de, e, sizeof(struct ipt_entry));
1562
1563         *dstptr += sizeof(struct compat_ipt_entry);
1564         ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
1565                         name, &de->ip, de->comefrom);
1566         if (ret)
1567                 return ret;
1568         de->target_offset = e->target_offset - (origsize - *size);
1569         t = ipt_get_target(e);
1570         target = t->u.kernel.target;
1571         xt_compat_target_from_user(t, dstptr, size);
1572
1573         de->next_offset = e->next_offset - (origsize - *size);
1574         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1575                 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1576                         newinfo->hook_entry[h] -= origsize - *size;
1577                 if ((unsigned char *)de - base < newinfo->underflow[h])
1578                         newinfo->underflow[h] -= origsize - *size;
1579         }
1580         return ret;
1581 }
1582
1583 static inline int compat_check_entry(struct ipt_entry *e, const char *name)
1584 {
1585         int ret;
1586
1587         ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom);
1588         if (ret)
1589                 return ret;
1590
1591         return check_target(e, name);
1592 }
1593
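/*
 * Translate a 32-bit ruleset into native form.  Pass 1 (under the xt
 * compat lock) validates every compat entry and computes the enlarged
 * size; pass 2 copies the entries into a freshly allocated
 * xt_table_info.  The result is then loop-checked with
 * mark_source_chains(), re-checked entry by entry and replicated to
 * every possible CPU.
 */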
1594 static int
1595 translate_compat_table(const char *name,
1596                 unsigned int valid_hooks,
1597                 struct xt_table_info **pinfo,
1598                 void **pentry0,
1599                 unsigned int total_size,
1600                 unsigned int number,
1601                 unsigned int *hook_entries,
1602                 unsigned int *underflows)
1603 {
1604         unsigned int i, j;
1605         struct xt_table_info *newinfo, *info;
1606         void *pos, *entry0, *entry1;
1607         unsigned int size;
1608         int ret;
1609
1610         info = *pinfo;
1611         entry0 = *pentry0;
1612         size = total_size;
1613         info->number = number;
1614
1615         /* Init all hooks to impossible value. */
1616         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1617                 info->hook_entry[i] = 0xFFFFFFFF;
1618                 info->underflow[i] = 0xFFFFFFFF;
1619         }
1620
1621         duprintf("translate_compat_table: size %u\n", info->size);
1622         j = 0;
1623         xt_compat_lock(AF_INET);
1624         /* Walk through entries, checking offsets. */
1625         ret = IPT_ENTRY_ITERATE(entry0, total_size,
1626                                 check_compat_entry_size_and_hooks,
1627                                 info, &size, entry0,
1628                                 entry0 + total_size,
1629                                 hook_entries, underflows, &j, name);
1630         if (ret != 0)
1631                 goto out_unlock;
1632
1633         ret = -EINVAL;
1634         if (j != number) {
1635                 duprintf("translate_compat_table: %u not %u entries\n",
1636                          j, number);
1637                 goto out_unlock;
1638         }
1639
1640         /* Check hooks all assigned */
1641         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1642                 /* Only hooks which are valid */
1643                 if (!(valid_hooks & (1 << i)))
1644                         continue;
1645                 if (info->hook_entry[i] == 0xFFFFFFFF) {
1646                         duprintf("Invalid hook entry %u %u\n",
1647                                  i, hook_entries[i]);
1648                         goto out_unlock;
1649                 }
1650                 if (info->underflow[i] == 0xFFFFFFFF) {
1651                         duprintf("Invalid underflow %u %u\n",
1652                                  i, underflows[i]);
1653                         goto out_unlock;
1654                 }
1655         }
1656
1657         ret = -ENOMEM;
1658         newinfo = xt_alloc_table_info(size);
1659         if (!newinfo)
1660                 goto out_unlock;
1661
1662         newinfo->number = number;
1663         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1664                 newinfo->hook_entry[i] = info->hook_entry[i];
1665                 newinfo->underflow[i] = info->underflow[i];
1666         }
1667         entry1 = newinfo->entries[raw_smp_processor_id()];
1668         pos = entry1;
1669         size = total_size;
1670         ret = IPT_ENTRY_ITERATE(entry0, total_size,
1671                         compat_copy_entry_from_user, &pos, &size,
1672                         name, newinfo, entry1);
1673         compat_flush_offsets();
1674         xt_compat_unlock(AF_INET);
1675         if (ret)
1676                 goto free_newinfo;
1677
1678         ret = -ELOOP;
1679         if (!mark_source_chains(newinfo, valid_hooks, entry1))
1680                 goto free_newinfo;
1681
1682         ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1683                                                                         name);
1684         if (ret)
1685                 goto free_newinfo;
1686
1687         /* And one copy for every other CPU */
1688         for_each_possible_cpu(i)
1689                 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1690                         memcpy(newinfo->entries[i], entry1, newinfo->size);
1691
1692         *pinfo = newinfo;
1693         *pentry0 = entry1;
1694         xt_free_table_info(info);
1695         return 0;
1696
1697 free_newinfo:
1698         xt_free_table_info(newinfo);
1699 out:
1700         IPT_ENTRY_ITERATE(entry0, total_size, cleanup_entry, &j);
1701         return ret;
1702 out_unlock:
1703         compat_flush_offsets();
1704         xt_compat_unlock(AF_INET);
1705         goto out;
1706 }
1707
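/*
 * Compat counterpart of do_replace(): copy in the compat replace
 * header, sanity-check the sizes, pull the entry blob into a new
 * table_info, translate it and hand the native table to
 * __do_replace() together with the userspace counters pointer.
 */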
1708 static int
1709 compat_do_replace(void __user *user, unsigned int len)
1710 {
1711         int ret;
1712         struct compat_ipt_replace tmp;
1713         struct xt_table_info *newinfo;
1714         void *loc_cpu_entry;
1715
1716         if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1717                 return -EFAULT;
1718
1719         /* Hack: Causes ipchains to give correct error msg --RR */
1720         if (len != sizeof(tmp) + tmp.size)
1721                 return -ENOPROTOOPT;
1722
1723         /* overflow check */
1724         if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1725                         SMP_CACHE_BYTES)
1726                 return -ENOMEM;
1727         if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1728                 return -ENOMEM;
1729
1730         newinfo = xt_alloc_table_info(tmp.size);
1731         if (!newinfo)
1732                 return -ENOMEM;
1733
1734         /* choose the copy that is on our node/cpu */
1735         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1736         if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1737                            tmp.size) != 0) {
1738                 ret = -EFAULT;
1739                 goto free_newinfo;
1740         }
1741
1742         ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1743                               &newinfo, &loc_cpu_entry, tmp.size,
1744                               tmp.num_entries, tmp.hook_entry, tmp.underflow);
1745         if (ret != 0)
1746                 goto free_newinfo;
1747
1748         duprintf("compat_do_replace: Translated table\n");
1749
1750         ret = __do_replace(tmp.name, tmp.valid_hooks,
1751                               newinfo, tmp.num_counters,
1752                               compat_ptr(tmp.counters));
1753         if (ret)
1754                 goto free_newinfo_untrans;
1755         return 0;
1756
1757  free_newinfo_untrans:
1758         IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1759  free_newinfo:
1760         xt_free_table_info(newinfo);
1761         return ret;
1762 }
1763
1764 static int
1765 compat_do_ipt_set_ctl(struct sock *sk,  int cmd, void __user *user,
1766                 unsigned int len)
1767 {
1768         int ret;
1769
1770         if (!capable(CAP_NET_ADMIN))
1771                 return -EPERM;
1772
1773         switch (cmd) {
1774         case IPT_SO_SET_REPLACE:
1775                 ret = compat_do_replace(user, len);
1776                 break;
1777
1778         case IPT_SO_SET_ADD_COUNTERS:
1779                 ret = do_add_counters(user, len, 1);
1780                 break;
1781
1782         default:
1783                 duprintf("compat_do_ipt_set_ctl: unknown request %i\n", cmd);
1784                 ret = -EINVAL;
1785         }
1786
1787         return ret;
1788 }
1789
1790 struct compat_ipt_get_entries
1791 {
1792         char name[IPT_TABLE_MAXNAMELEN];
1793         compat_uint_t size;
1794         struct compat_ipt_entry entrytable[0];
1795 };
1796
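/*
 * Dump the active ruleset back to a 32-bit caller: convert every
 * entry into the user buffer first, then walk that copy again to fill
 * in the counters and to replace the kernel match/target pointers
 * with their names.
 */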
1797 static int compat_copy_entries_to_user(unsigned int total_size,
1798                      struct xt_table *table, void __user *userptr)
1799 {
1800         unsigned int off, num;
1801         struct compat_ipt_entry e;
1802         struct xt_counters *counters;
1803         struct xt_table_info *private = table->private;
1804         void __user *pos;
1805         unsigned int size;
1806         int ret = 0;
1807         void *loc_cpu_entry;
1808
1809         counters = alloc_counters(table);
1810         if (IS_ERR(counters))
1811                 return PTR_ERR(counters);
1812
1813         /* choose the copy that is on our node/cpu, ...
1814          * This choice is lazy (the current thread is
1815          * allowed to migrate to another cpu).
1816          */
1817         loc_cpu_entry = private->entries[raw_smp_processor_id()];
1818         pos = userptr;
1819         size = total_size;
1820         ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1821                         compat_copy_entry_to_user, &pos, &size);
1822         if (ret)
1823                 goto free_counters;
1824
1825         /* ... then go back and fix counters and names */
1826         for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1827                 unsigned int i;
1828                 struct ipt_entry_match m;
1829                 struct ipt_entry_target t;
1830
1831                 ret = -EFAULT;
1832                 if (copy_from_user(&e, userptr + off,
1833                                         sizeof(struct compat_ipt_entry)))
1834                         goto free_counters;
1835                 if (copy_to_user(userptr + off +
1836                         offsetof(struct compat_ipt_entry, counters),
1837                          &counters[num], sizeof(counters[num])))
1838                         goto free_counters;
1839
1840                 for (i = sizeof(struct compat_ipt_entry);
1841                                 i < e.target_offset; i += m.u.match_size) {
1842                         if (copy_from_user(&m, userptr + off + i,
1843                                         sizeof(struct ipt_entry_match)))
1844                                 goto free_counters;
1845                         if (copy_to_user(userptr + off + i +
1846                                 offsetof(struct ipt_entry_match, u.user.name),
1847                                 m.u.kernel.match->name,
1848                                 strlen(m.u.kernel.match->name) + 1))
1849                                 goto free_counters;
1850                 }
1851
1852                 if (copy_from_user(&t, userptr + off + e.target_offset,
1853                                         sizeof(struct ipt_entry_target)))
1854                         goto free_counters;
1855                 if (copy_to_user(userptr + off + e.target_offset +
1856                         offsetof(struct ipt_entry_target, u.user.name),
1857                         t.u.kernel.target->name,
1858                         strlen(t.u.kernel.target->name) + 1))
1859                         goto free_counters;
1860         }
1861         ret = 0;
1862 free_counters:
1863         vfree(counters);
1864         return ret;
1865 }
1866
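/*
 * Compat IPT_SO_GET_ENTRIES handler: verify the caller's size against
 * compat_table_info()'s view of the table and, if they agree, dump
 * the entries via compat_copy_entries_to_user().
 */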
1867 static int
1868 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1869 {
1870         int ret;
1871         struct compat_ipt_get_entries get;
1872         struct xt_table *t;
1873
1875         if (*len < sizeof(get)) {
1876                 duprintf("compat_get_entries: %u < %u\n",
1877                                 *len, (unsigned int)sizeof(get));
1878                 return -EINVAL;
1879         }
1880
1881         if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1882                 return -EFAULT;
1883
1884         if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1885                 duprintf("compat_get_entries: %u != %u\n", *len,
1886                         (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1887                         get.size));
1888                 return -EINVAL;
1889         }
1890
1891         xt_compat_lock(AF_INET);
1892         t = xt_find_table_lock(AF_INET, get.name);
1893         if (t && !IS_ERR(t)) {
1894                 struct xt_table_info *private = t->private;
1895                 struct xt_table_info info;
1896                 duprintf("t->private->number = %u\n",
1897                          private->number);
1898                 ret = compat_table_info(private, &info);
1899                 if (!ret && get.size == info.size) {
1900                         ret = compat_copy_entries_to_user(private->size,
1901                                                    t, uptr->entrytable);
1902                 } else if (!ret) {
1903                         duprintf("compat_get_entries: I've got %u not %u!\n",
1904                                  private->size,
1905                                  get.size);
1906                         ret = -EINVAL;
1907                 }
1908                 compat_flush_offsets();
1909                 module_put(t->me);
1910                 xt_table_unlock(t);
1911         } else
1912                 ret = t ? PTR_ERR(t) : -ENOENT;
1913
1914         xt_compat_unlock(AF_INET);
1915         return ret;
1916 }
1917
1918 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1919
1920 static int
1921 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1922 {
1923         int ret;
1924
1925         if (!capable(CAP_NET_ADMIN))
1926                 return -EPERM;
1927
1928         switch (cmd) {
1929         case IPT_SO_GET_INFO:
1930                 ret = get_info(user, len, 1);
1931                 break;
1932         case IPT_SO_GET_ENTRIES:
1933                 ret = compat_get_entries(user, len);
1934                 break;
1935         default:
1936                 ret = do_ipt_get_ctl(sk, cmd, user, len);
1937         }
1938         return ret;
1939 }
1940 #endif
1941
1942 static int
1943 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1944 {
1945         int ret;
1946
1947         if (!capable(CAP_NET_ADMIN))
1948                 return -EPERM;
1949
1950         switch (cmd) {
1951         case IPT_SO_SET_REPLACE:
1952                 ret = do_replace(user, len);
1953                 break;
1954
1955         case IPT_SO_SET_ADD_COUNTERS:
1956                 ret = do_add_counters(user, len, 0);
1957                 break;
1958
1959         default:
1960                 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1961                 ret = -EINVAL;
1962         }
1963
1964         return ret;
1965 }
1966
1967 static int
1968 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1969 {
1970         int ret;
1971
1972         if (!capable(CAP_NET_ADMIN))
1973                 return -EPERM;
1974
1975         switch (cmd) {
1976         case IPT_SO_GET_INFO:
1977                 ret = get_info(user, len, 0);
1978                 break;
1979
1980         case IPT_SO_GET_ENTRIES:
1981                 ret = get_entries(user, len);
1982                 break;
1983
1984         case IPT_SO_GET_REVISION_MATCH:
1985         case IPT_SO_GET_REVISION_TARGET: {
1986                 struct ipt_get_revision rev;
1987                 int target;
1988
1989                 if (*len != sizeof(rev)) {
1990                         ret = -EINVAL;
1991                         break;
1992                 }
1993                 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1994                         ret = -EFAULT;
1995                         break;
1996                 }
1997
1998                 if (cmd == IPT_SO_GET_REVISION_TARGET)
1999                         target = 1;
2000                 else
2001                         target = 0;
2002
2003                 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2004                                                          rev.revision,
2005                                                          target, &ret),
2006                                         "ipt_%s", rev.name);
2007                 break;
2008         }
2009
2010         default:
2011                 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2012                 ret = -EINVAL;
2013         }
2014
2015         return ret;
2016 }
2017
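/*
 * Called by table modules at load time: allocate an xt_table_info for
 * the initial ruleset, copy the template entries onto this CPU's
 * slot, translate/verify them and register the table with the xt
 * core.  A minimal usage sketch (my_table and my_replace stand in for
 * a module's own xt_table and ipt_replace template):
 *
 *      ret = ipt_register_table(&my_table, &my_replace);
 *      if (ret < 0)
 *              return ret;
 */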
2018 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2019 {
2020         int ret;
2021         struct xt_table_info *newinfo;
2022         static struct xt_table_info bootstrap
2023                 = { 0, 0, 0, { 0 }, { 0 }, { } };
2024         void *loc_cpu_entry;
2025
2026         newinfo = xt_alloc_table_info(repl->size);
2027         if (!newinfo)
2028                 return -ENOMEM;
2029
2030         /* choose the copy on our node/cpu,
2031          * but don't care about preemption
2032          */
2033         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2034         memcpy(loc_cpu_entry, repl->entries, repl->size);
2035
2036         ret = translate_table(table->name, table->valid_hooks,
2037                               newinfo, loc_cpu_entry, repl->size,
2038                               repl->num_entries,
2039                               repl->hook_entry,
2040                               repl->underflow);
2041         if (ret != 0) {
2042                 xt_free_table_info(newinfo);
2043                 return ret;
2044         }
2045
2046         ret = xt_register_table(table, &bootstrap, newinfo);
2047         if (ret != 0) {
2048                 xt_free_table_info(newinfo);
2049                 return ret;
2050         }
2051
2052         return 0;
2053 }
2054
2055 void ipt_unregister_table(struct xt_table *table)
2056 {
2057         struct xt_table_info *private;
2058         void *loc_cpu_entry;
2059
2060         private = xt_unregister_table(table);
2061
2062         /* Decrease module usage counts and free resources */
2063         loc_cpu_entry = private->entries[raw_smp_processor_id()];
2064         IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2065         xt_free_table_info(private);
2066 }
2067
2068 /* Returns 1 if the type and code are matched by the range, 0 otherwise */
2069 static inline int
2070 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2071                      u_int8_t type, u_int8_t code,
2072                      int invert)
2073 {
2074         return ((test_type == 0xFF) || (type == test_type &&
2075                 code >= min_code && code <= max_code)) ^ invert;
2076 }
2077
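/*
 * The built-in "icmp" match: never matches fragments (non-zero
 * offset), hot-drops packets too short to carry an ICMP header, and
 * otherwise compares type/code against the configured range, with
 * optional inversion via IPT_ICMP_INV.  A test type of 0xFF acts as
 * a wildcard in icmp_type_code_match().
 */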
2078 static int
2079 icmp_match(const struct sk_buff *skb,
2080            const struct net_device *in,
2081            const struct net_device *out,
2082            const struct xt_match *match,
2083            const void *matchinfo,
2084            int offset,
2085            unsigned int protoff,
2086            int *hotdrop)
2087 {
2088         struct icmphdr _icmph, *ic;
2089         const struct ipt_icmp *icmpinfo = matchinfo;
2090
2091         /* Must not be a fragment. */
2092         if (offset)
2093                 return 0;
2094
2095         ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2096         if (ic == NULL) {
2097                 /* We've been asked to examine this packet, and we
2098                  * can't.  Hence, no choice but to drop.
2099                  */
2100                 duprintf("Dropping evil ICMP tinygram.\n");
2101                 *hotdrop = 1;
2102                 return 0;
2103         }
2104
2105         return icmp_type_code_match(icmpinfo->type,
2106                                     icmpinfo->code[0],
2107                                     icmpinfo->code[1],
2108                                     ic->type, ic->code,
2109                                     !!(icmpinfo->invflags&IPT_ICMP_INV));
2110 }
2111
2112 /* Called when user tries to insert an entry of this type. */
2113 static int
2114 icmp_checkentry(const char *tablename,
2115            const void *info,
2116            const struct xt_match *match,
2117            void *matchinfo,
2118            unsigned int hook_mask)
2119 {
2120         const struct ipt_icmp *icmpinfo = matchinfo;
2121
2122         /* Must specify no unknown invflags */
2123         return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2124 }
2125
2126 /* The built-in targets: standard (NULL) and error. */
2127 static struct xt_target ipt_standard_target = {
2128         .name           = IPT_STANDARD_TARGET,
2129         .targetsize     = sizeof(int),
2130         .family         = AF_INET,
2131 #ifdef CONFIG_COMPAT
2132         .compatsize     = sizeof(compat_int_t),
2133         .compat_from_user = compat_standard_from_user,
2134         .compat_to_user = compat_standard_to_user,
2135 #endif
2136 };
2137
2138 static struct xt_target ipt_error_target = {
2139         .name           = IPT_ERROR_TARGET,
2140         .target         = ipt_error,
2141         .targetsize     = IPT_FUNCTION_MAXNAMELEN,
2142         .family         = AF_INET,
2143 };
2144
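/*
 * Hook the IPT_SO_SET_* / IPT_SO_GET_* ranges into the PF_INET
 * {set,get}sockopt path.  With CONFIG_COMPAT, 32-bit callers on a
 * 64-bit kernel are routed to the compat_* handlers above.
 */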
2145 static struct nf_sockopt_ops ipt_sockopts = {
2146         .pf             = PF_INET,
2147         .set_optmin     = IPT_BASE_CTL,
2148         .set_optmax     = IPT_SO_SET_MAX+1,
2149         .set            = do_ipt_set_ctl,
2150 #ifdef CONFIG_COMPAT
2151         .compat_set     = compat_do_ipt_set_ctl,
2152 #endif
2153         .get_optmin     = IPT_BASE_CTL,
2154         .get_optmax     = IPT_SO_GET_MAX+1,
2155         .get            = do_ipt_get_ctl,
2156 #ifdef CONFIG_COMPAT
2157         .compat_get     = compat_do_ipt_get_ctl,
2158 #endif
2159 };
2160
2161 static struct xt_match icmp_matchstruct = {
2162         .name           = "icmp",
2163         .match          = icmp_match,
2164         .matchsize      = sizeof(struct ipt_icmp),
2165         .proto          = IPPROTO_ICMP,
2166         .family         = AF_INET,
2167         .checkentry     = icmp_checkentry,
2168 };
2169
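/*
 * Module init: bring up the xt core for AF_INET, register the
 * built-in standard and error targets and the "icmp" match, then
 * expose the sockopt interface.  Any failure unwinds the earlier
 * registrations in reverse order (err1..err5).
 */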
2170 static int __init ip_tables_init(void)
2171 {
2172         int ret;
2173
2174         ret = xt_proto_init(AF_INET);
2175         if (ret < 0)
2176                 goto err1;
2177
2178         /* No one else will be downing the sem now, so we won't sleep */
2179         ret = xt_register_target(&ipt_standard_target);
2180         if (ret < 0)
2181                 goto err2;
2182         ret = xt_register_target(&ipt_error_target);
2183         if (ret < 0)
2184                 goto err3;
2185         ret = xt_register_match(&icmp_matchstruct);
2186         if (ret < 0)
2187                 goto err4;
2188
2189         /* Register setsockopt */
2190         ret = nf_register_sockopt(&ipt_sockopts);
2191         if (ret < 0)
2192                 goto err5;
2193
2194         printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2195         return 0;
2196
2197 err5:
2198         xt_unregister_match(&icmp_matchstruct);
2199 err4:
2200         xt_unregister_target(&ipt_error_target);
2201 err3:
2202         xt_unregister_target(&ipt_standard_target);
2203 err2:
2204         xt_proto_fini(AF_INET);
2205 err1:
2206         return ret;
2207 }
2208
2209 static void __exit ip_tables_fini(void)
2210 {
2211         nf_unregister_sockopt(&ipt_sockopts);
2212
2213         xt_unregister_match(&icmp_matchstruct);
2214         xt_unregister_target(&ipt_error_target);
2215         xt_unregister_target(&ipt_standard_target);
2216
2217         xt_proto_fini(AF_INET);
2218 }
2219
2220 EXPORT_SYMBOL(ipt_register_table);
2221 EXPORT_SYMBOL(ipt_unregister_table);
2222 EXPORT_SYMBOL(ipt_do_table);
2223 module_init(ip_tables_init);
2224 module_exit(ip_tables_fini);