#ifndef _X_TABLES_H
#define _X_TABLES_H

#include <linux/kernel.h>
#include <linux/types.h>
#define XT_FUNCTION_MAXNAMELEN 30
#define XT_EXTENSION_MAXNAMELEN 29
#define XT_TABLE_MAXNAMELEN 32
struct xt_entry_match {
	union {
		struct {
			__u16 match_size;

			/* Used by userspace */
			char name[XT_EXTENSION_MAXNAMELEN];
			__u8 revision;
		} user;
		struct {
			__u16 match_size;

			/* Used inside the kernel */
			struct xt_match *match;
		} kernel;

		/* Total length */
		__u16 match_size;
	} u;

	unsigned char data[0];
};
struct xt_entry_target {
	union {
		struct {
			__u16 target_size;

			/* Used by userspace */
			char name[XT_EXTENSION_MAXNAMELEN];
			__u8 revision;
		} user;
		struct {
			__u16 target_size;

			/* Used inside the kernel */
			struct xt_target *target;
		} kernel;

		/* Total length */
		__u16 target_size;
	} u;

	unsigned char data[0];
};
#define XT_TARGET_INIT(__name, __size)				\
{								\
	.target.u.user = {					\
		.target_size	= XT_ALIGN(__size),		\
		.name		= __name,			\
	},							\
}
struct xt_standard_target {
	struct xt_entry_target target;
	int verdict;
};
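/*
 * Example (illustrative sketch, not part of the original header):
 * statically initializing a standard target, as the built-in table
 * definitions do for their chain policies.  XT_STANDARD_TARGET is the
 * empty string, and verdicts are stored as -(NF_verdict) - 1:
 *
 *	struct xt_standard_target st = XT_TARGET_INIT(XT_STANDARD_TARGET,
 *				sizeof(struct xt_standard_target));
 *
 *	st.verdict = -NF_ACCEPT - 1;
 */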
/* The argument to IPT_SO_GET_REVISION_*.  Returns the highest revision
 * the kernel supports, if that is >= the revision requested. */
struct xt_get_revision {
	char name[XT_EXTENSION_MAXNAMELEN];
	__u8 revision;
};
/* CONTINUE verdict for targets */
#define XT_CONTINUE 0xFFFFFFFF

/* For standard target */
#define XT_RETURN (-NF_REPEAT - 1)
/* this is a dummy structure to find out the alignment requirement for a struct
 * containing all the fundamental data types that are used in ipt_entry,
 * ip6t_entry and arpt_entry.  This sucks, and it is a hack.  It will be my
 * personal pleasure to remove it -HW
 */
struct _xt_align {
	__u8 u8;
	__u16 u16;
	__u32 u32;
	__u64 u64;
};

#define XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _xt_align))
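/*
 * Worked example (illustrative, assuming a platform where
 * __alignof__(struct _xt_align) == 8): XT_ALIGN rounds a size up to
 * that alignment, so per-extension data blobs stay naturally aligned:
 *
 *	XT_ALIGN(5)  == 8
 *	XT_ALIGN(8)  == 8
 *	XT_ALIGN(13) == 16
 */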
/* Standard return verdict, or do jump. */
#define XT_STANDARD_TARGET ""
/* Error verdict. */
#define XT_ERROR_TARGET "ERROR"
#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)

struct xt_counters {
	__u64 pcnt, bcnt;			/* Packet and byte counters */
};
/* The argument to IPT_SO_ADD_COUNTERS. */
struct xt_counters_info {
	/* Which table. */
	char name[XT_TABLE_MAXNAMELEN];

	unsigned int num_counters;

	/* The counters (actually `num_counters' of these). */
	struct xt_counters counters[0];
};
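/*
 * Example (illustrative sketch): how traversal code accounts a matched
 * packet against a rule's counters using the macros above:
 *
 *	struct xt_counters c = { 0, 0 };
 *
 *	ADD_COUNTER(c, skb->len, 1);	// one packet, skb->len bytes
 *	SET_COUNTER(c, 0, 0);		// reset both counters
 */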
#define XT_INV_PROTO		0x40	/* Invert the sense of PROTO. */

#ifndef __KERNEL__
/* fn returns 0 to continue iteration */
#define XT_MATCH_ITERATE(type, e, fn, args...)			\
({								\
	unsigned int __i;					\
	int __ret = 0;						\
	struct xt_entry_match *__m;				\
								\
	for (__i = sizeof(type);				\
	     __i < (e)->target_offset;				\
	     __i += __m->u.match_size) {			\
		__m = (void *)(e) + __i;			\
								\
		__ret = fn(__m , ## args);			\
		if (__ret != 0)					\
			break;					\
	}							\
	__ret;							\
})
/* fn returns 0 to continue iteration */
#define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \
({								\
	unsigned int __i, __n;					\
	int __ret = 0;						\
	type *__entry;						\
								\
	for (__i = 0, __n = 0; __i < (size);			\
	     __i += __entry->next_offset, __n++) {		\
		__entry = (void *)(entries) + __i;		\
		if (__n < n)					\
			continue;				\
								\
		__ret = fn(__entry , ## args);			\
		if (__ret != 0)					\
			break;					\
	}							\
	__ret;							\
})
/* fn returns 0 to continue iteration */
#define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \
	XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args)
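/*
 * Example (illustrative sketch; `count_one' is a hypothetical helper):
 * counting the rules in an ipt_entry blob.  The callback returns 0 so
 * that iteration continues across the whole blob:
 *
 *	static int count_one(struct ipt_entry *e, unsigned int *n)
 *	{
 *		(*n)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	XT_ENTRY_ITERATE(struct ipt_entry, entries, size, count_one, &n);
 */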
#endif /* !__KERNEL__ */
/* pos is normally a struct ipt_entry/ip6t_entry/etc. */
#define xt_entry_foreach(pos, ehead, esize) \
	for ((pos) = (typeof(pos))(ehead); \
	     (pos) < (typeof(pos))((char *)(ehead) + (esize)); \
	     (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset))
/* can only be xt_entry_match, so no use of typeof here */
#define xt_ematch_foreach(pos, entry) \
	for ((pos) = (struct xt_entry_match *)(entry)->elems; \
	     (pos) < (struct xt_entry_match *)((char *)(entry) + \
	             (entry)->target_offset); \
	     (pos) = (struct xt_entry_match *)((char *)(pos) + \
	             (pos)->u.match_size))
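/*
 * Example (illustrative sketch; `do_something' is hypothetical):
 * walking every entry of a table blob, and every match within each
 * entry, using the two helpers above:
 *
 *	struct ipt_entry *iter;
 *	struct xt_entry_match *ematch;
 *
 *	xt_entry_foreach(iter, entries, size)
 *		xt_ematch_foreach(ematch, iter)
 *			do_something(ematch);
 */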
#ifdef __KERNEL__

#include <linux/netdevice.h>
/**
 * struct xt_match_param - parameters for match extensions' match functions
 *
 * @in:		input netdevice
 * @out:	output netdevice
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @fragoff:	packet is a fragment, this is the data offset
 * @thoff:	position of transport header relative to skb->data
 * @hooknum:	hook number the packet came from
 * @family:	actual NFPROTO_* through which the function is invoked
 *		(helpful when match->family == NFPROTO_UNSPEC)
 * @hotdrop:	drop the packet if we had inspection problems
 *
 * The network namespace is obtainable using dev_net(in/out).
 */
struct xt_match_param {
	const struct net_device *in, *out;
	const struct xt_match *match;
	const void *matchinfo;
	int fragoff;
	unsigned int thoff;
	unsigned int hooknum;
	u_int8_t family;
	bool *hotdrop;
};
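/*
 * Example (illustrative sketch of a hypothetical extension, not part
 * of the original header): a minimal match function for a UDP rule.
 * It pulls the transport header via skb_header_pointer() using @thoff,
 * and sets *par->hotdrop on a truncated packet:
 *
 *	static bool foo_mt(const struct sk_buff *skb,
 *			   const struct xt_match_param *par)
 *	{
 *		struct udphdr _hdr;
 *		const struct udphdr *uh;
 *
 *		uh = skb_header_pointer(skb, par->thoff,
 *					sizeof(_hdr), &_hdr);
 *		if (uh == NULL) {
 *			*par->hotdrop = true;
 *			return false;
 *		}
 *		return uh->dest == htons(53);
 *	}
 */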
/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:	network namespace through which the check was invoked
 * @table:	table the rule is being inserted into
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @hook_mask:	via which hooks the new rule is reachable
 *
 * Other fields as above.
 */
struct xt_mtchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_match *match;
	void *matchinfo;
	unsigned int hook_mask;
	u_int8_t family;
};
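/*
 * Example (illustrative sketch; `foo_mtinfo' and FOO_VALID_FLAGS are
 * hypothetical): a checkentry function that validates the per-match
 * data and restricts the match to one table, returning 0 on success
 * or a negative errno:
 *
 *	static int foo_mt_check(const struct xt_mtchk_param *par)
 *	{
 *		const struct foo_mtinfo *info = par->matchinfo;
 *
 *		if (strcmp(par->table, "filter") != 0)
 *			return -EINVAL;
 *		if (info->flags & ~FOO_VALID_FLAGS)
 *			return -EINVAL;
 *		return 0;
 *	}
 */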
/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
	struct net *net;
	const struct xt_match *match;
	void *matchinfo;
	u_int8_t family;
};
/**
 * struct xt_target_param - parameters for target extensions' target functions
 *
 * @hooknum:	hook through which this target was invoked
 * @target:	struct xt_target through which this function was invoked
 * @targinfo:	per-target data
 *
 * Other fields as above.
 */
struct xt_target_param {
	const struct net_device *in, *out;
	const struct xt_target *target;
	const void *targinfo;
	unsigned int hooknum;
	u_int8_t family;
};
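/*
 * Example (illustrative sketch; `foo_tginfo' is hypothetical): a
 * target function returning a netfilter verdict.  XT_CONTINUE keeps
 * rule traversal going; NF_DROP would end it:
 *
 *	static unsigned int foo_tg(struct sk_buff *skb,
 *				   const struct xt_target_param *par)
 *	{
 *		const struct foo_tginfo *info = par->targinfo;
 *
 *		if (!skb_make_writable(skb, skb->len))
 *			return NF_DROP;
 *		// ... mangle the packet according to info ...
 *		return XT_CONTINUE;
 *	}
 */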
/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields as above.
 */
struct xt_tgchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_target *target;
	void *targinfo;
	unsigned int hook_mask;
	u_int8_t family;
};
/* Target destructor parameters */
struct xt_tgdtor_param {
	struct net *net;
	const struct xt_target *target;
	void *targinfo;
	u_int8_t family;
};
struct xt_match {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;

	/* Return true or false: return false and set *hotdrop = true to
	   force immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skbs, using skb_header_pointer and
	   skb_ip_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      const struct xt_match_param *);

	/* Called when user tries to insert an entry of this type. */
	int (*checkentry)(const struct xt_mtchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);

#ifdef CONFIG_COMPAT
	/* Called when userspace alignment differs from the kernel-space one */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int matchsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};
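/*
 * Example (illustrative sketch, hypothetical module; foo_mt,
 * foo_mt_check and struct foo_mtinfo as sketched above): filling in a
 * struct xt_match and registering it from module init:
 *
 *	static struct xt_match foo_mt_reg __read_mostly = {
 *		.name		= "foo",
 *		.revision	= 0,
 *		.family		= NFPROTO_UNSPEC,
 *		.match		= foo_mt,
 *		.checkentry	= foo_mt_check,
 *		.matchsize	= sizeof(struct foo_mtinfo),
 *		.me		= THIS_MODULE,
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_match(&foo_mt_reg);
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_match(&foo_mt_reg);
 *	}
 */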
/* Registration hooks for targets. */
struct xt_target {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;

	/* Returns verdict.  Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_ip_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_target_param *);

	/* Called when user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return 0 on success or a negative error code (-Exxxx). */
	int (*checkentry)(const struct xt_tgchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);

#ifdef CONFIG_COMPAT
	/* Called when userspace alignment differs from the kernel-space one */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int targetsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};
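/*
 * Example (illustrative sketch, hypothetical module; foo_tg and
 * struct foo_tginfo as sketched above): a target restricted to the
 * mangle table and the LOCAL_OUT hook, registered through the array
 * helpers declared below:
 *
 *	static struct xt_target foo_tg_reg[] __read_mostly = {
 *		{
 *			.name		= "FOO",
 *			.family		= NFPROTO_IPV4,
 *			.target		= foo_tg,
 *			.targetsize	= sizeof(struct foo_tginfo),
 *			.table		= "mangle",
 *			.hooks		= 1 << NF_INET_LOCAL_OUT,
 *			.me		= THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_tg_init(void)
 *	{
 *		return xt_register_targets(foo_tg_reg,
 *					   ARRAY_SIZE(foo_tg_reg));
 *	}
 */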
/* Furniture shopping... */
struct xt_table {
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */
	int priority;		/* hook order */

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
};
#include <linux/netfilter_ipv4.h>
/* The table itself */
struct xt_table_info {
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];

	/*
	 * Number of user chains. Since tables cannot have loops, at most
	 * @stacksize jumps (number of user chains) can possibly be made.
	 */
	unsigned int stacksize;
	unsigned int *stackptr;
	void ***jumpstack;
	/* ipt_entry tables: one per CPU */
	/* Note: this field MUST be the last one, see XT_TABLE_INFO_SZ */
	void *entries[1];
};

#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
			  + nr_cpu_ids * sizeof(char *))
extern int xt_register_target(struct xt_target *target);
extern void xt_unregister_target(struct xt_target *target);
extern int xt_register_targets(struct xt_target *target, unsigned int n);
extern void xt_unregister_targets(struct xt_target *target, unsigned int n);

extern int xt_register_match(struct xt_match *match);
extern void xt_unregister_match(struct xt_match *match);
extern int xt_register_matches(struct xt_match *match, unsigned int n);
extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
extern int xt_check_match(struct xt_mtchk_param *,
			  unsigned int size, u_int8_t proto, bool inv_proto);
extern int xt_check_target(struct xt_tgchk_param *,
			   unsigned int size, u_int8_t proto, bool inv_proto);
extern struct xt_table *xt_register_table(struct net *net,
					  const struct xt_table *table,
					  struct xt_table_info *bootstrap,
					  struct xt_table_info *newinfo);
extern void *xt_unregister_table(struct xt_table *table);
extern struct xt_table_info *xt_replace_table(struct xt_table *table,
					      unsigned int num_counters,
					      struct xt_table_info *newinfo,
					      int *error);
extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
extern struct xt_match *xt_request_find_match(u8 af, const char *name,
					      u8 revision);
extern struct xt_target *xt_request_find_target(u8 af, const char *name,
						u8 revision);
extern int xt_find_revision(u8 af, const char *name, u8 revision,
			    int target, int *err);
extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
					   const char *name);
extern void xt_table_unlock(struct xt_table *t);
extern int xt_proto_init(struct net *net, u_int8_t af);
extern void xt_proto_fini(struct net *net, u_int8_t af);

extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
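/*
 * Example (illustrative sketch, after the pattern of the per-family
 * wrappers such as ipt_register_table; `repl' is a hypothetical
 * replacement blob whose entries get translated into `newinfo'):
 *
 *	struct xt_table_info bootstrap = { 0 };
 *	struct xt_table_info *newinfo;
 *	struct xt_table *new_table;
 *
 *	newinfo = xt_alloc_table_info(repl->size);
 *	if (newinfo == NULL)
 *		return ERR_PTR(-ENOMEM);
 *	// ... copy and verify the entries from repl into newinfo ...
 *	new_table = xt_register_table(net, table, &bootstrap, newinfo);
 *	if (IS_ERR(new_table))
 *		xt_free_table_info(newinfo);
 */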
/*
 * Per-CPU spinlock associated with per-cpu table entries, and
 * with a counter for the "reading" side that allows a recursive
 * reader to avoid taking the lock and deadlocking.
 *
 * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
 * It needs to ensure that the rules are not being changed while the packet
 * is being processed.  In some cases, the read lock will be acquired
 * twice on the same CPU; this is okay because of the count.
 *
 * "writing" is used when reading counters.
 * During replace any readers that are using the old tables have to complete
 * before freeing the old table.  This is handled by the write locking
 * necessary for reading the counters.
 */
struct xt_info_lock {
	spinlock_t lock;
	unsigned char readers;
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
/*
 * Note: we need to ensure that preemption is disabled before acquiring
 * the per-cpu-variable, so we do it as a two step process rather than
 * using "spin_lock_bh()".
 *
 * We _also_ need to disable bottom half processing before updating our
 * nesting count, to make sure that the only kind of re-entrancy is this
 * code being called by itself: since the count+lock is not an atomic
 * operation, we can allow no races.
 *
 * _Only_ that special combination of being per-cpu and never getting
 * re-entered asynchronously means that the count is safe.
 */
static inline void xt_info_rdlock_bh(void)
{
	struct xt_info_lock *lock;

	local_bh_disable();
	lock = &__get_cpu_var(xt_info_locks);
	if (likely(!lock->readers++))
		spin_lock(&lock->lock);
}
static inline void xt_info_rdunlock_bh(void)
{
	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

	if (likely(!--lock->readers))
		spin_unlock(&lock->lock);
	local_bh_enable();
}
/*
 * The "writer" side needs to get exclusive access to the lock,
 * regardless of readers.  This must be called with bottom half
 * processing (and thus also preemption) disabled.
 */
static inline void xt_info_wrlock(unsigned int cpu)
{
	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
}

static inline void xt_info_wrunlock(unsigned int cpu)
{
	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
}
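/*
 * Example (illustrative sketch, after the pattern of the per-family
 * traversal and counter-folding code): the recursive read lock
 * brackets rule processing, while the "write" side brackets counter
 * reads and must run with bottom halves disabled:
 *
 *	xt_info_rdlock_bh();
 *	private = table->private;
 *	// ... traverse private->entries[smp_processor_id()] ...
 *	xt_info_rdunlock_bh();
 *
 *	local_bh_disable();
 *	for_each_possible_cpu(cpu) {
 *		xt_info_wrlock(cpu);
 *		// ... fold this cpu's counters ...
 *		xt_info_wrunlock(cpu);
 *	}
 *	local_bh_enable();
 */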
/*
 * This helper is performance critical and must be inlined.
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
						   const char *_b,
						   const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
	return ret;
}
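/*
 * Example (illustrative sketch; `info' is a hypothetical per-match
 * structure holding an interface name and mask, both IFNAMSIZ bytes
 * and suitably aligned).  A zero result means the device name matches
 * under the mask:
 *
 *	if (ifname_compare_aligned(par->in->name, info->iface,
 *				   info->iface_mask) != 0)
 *		return false;
 */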
extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
#ifdef CONFIG_COMPAT

#include <net/compat.h>
struct compat_xt_entry_match {
	union {
		struct {
			u_int16_t match_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t align;
		} user;
		struct {
			u_int16_t match_size;
			compat_uptr_t match;
		} kernel;
		u_int16_t match_size;
	} u;
	unsigned char data[0];
};
struct compat_xt_entry_target {
	union {
		struct {
			u_int16_t target_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t align;
		} user;
		struct {
			u_int16_t target_size;
			compat_uptr_t target;
		} kernel;
		u_int16_t target_size;
	} u;
	unsigned char data[0];
};
/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters {
	compat_u64 pcnt, bcnt;			/* Packet and byte counters */
};
struct compat_xt_counters_info {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
};
struct _compat_xt_align {
	__u8 u8;
	__u16 u16;
	__u32 u32;
	compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);

extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(u_int8_t af);
extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,
				     void **dstptr, unsigned int *size);
extern int xt_compat_match_to_user(const struct xt_entry_match *m,
				   void __user **dstptr, unsigned int *size);
extern int xt_compat_target_offset(const struct xt_target *target);
extern void xt_compat_target_from_user(struct xt_entry_target *t,
				       void **dstptr, unsigned int *size);
extern int xt_compat_target_to_user(const struct xt_entry_target *t,
				    void __user **dstptr, unsigned int *size);
#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */

#endif /* _X_TABLES_H */