#ifndef _X_TABLES_H
#define _X_TABLES_H

#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>

/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match:	the match extension
 * @target:	the target extension
 * @matchinfo:	per-match data
 * @targinfo:	per-target data
 * @net:	network namespace through which the action was invoked
 * @in:		input netdevice
 * @out:	output netdevice
 * @fragoff:	packet is a fragment, this is the data offset
 * @thoff:	position of transport header relative to skb->data
 * @hook:	hook number given packet came from
 * @family:	Actual NFPROTO_* through which the function is invoked
 *		(helpful when match->family == NFPROTO_UNSPEC)
 *
 * Fields written to by extensions:
 *
 * @hotdrop:	drop packet if we had inspection problems
 */
struct xt_action_param {
	union {
		const struct xt_match *match;
		const struct xt_target *target;
	};
	union {
		const void *matchinfo, *targinfo;
	};
	struct net *net;
	const struct net_device *in, *out;
	int fragoff;
	unsigned int thoff;
	unsigned int hooknum;
	u_int8_t family;
	bool hotdrop;
};

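/*
 * Illustrative sketch (not part of this header): a minimal match function
 * consuming the fields above.  "foo_mt" and "struct xt_foo_info" are
 * hypothetical names; the skb_header_pointer() usage follows the guidance
 * in the struct xt_match comment further down.
 */
#if 0
#include <linux/tcp.h>

struct xt_foo_info {
	__u16 port;
};

static bool foo_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_foo_info *info = par->matchinfo;
	struct tcphdr _tcph;
	const struct tcphdr *th;

	if (par->fragoff != 0)		/* headers live in the first fragment */
		return false;

	th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
	if (th == NULL) {
		par->hotdrop = true;	/* truncated header: force a drop */
		return false;
	}
	return ntohs(th->dest) == info->port;
}
#endif
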
/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:	network namespace through which the check was invoked
 * @table:	table the rule is tried to be inserted into
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @hook_mask:	via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_match *match;
	void *matchinfo;
	unsigned int hook_mask;
	u_int8_t family;
	bool nft_compat;
};

/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
	struct net *net;
	const struct xt_match *match;
	void *matchinfo;
	u_int8_t family;
};

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields see above.
 */
struct xt_tgchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_target *target;
	void *targinfo;
	unsigned int hook_mask;
	u_int8_t family;
	bool nft_compat;
};

/* Target destructor parameters */
struct xt_tgdtor_param {
	struct net *net;
	const struct xt_target *target;
	void *targinfo;
	u_int8_t family;
};

struct xt_match {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;

	/* Return true or false: return FALSE and set *hotdrop = 1 to
	   force immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skb, using skb_header_pointer and
	   skb_ip_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      struct xt_action_param *);

	/* Called when user tries to insert an entry of this type. */
	int (*checkentry)(const struct xt_mtchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when userspace align differs from kernel space one */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int matchsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};

/* Registration hooks for targets. */
struct xt_target {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;

	/* Returns verdict. Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_ip_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_action_param *);

	/* Called when user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return 0 on success or an error code otherwise (-Exxxx). */
	int (*checkentry)(const struct xt_tgchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when userspace align differs from kernel space one */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int targetsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};

/* Furniture shopping... */
struct xt_table {
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */
	int priority;		/* hook order */

	/* called when table is needed in the given netns */
	int (*table_init)(struct net *net);

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
};

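/*
 * Illustrative sketch (not part of this header), adapted from the
 * iptable_filter module: how a table is typically described.
 * FILTER_VALID_HOOKS and iptable_filter_table_init belong to that module.
 */
#if 0
static const struct xt_table packet_filter = {
	.name		= "filter",
	.valid_hooks	= FILTER_VALID_HOOKS,
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV4,
	.priority	= NF_IP_PRI_FILTER,
	.table_init	= iptable_filter_table_init,
};
#endif
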
#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];

	/*
	 * Number of user chains. Since tables cannot have loops, at most
	 * @stacksize jumps (number of user chains) can possibly be made.
	 */
	unsigned int stacksize;
	void ***jumpstack;

	unsigned char entries[0] __aligned(8);
};

int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);

int xt_register_match(struct xt_match *match);
void xt_unregister_match(struct xt_match *match);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);

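/*
 * Illustrative sketch (not part of this header): registering the
 * hypothetical "foo" match from the sketch above as a module.
 */
#if 0
static struct xt_match foo_mt_reg __read_mostly = {
	.name		= "foo",
	.revision	= 0,
	.family		= NFPROTO_UNSPEC,
	.match		= foo_mt,
	.matchsize	= sizeof(struct xt_foo_info),
	.me		= THIS_MODULE,
};

static int __init foo_mt_init(void)
{
	return xt_register_match(&foo_mt_reg);
}

static void __exit foo_mt_exit(void)
{
	xt_unregister_match(&foo_mt_reg);
}

module_init(foo_mt_init);
module_exit(foo_mt_exit);
#endif
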
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
		   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
		    bool inv_proto);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);

struct xt_table_info *xt_replace_table(struct xt_table *table,
				       unsigned int num_counters,
				       struct xt_table_info *newinfo,
				       int *error);

struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err);

struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name);
void xt_table_unlock(struct xt_table *t);

int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);

/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened.
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention:
 * low order bit set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);

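/*
 * Illustrative sketch (not part of this header): how a counter reader such
 * as get_counters() can use the seqcount to take a consistent snapshot.
 * "cpu" and "counter" come from the surrounding per-cpu loop.
 */
#if 0
unsigned int start;
u64 bcnt, pcnt;

do {
	start = read_seqcount_begin(&per_cpu(xt_recseq, cpu));
	bcnt = counter->bcnt;
	pcnt = counter->pcnt;
} while (read_seqcount_retry(&per_cpu(xt_recseq, cpu), start));
#endif
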
/* xt_tee_enabled - true if x_tables needs to handle reentrancy
 *
 * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
 */
extern struct static_key xt_tee_enabled;

/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing: all readers must wait for the end.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 * Returns:
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
	unsigned int addend;

	/*
	 * Low order bit of sequence is set if we already
	 * called xt_write_recseq_begin().
	 */
	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

	/*
	 * This is kind of a write_seqcount_begin(), but addend is 0 or 1.
	 * We don't check the addend value to avoid a test and conditional
	 * jump, since addend is most likely 1.
	 */
	__this_cpu_add(xt_recseq.sequence, addend);

	return addend;
}

/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing: all readers can proceed.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
	smp_wmb();
	__this_cpu_add(xt_recseq.sequence, addend);
}

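/*
 * Illustrative sketch (not part of this header): the write-side pairing
 * used on the ip(6)t_do_table() packet path.
 */
#if 0
unsigned int addend;

local_bh_disable();		/* satisfies rules 1) and 2) above */
addend = xt_write_recseq_begin();
/* ... traverse the ruleset, bump per-cpu counters ... */
xt_write_recseq_end(addend);
local_bh_enable();
#endif
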
/*
 * This helper is performance critical and must be inlined.
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
						   const char *_b,
						   const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
	return ret;
}

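/*
 * Illustrative sketch (not part of this header): the typical caller
 * pattern (as in ip_tables), where a zero result means the device name
 * matched the rule's interface pattern under its mask.  "indev" is the
 * incoming device name buffer; "ipinfo" is a struct ipt_ip.
 */
#if 0
if (ifname_compare_aligned(indev, ipinfo->iniface,
			   ipinfo->iniface_mask) == 0)
	/* input interface matches the rule */;
#endif
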
/* On SMP, ip(6)t_entry->counters.pcnt holds the address of the
 * real (percpu) counter.  On !SMP, it's just the packet count,
 * so nothing needs to be done there.
 *
 * xt_percpu_counter_alloc returns the address of the percpu
 * counter, or 0 on !SMP.  We force an alignment of 16 bytes
 * so that bytes/packets share a common cache line.
 *
 * Hence the caller must use IS_ERR_VALUE to check for error; this
 * allows us to return 0 for single core systems without forcing
 * callers to deal with SMP vs. NONSMP issues.
 */
static inline u64 xt_percpu_counter_alloc(void)
{
	if (nr_cpu_ids > 1) {
		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
						    sizeof(struct xt_counters));

		if (res == NULL)
			return (u64) -ENOMEM;

		return (u64) (__force unsigned long) res;
	}

	return 0;
}

static inline void xt_percpu_counter_free(u64 pcnt)
{
	if (nr_cpu_ids > 1)
		free_percpu((void __percpu *) (unsigned long) pcnt);
}

static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
{
	if (nr_cpu_ids > 1)
		return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);

	return cnt;
}

static inline struct xt_counters *
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
{
	if (nr_cpu_ids > 1)
		return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);

	return cnt;
}

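/*
 * Illustrative sketch (not part of this header): the allocate/update/free
 * life cycle of a rule's counter pair.  "e" is a hypothetical rule entry
 * and "skb" the packet being processed.
 */
#if 0
e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
	return -ENOMEM;

/* hot path, per packet: */
{
	struct xt_counters *ctr = xt_get_this_cpu_counter(&e->counters);

	ctr->pcnt++;		/* packets */
	ctr->bcnt += skb->len;	/* bytes */
}

/* teardown: */
xt_percpu_counter_free(e->counters.pcnt);
#endif
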
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_xt_entry_match {
	union {
		struct {
			u_int16_t match_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t match_size;
			compat_uptr_t match;
		} kernel;
		u_int16_t match_size;
	} u;
	unsigned char data[0];
};

struct compat_xt_entry_target {
	union {
		struct {
			u_int16_t target_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t target_size;
			compat_uptr_t target;
		} kernel;
		u_int16_t target_size;
	} u;
	unsigned char data[0];
};

/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters {
	compat_u64 pcnt, bcnt;		/* Packet and byte counters */
};

struct compat_xt_counters_info {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
};

struct _compat_xt_align {
	__u8 u8;
	__u16 u16;
	__u32 u32;
	compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))

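/*
 * Illustrative sketch (not part of this header): rounding a blob size up
 * to the ia32 compat alignment, e.g. when computing translated offsets.
 * "struct xt_foo_info" is the hypothetical type from the sketches above.
 */
#if 0
unsigned int csize = COMPAT_XT_ALIGN(sizeof(struct xt_foo_info));
#endif
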
void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

int xt_compat_match_offset(const struct xt_match *match);
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size);

int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size);

#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */