1 /*
2  * x_tables core - Backend for {ip,ip6,arp}_tables
3  *
4  * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
5  * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
6  *
7  * Based on existing ip_tables code which is
8  *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
9  *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License version 2 as
13  * published by the Free Software Foundation.
14  *
15  */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/socket.h>
20 #include <linux/net.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/string.h>
24 #include <linux/vmalloc.h>
25 #include <linux/mutex.h>
26 #include <linux/mm.h>
27 #include <linux/slab.h>
28 #include <linux/audit.h>
29 #include <linux/user_namespace.h>
30 #include <net/net_namespace.h>
31 
32 #include <linux/netfilter/x_tables.h>
33 #include <linux/netfilter_arp.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
35 #include <linux/netfilter_ipv6/ip6_tables.h>
36 #include <linux/netfilter_arp/arp_tables.h>
37 
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
40 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
41 
42 #define XT_PCPU_BLOCK_SIZE 4096
43 #define XT_MAX_TABLE_SIZE	(512 * 1024 * 1024)
44 
45 struct compat_delta {
46 	unsigned int offset; /* offset in kernel */
47 	int delta; /* delta in 32bit user land */
48 };
49 
50 struct xt_af {
51 	struct mutex mutex;
52 	struct list_head match;
53 	struct list_head target;
54 #ifdef CONFIG_COMPAT
55 	struct mutex compat_mutex;
56 	struct compat_delta *compat_tab;
57 	unsigned int number; /* number of slots in compat_tab[] */
58 	unsigned int cur; /* number of used slots in compat_tab[] */
59 #endif
60 };
61 
62 static struct xt_af *xt;
63 
64 static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
65 	[NFPROTO_UNSPEC] = "x",
66 	[NFPROTO_IPV4]   = "ip",
67 	[NFPROTO_ARP]    = "arp",
68 	[NFPROTO_BRIDGE] = "eb",
69 	[NFPROTO_IPV6]   = "ip6",
70 };
71 
72 /* Registration hooks for targets. */
73 int xt_register_target(struct xt_target *target)
74 {
75 	u_int8_t af = target->family;
76 
77 	mutex_lock(&xt[af].mutex);
78 	list_add(&target->list, &xt[af].target);
79 	mutex_unlock(&xt[af].mutex);
80 	return 0;
81 }
82 EXPORT_SYMBOL(xt_register_target);
83 
84 void
85 xt_unregister_target(struct xt_target *target)
86 {
87 	u_int8_t af = target->family;
88 
89 	mutex_lock(&xt[af].mutex);
90 	list_del(&target->list);
91 	mutex_unlock(&xt[af].mutex);
92 }
93 EXPORT_SYMBOL(xt_unregister_target);
94 
95 int
96 xt_register_targets(struct xt_target *target, unsigned int n)
97 {
98 	unsigned int i;
99 	int err = 0;
100 
101 	for (i = 0; i < n; i++) {
102 		err = xt_register_target(&target[i]);
103 		if (err)
104 			goto err;
105 	}
106 	return err;
107 
108 err:
109 	if (i > 0)
110 		xt_unregister_targets(target, i);
111 	return err;
112 }
113 EXPORT_SYMBOL(xt_register_targets);
114 
115 void
116 xt_unregister_targets(struct xt_target *target, unsigned int n)
117 {
118 	while (n-- > 0)
119 		xt_unregister_target(&target[n]);
120 }
121 EXPORT_SYMBOL(xt_unregister_targets);
122 
123 int xt_register_match(struct xt_match *match)
124 {
125 	u_int8_t af = match->family;
126 
127 	mutex_lock(&xt[af].mutex);
128 	list_add(&match->list, &xt[af].match);
129 	mutex_unlock(&xt[af].mutex);
130 	return 0;
131 }
132 EXPORT_SYMBOL(xt_register_match);
133 
134 void
135 xt_unregister_match(struct xt_match *match)
136 {
137 	u_int8_t af = match->family;
138 
139 	mutex_lock(&xt[af].mutex);
140 	list_del(&match->list);
141 	mutex_unlock(&xt[af].mutex);
142 }
143 EXPORT_SYMBOL(xt_unregister_match);
144 
145 int
146 xt_register_matches(struct xt_match *match, unsigned int n)
147 {
148 	unsigned int i;
149 	int err = 0;
150 
151 	for (i = 0; i < n; i++) {
152 		err = xt_register_match(&match[i]);
153 		if (err)
154 			goto err;
155 	}
156 	return err;
157 
158 err:
159 	if (i > 0)
160 		xt_unregister_matches(match, i);
161 	return err;
162 }
163 EXPORT_SYMBOL(xt_register_matches);
164 
165 void
166 xt_unregister_matches(struct xt_match *match, unsigned int n)
167 {
168 	while (n-- > 0)
169 		xt_unregister_match(&match[n]);
170 }
171 EXPORT_SYMBOL(xt_unregister_matches);
172 
173 
174 /*
175  * These are weird, but module loading must not be done with mutex
176  * held (since they will register), and we have to have a single
177  * function to use.
178  */
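
/*
 * Illustrative sketch added by the editor (not part of the original file):
 * the typical caller pattern for the lookup helpers below.  The function
 * name example_lookup_match() and the choice of the "limit" match are made
 * up for illustration only.
 */
static int __maybe_unused example_lookup_match(void)
{
	struct xt_match *m;

	/* may trigger a module load if the extension is not registered yet */
	m = xt_request_find_match(NFPROTO_IPV4, "limit", 0);
	if (IS_ERR(m))
		return PTR_ERR(m);

	pr_debug("found match %s, revision %u\n", m->name, m->revision);
	module_put(m->me);	/* drop the reference taken by the lookup */
	return 0;
}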
179 
180 /* Find match, grabs ref.  Returns ERR_PTR() on error. */
181 struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
182 {
183 	struct xt_match *m;
184 	int err = -ENOENT;
185 
186 	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
187 		return ERR_PTR(-EINVAL);
188 
189 	mutex_lock(&xt[af].mutex);
190 	list_for_each_entry(m, &xt[af].match, list) {
191 		if (strcmp(m->name, name) == 0) {
192 			if (m->revision == revision) {
193 				if (try_module_get(m->me)) {
194 					mutex_unlock(&xt[af].mutex);
195 					return m;
196 				}
197 			} else
198 				err = -EPROTOTYPE; /* Found something. */
199 		}
200 	}
201 	mutex_unlock(&xt[af].mutex);
202 
203 	if (af != NFPROTO_UNSPEC)
204 		/* Try searching again in the family-independent list */
205 		return xt_find_match(NFPROTO_UNSPEC, name, revision);
206 
207 	return ERR_PTR(err);
208 }
209 EXPORT_SYMBOL(xt_find_match);
210 
211 struct xt_match *
212 xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
213 {
214 	struct xt_match *match;
215 
216 	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
217 		return ERR_PTR(-EINVAL);
218 
219 	match = xt_find_match(nfproto, name, revision);
220 	if (IS_ERR(match)) {
221 		request_module("%st_%s", xt_prefix[nfproto], name);
222 		match = xt_find_match(nfproto, name, revision);
223 	}
224 
225 	return match;
226 }
227 EXPORT_SYMBOL_GPL(xt_request_find_match);
228 
229 /* Find target, grabs ref.  Returns ERR_PTR() on error. */
230 struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
231 {
232 	struct xt_target *t;
233 	int err = -ENOENT;
234 
235 	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
236 		return ERR_PTR(-EINVAL);
237 
238 	mutex_lock(&xt[af].mutex);
239 	list_for_each_entry(t, &xt[af].target, list) {
240 		if (strcmp(t->name, name) == 0) {
241 			if (t->revision == revision) {
242 				if (try_module_get(t->me)) {
243 					mutex_unlock(&xt[af].mutex);
244 					return t;
245 				}
246 			} else
247 				err = -EPROTOTYPE; /* Found something. */
248 		}
249 	}
250 	mutex_unlock(&xt[af].mutex);
251 
252 	if (af != NFPROTO_UNSPEC)
253 		/* Try searching again in the family-independent list */
254 		return xt_find_target(NFPROTO_UNSPEC, name, revision);
255 
256 	return ERR_PTR(err);
257 }
258 EXPORT_SYMBOL(xt_find_target);
259 
260 struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
261 {
262 	struct xt_target *target;
263 
264 	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
265 		return ERR_PTR(-EINVAL);
266 
267 	target = xt_find_target(af, name, revision);
268 	if (IS_ERR(target)) {
269 		request_module("%st_%s", xt_prefix[af], name);
270 		target = xt_find_target(af, name, revision);
271 	}
272 
273 	return target;
274 }
275 EXPORT_SYMBOL_GPL(xt_request_find_target);
276 
277 
278 static int xt_obj_to_user(u16 __user *psize, u16 size,
279 			  void __user *pname, const char *name,
280 			  u8 __user *prev, u8 rev)
281 {
282 	if (put_user(size, psize))
283 		return -EFAULT;
284 	if (copy_to_user(pname, name, strlen(name) + 1))
285 		return -EFAULT;
286 	if (put_user(rev, prev))
287 		return -EFAULT;
288 
289 	return 0;
290 }
291 
292 #define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
293 	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
294 		       U->u.user.name, K->u.kernel.TYPE->name,		\
295 		       &U->u.user.revision, K->u.kernel.TYPE->revision)
296 
297 int xt_data_to_user(void __user *dst, const void *src,
298 		    int usersize, int size, int aligned_size)
299 {
300 	usersize = usersize ? : size;
301 	if (copy_to_user(dst, src, usersize))
302 		return -EFAULT;
303 	if (usersize != aligned_size &&
304 	    clear_user(dst + usersize, aligned_size - usersize))
305 		return -EFAULT;
306 
307 	return 0;
308 }
309 EXPORT_SYMBOL_GPL(xt_data_to_user);
310 
311 #define XT_DATA_TO_USER(U, K, TYPE)					\
312 	xt_data_to_user(U->data, K->data,				\
313 			K->u.kernel.TYPE->usersize,			\
314 			K->u.kernel.TYPE->TYPE##size,			\
315 			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
316 
317 int xt_match_to_user(const struct xt_entry_match *m,
318 		     struct xt_entry_match __user *u)
319 {
320 	return XT_OBJ_TO_USER(u, m, match, 0) ||
321 	       XT_DATA_TO_USER(u, m, match);
322 }
323 EXPORT_SYMBOL_GPL(xt_match_to_user);
324 
325 int xt_target_to_user(const struct xt_entry_target *t,
326 		      struct xt_entry_target __user *u)
327 {
328 	return XT_OBJ_TO_USER(u, t, target, 0) ||
329 	       XT_DATA_TO_USER(u, t, target);
330 }
331 EXPORT_SYMBOL_GPL(xt_target_to_user);
332 
333 static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
334 {
335 	const struct xt_match *m;
336 	int have_rev = 0;
337 
338 	list_for_each_entry(m, &xt[af].match, list) {
339 		if (strcmp(m->name, name) == 0) {
340 			if (m->revision > *bestp)
341 				*bestp = m->revision;
342 			if (m->revision == revision)
343 				have_rev = 1;
344 		}
345 	}
346 
347 	if (af != NFPROTO_UNSPEC && !have_rev)
348 		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
349 
350 	return have_rev;
351 }
352 
353 static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
354 {
355 	const struct xt_target *t;
356 	int have_rev = 0;
357 
358 	list_for_each_entry(t, &xt[af].target, list) {
359 		if (strcmp(t->name, name) == 0) {
360 			if (t->revision > *bestp)
361 				*bestp = t->revision;
362 			if (t->revision == revision)
363 				have_rev = 1;
364 		}
365 	}
366 
367 	if (af != NFPROTO_UNSPEC && !have_rev)
368 		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
369 
370 	return have_rev;
371 }
372 
373 /* Returns true or false (if no such extension at all) */
374 int xt_find_revision(u8 af, const char *name, u8 revision, int target,
375 		     int *err)
376 {
377 	int have_rev, best = -1;
378 
379 	mutex_lock(&xt[af].mutex);
380 	if (target == 1)
381 		have_rev = target_revfn(af, name, revision, &best);
382 	else
383 		have_rev = match_revfn(af, name, revision, &best);
384 	mutex_unlock(&xt[af].mutex);
385 
386 	/* Nothing at all?  Return 0 to try loading module. */
387 	if (best == -1) {
388 		*err = -ENOENT;
389 		return 0;
390 	}
391 
392 	*err = best;
393 	if (!have_rev)
394 		*err = -EPROTONOSUPPORT;
395 	return 1;
396 }
397 EXPORT_SYMBOL_GPL(xt_find_revision);
398 
399 static char *
400 textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
401 {
402 	static const char *const inetbr_names[] = {
403 		"PREROUTING", "INPUT", "FORWARD",
404 		"OUTPUT", "POSTROUTING", "BROUTING",
405 	};
406 	static const char *const arp_names[] = {
407 		"INPUT", "FORWARD", "OUTPUT",
408 	};
409 	const char *const *names;
410 	unsigned int i, max;
411 	char *p = buf;
412 	bool np = false;
413 	int res;
414 
415 	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
416 	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
417 	                                   ARRAY_SIZE(inetbr_names);
418 	*p = '\0';
419 	for (i = 0; i < max; ++i) {
420 		if (!(mask & (1 << i)))
421 			continue;
422 		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
423 		if (res > 0) {
424 			size -= res;
425 			p += res;
426 		}
427 		np = true;
428 	}
429 
430 	return buf;
431 }
432 
433 /**
434  * xt_check_proc_name - check that name is suitable for /proc file creation
435  *
436  * @name: file name candidate
437  * @size: length of buffer
438  *
439  * some x_tables modules wish to create a file in /proc.
440  * This function makes sure that the name is suitable for this
441  * purpose, it checks that name is NUL terminated and isn't a 'special'
442  * name, like "..".
443  *
444  * returns a negative number on error or 0 if the name is usable.
445  */
446 int xt_check_proc_name(const char *name, unsigned int size)
447 {
448 	if (name[0] == '\0')
449 		return -EINVAL;
450 
451 	if (strnlen(name, size) == size)
452 		return -ENAMETOOLONG;
453 
454 	if (strcmp(name, ".") == 0 ||
455 	    strcmp(name, "..") == 0 ||
456 	    strchr(name, '/'))
457 		return -EINVAL;
458 
459 	return 0;
460 }
461 EXPORT_SYMBOL(xt_check_proc_name);
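
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * expected results of xt_check_proc_name() for a few inputs, as used by
 * extensions such as hashlimit/recent before creating /proc entries.
 * example_check_proc_names() is a made-up name.
 */
static void __maybe_unused example_check_proc_names(void)
{
	WARN_ON(xt_check_proc_name("myhash", 32) != 0);		/* plain name: ok */
	WARN_ON(xt_check_proc_name("..", 32) != -EINVAL);	/* special name */
	WARN_ON(xt_check_proc_name("a/b", 32) != -EINVAL);	/* path separator */
	WARN_ON(xt_check_proc_name("", 32) != -EINVAL);		/* empty string */
}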
462 
463 int xt_check_match(struct xt_mtchk_param *par,
464 		   unsigned int size, u_int8_t proto, bool inv_proto)
465 {
466 	int ret;
467 
468 	if (XT_ALIGN(par->match->matchsize) != size &&
469 	    par->match->matchsize != -1) {
470 		/*
471 		 * ebt_among is exempt from centralized matchsize checking
472 		 * because it uses a dynamic-size data set.
473 		 */
474 		pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
475 				   xt_prefix[par->family], par->match->name,
476 				   par->match->revision,
477 				   XT_ALIGN(par->match->matchsize), size);
478 		return -EINVAL;
479 	}
480 	if (par->match->table != NULL &&
481 	    strcmp(par->match->table, par->table) != 0) {
482 		pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
483 				    xt_prefix[par->family], par->match->name,
484 				    par->match->table, par->table);
485 		return -EINVAL;
486 	}
487 	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
488 		char used[64], allow[64];
489 
490 		pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
491 				    xt_prefix[par->family], par->match->name,
492 				    textify_hooks(used, sizeof(used),
493 						  par->hook_mask, par->family),
494 				    textify_hooks(allow, sizeof(allow),
495 						  par->match->hooks,
496 						  par->family));
497 		return -EINVAL;
498 	}
499 	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
500 		pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
501 				    xt_prefix[par->family], par->match->name,
502 				    par->match->proto);
503 		return -EINVAL;
504 	}
505 	if (par->match->checkentry != NULL) {
506 		ret = par->match->checkentry(par);
507 		if (ret < 0)
508 			return ret;
509 		else if (ret > 0)
510 			/* Flag up potential errors. */
511 			return -EIO;
512 	}
513 	return 0;
514 }
515 EXPORT_SYMBOL_GPL(xt_check_match);
516 
517 /** xt_check_entry_match - check that matches end before start of target
518  *
519  * @match: beginning of xt_entry_match
520  * @target: beginning of this rules target (alleged end of matches)
521  * @alignment: alignment requirement of match structures
522  *
523  * Validates that all matches add up to the beginning of the target,
524  * and that each match covers at least the base structure size.
525  *
526  * Return: 0 on success, negative errno on failure.
527  */
528 static int xt_check_entry_match(const char *match, const char *target,
529 				const size_t alignment)
530 {
531 	const struct xt_entry_match *pos;
532 	int length = target - match;
533 
534 	if (length == 0) /* no matches */
535 		return 0;
536 
537 	pos = (struct xt_entry_match *)match;
538 	do {
539 		if ((unsigned long)pos % alignment)
540 			return -EINVAL;
541 
542 		if (length < (int)sizeof(struct xt_entry_match))
543 			return -EINVAL;
544 
545 		if (pos->u.match_size < sizeof(struct xt_entry_match))
546 			return -EINVAL;
547 
548 		if (pos->u.match_size > length)
549 			return -EINVAL;
550 
551 		length -= pos->u.match_size;
552 		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
553 	} while (length > 0);
554 
555 	return 0;
556 }
557 
558 /** xt_check_table_hooks - check hook entry points are sane
559  *
560  * @info: xt_table_info to check
561  * @valid_hooks: hook entry points that we can enter from
562  *
563  * Validates that the hook entry and underflows points are set up.
564  *
565  * Return: 0 on success, negative errno on failure.
566  */
567 int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
568 {
569 	const char *err = "unsorted underflow";
570 	unsigned int i, max_uflow, max_entry;
571 	bool check_hooks = false;
572 
573 	BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));
574 
575 	max_entry = 0;
576 	max_uflow = 0;
577 
578 	for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
579 		if (!(valid_hooks & (1 << i)))
580 			continue;
581 
582 		if (info->hook_entry[i] == 0xFFFFFFFF)
583 			return -EINVAL;
584 		if (info->underflow[i] == 0xFFFFFFFF)
585 			return -EINVAL;
586 
587 		if (check_hooks) {
588 			if (max_uflow > info->underflow[i])
589 				goto error;
590 
591 			if (max_uflow == info->underflow[i]) {
592 				err = "duplicate underflow";
593 				goto error;
594 			}
595 			if (max_entry > info->hook_entry[i]) {
596 				err = "unsorted entry";
597 				goto error;
598 			}
599 			if (max_entry == info->hook_entry[i]) {
600 				err = "duplicate entry";
601 				goto error;
602 			}
603 		}
604 		max_entry = info->hook_entry[i];
605 		max_uflow = info->underflow[i];
606 		check_hooks = true;
607 	}
608 
609 	return 0;
610 error:
611 	pr_err_ratelimited("%s at hook %d\n", err, i);
612 	return -EINVAL;
613 }
614 EXPORT_SYMBOL(xt_check_table_hooks);
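
/*
 * Illustrative sketch (editor's addition, not in the original file): a
 * hook/underflow layout that passes xt_check_table_hooks().  The offsets
 * are made up but honor the requirements checked above: both arrays filled
 * in for every valid hook, strictly increasing, no duplicates.
 */
static int __maybe_unused example_hook_layout(void)
{
	struct xt_table_info info = {};
	unsigned int valid_hooks = (1 << NF_INET_LOCAL_IN) |
				   (1 << NF_INET_FORWARD);

	info.hook_entry[NF_INET_LOCAL_IN] = 0;		/* first rule of the blob */
	info.underflow[NF_INET_LOCAL_IN]  = 112;	/* its policy rule */
	info.hook_entry[NF_INET_FORWARD]  = 200;
	info.underflow[NF_INET_FORWARD]   = 312;

	return xt_check_table_hooks(&info, valid_hooks);	/* returns 0 */
}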
615 
616 static bool verdict_ok(int verdict)
617 {
618 	if (verdict > 0)
619 		return true;
620 
621 	if (verdict < 0) {
622 		int v = -verdict - 1;
623 
624 		if (verdict == XT_RETURN)
625 			return true;
626 
627 		switch (v) {
628 		case NF_ACCEPT: return true;
629 		case NF_DROP: return true;
630 		case NF_QUEUE: return true;
631 		default:
632 			break;
633 		}
634 
635 		return false;
636 	}
637 
638 	return false;
639 }
640 
641 static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
642 			const char *msg, unsigned int msglen)
643 {
644 	return usersize == kernsize && strnlen(msg, msglen) < msglen;
645 }
646 
647 #ifdef CONFIG_COMPAT
648 int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
649 {
650 	struct xt_af *xp = &xt[af];
651 
652 	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
653 
654 	if (WARN_ON(!xp->compat_tab))
655 		return -ENOMEM;
656 
657 	if (xp->cur >= xp->number)
658 		return -EINVAL;
659 
660 	if (xp->cur)
661 		delta += xp->compat_tab[xp->cur - 1].delta;
662 	xp->compat_tab[xp->cur].offset = offset;
663 	xp->compat_tab[xp->cur].delta = delta;
664 	xp->cur++;
665 	return 0;
666 }
667 EXPORT_SYMBOL_GPL(xt_compat_add_offset);
668 
669 void xt_compat_flush_offsets(u_int8_t af)
670 {
671 	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
672 
673 	if (xt[af].compat_tab) {
674 		vfree(xt[af].compat_tab);
675 		xt[af].compat_tab = NULL;
676 		xt[af].number = 0;
677 		xt[af].cur = 0;
678 	}
679 }
680 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
681 
682 int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
683 {
684 	struct compat_delta *tmp = xt[af].compat_tab;
685 	int mid, left = 0, right = xt[af].cur - 1;
686 
687 	while (left <= right) {
688 		mid = (left + right) >> 1;
689 		if (offset > tmp[mid].offset)
690 			left = mid + 1;
691 		else if (offset < tmp[mid].offset)
692 			right = mid - 1;
693 		else
694 			return mid ? tmp[mid - 1].delta : 0;
695 	}
696 	return left ? tmp[left - 1].delta : 0;
697 }
698 EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
699 
700 int xt_compat_init_offsets(u8 af, unsigned int number)
701 {
702 	size_t mem;
703 
704 	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
705 
706 	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
707 		return -EINVAL;
708 
709 	if (WARN_ON(xt[af].compat_tab))
710 		return -EINVAL;
711 
712 	mem = sizeof(struct compat_delta) * number;
713 	if (mem > XT_MAX_TABLE_SIZE)
714 		return -ENOMEM;
715 
716 	xt[af].compat_tab = vmalloc(mem);
717 	if (!xt[af].compat_tab)
718 		return -ENOMEM;
719 
720 	xt[af].number = number;
721 	xt[af].cur = 0;
722 
723 	return 0;
724 }
725 EXPORT_SYMBOL(xt_compat_init_offsets);
726 
727 int xt_compat_match_offset(const struct xt_match *match)
728 {
729 	u_int16_t csize = match->compatsize ? : match->matchsize;
730 	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
731 }
732 EXPORT_SYMBOL_GPL(xt_compat_match_offset);
733 
734 void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
735 			       unsigned int *size)
736 {
737 	const struct xt_match *match = m->u.kernel.match;
738 	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
739 	int pad, off = xt_compat_match_offset(match);
740 	u_int16_t msize = cm->u.user.match_size;
741 	char name[sizeof(m->u.user.name)];
742 
743 	m = *dstptr;
744 	memcpy(m, cm, sizeof(*cm));
745 	if (match->compat_from_user)
746 		match->compat_from_user(m->data, cm->data);
747 	else
748 		memcpy(m->data, cm->data, msize - sizeof(*cm));
749 	pad = XT_ALIGN(match->matchsize) - match->matchsize;
750 	if (pad > 0)
751 		memset(m->data + match->matchsize, 0, pad);
752 
753 	msize += off;
754 	m->u.user.match_size = msize;
755 	strlcpy(name, match->name, sizeof(name));
756 	module_put(match->me);
757 	strncpy(m->u.user.name, name, sizeof(m->u.user.name));
758 
759 	*size += off;
760 	*dstptr += msize;
761 }
762 EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
763 
764 #define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
765 	xt_data_to_user(U->data, K->data,				\
766 			K->u.kernel.TYPE->usersize,			\
767 			C_SIZE,						\
768 			COMPAT_XT_ALIGN(C_SIZE))
769 
770 int xt_compat_match_to_user(const struct xt_entry_match *m,
771 			    void __user **dstptr, unsigned int *size)
772 {
773 	const struct xt_match *match = m->u.kernel.match;
774 	struct compat_xt_entry_match __user *cm = *dstptr;
775 	int off = xt_compat_match_offset(match);
776 	u_int16_t msize = m->u.user.match_size - off;
777 
778 	if (XT_OBJ_TO_USER(cm, m, match, msize))
779 		return -EFAULT;
780 
781 	if (match->compat_to_user) {
782 		if (match->compat_to_user((void __user *)cm->data, m->data))
783 			return -EFAULT;
784 	} else {
785 		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
786 			return -EFAULT;
787 	}
788 
789 	*size -= off;
790 	*dstptr += msize;
791 	return 0;
792 }
793 EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
794 
795 /* non-compat version may have padding after verdict */
796 struct compat_xt_standard_target {
797 	struct compat_xt_entry_target t;
798 	compat_uint_t verdict;
799 };
800 
801 struct compat_xt_error_target {
802 	struct compat_xt_entry_target t;
803 	char errorname[XT_FUNCTION_MAXNAMELEN];
804 };
805 
806 int xt_compat_check_entry_offsets(const void *base, const char *elems,
807 				  unsigned int target_offset,
808 				  unsigned int next_offset)
809 {
810 	long size_of_base_struct = elems - (const char *)base;
811 	const struct compat_xt_entry_target *t;
812 	const char *e = base;
813 
814 	if (target_offset < size_of_base_struct)
815 		return -EINVAL;
816 
817 	if (target_offset + sizeof(*t) > next_offset)
818 		return -EINVAL;
819 
820 	t = (void *)(e + target_offset);
821 	if (t->u.target_size < sizeof(*t))
822 		return -EINVAL;
823 
824 	if (target_offset + t->u.target_size > next_offset)
825 		return -EINVAL;
826 
827 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
828 		const struct compat_xt_standard_target *st = (const void *)t;
829 
830 		if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
831 			return -EINVAL;
832 
833 		if (!verdict_ok(st->verdict))
834 			return -EINVAL;
835 	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
836 		const struct compat_xt_error_target *et = (const void *)t;
837 
838 		if (!error_tg_ok(t->u.target_size, sizeof(*et),
839 				 et->errorname, sizeof(et->errorname)))
840 			return -EINVAL;
841 	}
842 
843 	/* compat_xt_entry match has less strict alignment requirements,
844 	 * otherwise they are identical.  In case of padding differences
845 	 * we need to add compat version of xt_check_entry_match.
846 	 */
847 	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
848 
849 	return xt_check_entry_match(elems, base + target_offset,
850 				    __alignof__(struct compat_xt_entry_match));
851 }
852 EXPORT_SYMBOL(xt_compat_check_entry_offsets);
853 #endif /* CONFIG_COMPAT */
854 
855 /**
856  * xt_check_entry_offsets - validate arp/ip/ip6t_entry
857  *
858  * @base: pointer to arp/ip/ip6t_entry
859  * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
860  * @target_offset: the arp/ip/ip6_t->target_offset
861  * @next_offset: the arp/ip/ip6_t->next_offset
862  *
863  * validates that target_offset and next_offset are sane and that all
864  * match sizes (if any) align with the target offset.
865  *
866  * This function does not validate the targets or matches themselves, it
867  * only tests that all the offsets and sizes are correct, that all
868  * match structures are aligned, and that the last structure ends where
869  * the target structure begins.
870  *
871  * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
872  *
873  * The arp/ip/ip6t_entry structure @base must have passed following tests:
874  * - it must point to a valid memory location
875  * - base to base + next_offset must be accessible, i.e. not exceed allocated
876  *   length.
877  *
878  * A well-formed entry looks like this:
879  *
880  * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
881  * e->elems[]-----'                              |               |
882  *                matchsize                      |               |
883  *                                matchsize      |               |
884  *                                               |               |
885  * target_offset---------------------------------'               |
886  * next_offset---------------------------------------------------'
887  *
888  * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
889  *          This is where matches (if any) and the target reside.
890  * target_offset: beginning of target.
891  * next_offset: start of the next rule; also: size of this rule.
892  * Since targets have a minimum size, target_offset + minlen <= next_offset.
893  *
894  * Every match stores its size, sum of sizes must not exceed target_offset.
895  *
896  * Return: 0 on success, negative errno on failure.
897  */
898 int xt_check_entry_offsets(const void *base,
899 			   const char *elems,
900 			   unsigned int target_offset,
901 			   unsigned int next_offset)
902 {
903 	long size_of_base_struct = elems - (const char *)base;
904 	const struct xt_entry_target *t;
905 	const char *e = base;
906 
907 	/* target start is within the ip/ip6/arpt_entry struct */
908 	if (target_offset < size_of_base_struct)
909 		return -EINVAL;
910 
911 	if (target_offset + sizeof(*t) > next_offset)
912 		return -EINVAL;
913 
914 	t = (void *)(e + target_offset);
915 	if (t->u.target_size < sizeof(*t))
916 		return -EINVAL;
917 
918 	if (target_offset + t->u.target_size > next_offset)
919 		return -EINVAL;
920 
921 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
922 		const struct xt_standard_target *st = (const void *)t;
923 
924 		if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
925 			return -EINVAL;
926 
927 		if (!verdict_ok(st->verdict))
928 			return -EINVAL;
929 	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
930 		const struct xt_error_target *et = (const void *)t;
931 
932 		if (!error_tg_ok(t->u.target_size, sizeof(*et),
933 				 et->errorname, sizeof(et->errorname)))
934 			return -EINVAL;
935 	}
936 
937 	return xt_check_entry_match(elems, base + target_offset,
938 				    __alignof__(struct xt_entry_match));
939 }
940 EXPORT_SYMBOL(xt_check_entry_offsets);
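
/*
 * Illustrative sketch (editor's addition, not in the original file): a
 * minimal, match-less IPv4 rule laid out as in the diagram above and fed
 * to xt_check_entry_offsets().  The standard target name is the empty
 * string, so the zero-initialized name already selects it.
 */
static int __maybe_unused example_offsets_check(void)
{
	struct {
		struct ipt_entry e;
		struct xt_standard_target t;
	} rule = {};

	rule.e.target_offset = sizeof(rule.e);		/* no matches */
	rule.e.next_offset   = sizeof(rule);		/* size of this rule */
	rule.t.target.u.target_size = sizeof(rule.t);
	rule.t.verdict = -NF_ACCEPT - 1;		/* standard verdict encoding */

	return xt_check_entry_offsets(&rule.e, rule.e.elems,
				      rule.e.target_offset,
				      rule.e.next_offset);	/* 0 on success */
}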
941 
942 /**
943  * xt_alloc_entry_offsets - allocate array to store rule head offsets
944  *
945  * @size: number of entries
946  *
947  * Return: NULL or kmalloc'd or vmalloc'd array
948  */
949 unsigned int *xt_alloc_entry_offsets(unsigned int size)
950 {
951 	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
952 		return NULL;
953 
954 	return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
955 
956 }
957 EXPORT_SYMBOL(xt_alloc_entry_offsets);
958 
959 /**
960  * xt_find_jump_offset - check if target is a valid jump offset
961  *
962  * @offsets: array containing all valid rule start offsets of a rule blob
963  * @target: the jump target to search for
964  * @size: entries in @offset
965  */
966 bool xt_find_jump_offset(const unsigned int *offsets,
967 			 unsigned int target, unsigned int size)
968 {
969 	int m, low = 0, hi = size;
970 
971 	while (hi > low) {
972 		m = (low + hi) / 2u;
973 
974 		if (offsets[m] > target)
975 			hi = m;
976 		else if (offsets[m] < target)
977 			low = m + 1;
978 		else
979 			return true;
980 	}
981 
982 	return false;
983 }
984 EXPORT_SYMBOL(xt_find_jump_offset);
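
/*
 * Illustrative sketch (editor's addition): how a sorted offset array, such
 * as one built in a buffer from xt_alloc_entry_offsets(), is queried.  The
 * offsets below are made up.
 */
static bool __maybe_unused example_jump_is_valid(void)
{
	/* start offsets of four hypothetical rules in one blob, ascending */
	static const unsigned int offsets[] = { 0, 152, 304, 456 };

	/* true: 304 is a rule boundary; a jump to e.g. 300 would be false */
	return xt_find_jump_offset(offsets, 304, ARRAY_SIZE(offsets));
}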
985 
986 int xt_check_target(struct xt_tgchk_param *par,
987 		    unsigned int size, u_int8_t proto, bool inv_proto)
988 {
989 	int ret;
990 
991 	if (XT_ALIGN(par->target->targetsize) != size) {
992 		pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
993 				   xt_prefix[par->family], par->target->name,
994 				   par->target->revision,
995 				   XT_ALIGN(par->target->targetsize), size);
996 		return -EINVAL;
997 	}
998 	if (par->target->table != NULL &&
999 	    strcmp(par->target->table, par->table) != 0) {
1000 		pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
1001 				    xt_prefix[par->family], par->target->name,
1002 				    par->target->table, par->table);
1003 		return -EINVAL;
1004 	}
1005 	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
1006 		char used[64], allow[64];
1007 
1008 		pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
1009 				    xt_prefix[par->family], par->target->name,
1010 				    textify_hooks(used, sizeof(used),
1011 						  par->hook_mask, par->family),
1012 				    textify_hooks(allow, sizeof(allow),
1013 						  par->target->hooks,
1014 						  par->family));
1015 		return -EINVAL;
1016 	}
1017 	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
1018 		pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
1019 				    xt_prefix[par->family], par->target->name,
1020 				    par->target->proto);
1021 		return -EINVAL;
1022 	}
1023 	if (par->target->checkentry != NULL) {
1024 		ret = par->target->checkentry(par);
1025 		if (ret < 0)
1026 			return ret;
1027 		else if (ret > 0)
1028 			/* Flag up potential errors. */
1029 			return -EIO;
1030 	}
1031 	return 0;
1032 }
1033 EXPORT_SYMBOL_GPL(xt_check_target);
1034 
1035 /**
1036  * xt_copy_counters_from_user - copy counters and metadata from userspace
1037  *
1038  * @user: src pointer to userspace memory
1039  * @len: alleged size of userspace memory
1040  * @info: where to store the xt_counters_info metadata
1041  * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
1042  *
1043  * Copies counter meta data from @user and stores it in @info.
1044  *
1045  * vmallocs memory to hold the counters, then copies the counter data
1046  * from @user to the new memory and returns a pointer to it.
1047  *
1048  * If @compat is true, @info gets converted automatically to the 64bit
1049  * representation.
1050  *
1051  * The metadata associated with the counters is stored in @info.
1052  *
1053  * Return: returns pointer that caller has to test via IS_ERR().
1054  * If IS_ERR is false, caller has to vfree the pointer.
1055  */
1056 void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
1057 				 struct xt_counters_info *info, bool compat)
1058 {
1059 	void *mem;
1060 	u64 size;
1061 
1062 #ifdef CONFIG_COMPAT
1063 	if (compat) {
1064 		/* structures only differ in size due to alignment */
1065 		struct compat_xt_counters_info compat_tmp;
1066 
1067 		if (len <= sizeof(compat_tmp))
1068 			return ERR_PTR(-EINVAL);
1069 
1070 		len -= sizeof(compat_tmp);
1071 		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
1072 			return ERR_PTR(-EFAULT);
1073 
1074 		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
1075 		info->num_counters = compat_tmp.num_counters;
1076 		user += sizeof(compat_tmp);
1077 	} else
1078 #endif
1079 	{
1080 		if (len <= sizeof(*info))
1081 			return ERR_PTR(-EINVAL);
1082 
1083 		len -= sizeof(*info);
1084 		if (copy_from_user(info, user, sizeof(*info)) != 0)
1085 			return ERR_PTR(-EFAULT);
1086 
1087 		user += sizeof(*info);
1088 	}
1089 	info->name[sizeof(info->name) - 1] = '\0';
1090 
1091 	size = sizeof(struct xt_counters);
1092 	size *= info->num_counters;
1093 
1094 	if (size != (u64)len)
1095 		return ERR_PTR(-EINVAL);
1096 
1097 	mem = vmalloc(len);
1098 	if (!mem)
1099 		return ERR_PTR(-ENOMEM);
1100 
1101 	if (copy_from_user(mem, user, len) == 0)
1102 		return mem;
1103 
1104 	vfree(mem);
1105 	return ERR_PTR(-EFAULT);
1106 }
1107 EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
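
/*
 * Illustrative sketch (editor's addition, not in the original file): the
 * counter import path as used by the *_tables setsockopt handlers.  The
 * function name and surrounding logic are made up; only the calls to
 * xt_copy_counters_from_user() and vfree() reflect the API above.
 */
static int __maybe_unused example_import_counters(const void __user *arg,
						  unsigned int len)
{
	struct xt_counters_info info;
	void *counters;

	counters = xt_copy_counters_from_user(arg, len, &info, false);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* ... add the info.num_counters entries to the table named info.name ... */

	vfree(counters);
	return 0;
}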
1108 
1109 #ifdef CONFIG_COMPAT
1110 int xt_compat_target_offset(const struct xt_target *target)
1111 {
1112 	u_int16_t csize = target->compatsize ? : target->targetsize;
1113 	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
1114 }
1115 EXPORT_SYMBOL_GPL(xt_compat_target_offset);
1116 
1117 void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
1118 				unsigned int *size)
1119 {
1120 	const struct xt_target *target = t->u.kernel.target;
1121 	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
1122 	int pad, off = xt_compat_target_offset(target);
1123 	u_int16_t tsize = ct->u.user.target_size;
1124 	char name[sizeof(t->u.user.name)];
1125 
1126 	t = *dstptr;
1127 	memcpy(t, ct, sizeof(*ct));
1128 	if (target->compat_from_user)
1129 		target->compat_from_user(t->data, ct->data);
1130 	else
1131 		memcpy(t->data, ct->data, tsize - sizeof(*ct));
1132 	pad = XT_ALIGN(target->targetsize) - target->targetsize;
1133 	if (pad > 0)
1134 		memset(t->data + target->targetsize, 0, pad);
1135 
1136 	tsize += off;
1137 	t->u.user.target_size = tsize;
1138 	strlcpy(name, target->name, sizeof(name));
1139 	module_put(target->me);
1140 	strncpy(t->u.user.name, name, sizeof(t->u.user.name));
1141 
1142 	*size += off;
1143 	*dstptr += tsize;
1144 }
1145 EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1146 
1147 int xt_compat_target_to_user(const struct xt_entry_target *t,
1148 			     void __user **dstptr, unsigned int *size)
1149 {
1150 	const struct xt_target *target = t->u.kernel.target;
1151 	struct compat_xt_entry_target __user *ct = *dstptr;
1152 	int off = xt_compat_target_offset(target);
1153 	u_int16_t tsize = t->u.user.target_size - off;
1154 
1155 	if (XT_OBJ_TO_USER(ct, t, target, tsize))
1156 		return -EFAULT;
1157 
1158 	if (target->compat_to_user) {
1159 		if (target->compat_to_user((void __user *)ct->data, t->data))
1160 			return -EFAULT;
1161 	} else {
1162 		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
1163 			return -EFAULT;
1164 	}
1165 
1166 	*size -= off;
1167 	*dstptr += tsize;
1168 	return 0;
1169 }
1170 EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
1171 #endif
1172 
1173 struct xt_table_info *xt_alloc_table_info(unsigned int size)
1174 {
1175 	struct xt_table_info *info = NULL;
1176 	size_t sz = sizeof(*info) + size;
1177 
1178 	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
1179 		return NULL;
1180 
1181 	info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
1182 	if (!info)
1183 		return NULL;
1184 
1185 	memset(info, 0, sizeof(*info));
1186 	info->size = size;
1187 	return info;
1188 }
1189 EXPORT_SYMBOL(xt_alloc_table_info);
1190 
1191 void xt_free_table_info(struct xt_table_info *info)
1192 {
1193 	int cpu;
1194 
1195 	if (info->jumpstack != NULL) {
1196 		for_each_possible_cpu(cpu)
1197 			kvfree(info->jumpstack[cpu]);
1198 		kvfree(info->jumpstack);
1199 	}
1200 
1201 	kvfree(info);
1202 }
1203 EXPORT_SYMBOL(xt_free_table_info);
1204 
1205 /* Find table by name, grabs mutex & ref.  Returns ERR_PTR on error. */
1206 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
1207 				    const char *name)
1208 {
1209 	struct xt_table *t, *found = NULL;
1210 
1211 	mutex_lock(&xt[af].mutex);
1212 	list_for_each_entry(t, &net->xt.tables[af], list)
1213 		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
1214 			return t;
1215 
1216 	if (net == &init_net)
1217 		goto out;
1218 
1219 	/* Table doesn't exist in this netns, re-try init */
1220 	list_for_each_entry(t, &init_net.xt.tables[af], list) {
1221 		int err;
1222 
1223 		if (strcmp(t->name, name))
1224 			continue;
1225 		if (!try_module_get(t->me))
1226 			goto out;
1227 		mutex_unlock(&xt[af].mutex);
1228 		err = t->table_init(net);
1229 		if (err < 0) {
1230 			module_put(t->me);
1231 			return ERR_PTR(err);
1232 		}
1233 
1234 		found = t;
1235 
1236 		mutex_lock(&xt[af].mutex);
1237 		break;
1238 	}
1239 
1240 	if (!found)
1241 		goto out;
1242 
1243 	/* and once again: */
1244 	list_for_each_entry(t, &net->xt.tables[af], list)
1245 		if (strcmp(t->name, name) == 0)
1246 			return t;
1247 
1248 	module_put(found->me);
1249  out:
1250 	mutex_unlock(&xt[af].mutex);
1251 	return ERR_PTR(-ENOENT);
1252 }
1253 EXPORT_SYMBOL_GPL(xt_find_table_lock);
1254 
1255 struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
1256 					    const char *name)
1257 {
1258 	struct xt_table *t = xt_find_table_lock(net, af, name);
1259 
1260 #ifdef CONFIG_MODULES
1261 	if (IS_ERR(t)) {
1262 		int err = request_module("%stable_%s", xt_prefix[af], name);
1263 		if (err < 0)
1264 			return ERR_PTR(err);
1265 		t = xt_find_table_lock(net, af, name);
1266 	}
1267 #endif
1268 
1269 	return t;
1270 }
1271 EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
1272 
1273 void xt_table_unlock(struct xt_table *table)
1274 {
1275 	mutex_unlock(&xt[table->af].mutex);
1276 }
1277 EXPORT_SYMBOL_GPL(xt_table_unlock);
1278 
1279 #ifdef CONFIG_COMPAT
1280 void xt_compat_lock(u_int8_t af)
1281 {
1282 	mutex_lock(&xt[af].compat_mutex);
1283 }
1284 EXPORT_SYMBOL_GPL(xt_compat_lock);
1285 
1286 void xt_compat_unlock(u_int8_t af)
1287 {
1288 	mutex_unlock(&xt[af].compat_mutex);
1289 }
1290 EXPORT_SYMBOL_GPL(xt_compat_unlock);
1291 #endif
1292 
1293 DEFINE_PER_CPU(seqcount_t, xt_recseq);
1294 EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
1295 
1296 struct static_key xt_tee_enabled __read_mostly;
1297 EXPORT_SYMBOL_GPL(xt_tee_enabled);
1298 
1299 static int xt_jumpstack_alloc(struct xt_table_info *i)
1300 {
1301 	unsigned int size;
1302 	int cpu;
1303 
1304 	size = sizeof(void **) * nr_cpu_ids;
1305 	if (size > PAGE_SIZE)
1306 		i->jumpstack = kvzalloc(size, GFP_KERNEL);
1307 	else
1308 		i->jumpstack = kzalloc(size, GFP_KERNEL);
1309 	if (i->jumpstack == NULL)
1310 		return -ENOMEM;
1311 
1312 	/* ruleset without jumps -- no stack needed */
1313 	if (i->stacksize == 0)
1314 		return 0;
1315 
1316 	/* Jumpstack needs to be able to record two full callchains, one
1317 	 * from the first rule set traversal, plus one table reentrancy
1318 	 * via -j TEE without clobbering the callchain that brought us to
1319 	 * TEE target.
1320 	 *
1321 	 * This is done by allocating two jumpstacks per cpu, on reentry
1322 	 * the upper half of the stack is used.
1323 	 *
1324 	 * see the jumpstack setup in ipt_do_table() for more details.
1325 	 */
1326 	size = sizeof(void *) * i->stacksize * 2u;
1327 	for_each_possible_cpu(cpu) {
1328 		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
1329 			cpu_to_node(cpu));
1330 		if (i->jumpstack[cpu] == NULL)
1331 			/*
1332 			 * Freeing will be done later on by the callers. The
1333 			 * chain is: xt_replace_table -> __do_replace ->
1334 			 * do_replace -> xt_free_table_info.
1335 			 */
1336 			return -ENOMEM;
1337 	}
1338 
1339 	return 0;
1340 }
1341 
1342 struct xt_counters *xt_counters_alloc(unsigned int counters)
1343 {
1344 	struct xt_counters *mem;
1345 
1346 	if (counters == 0 || counters > INT_MAX / sizeof(*mem))
1347 		return NULL;
1348 
1349 	counters *= sizeof(*mem);
1350 	if (counters > XT_MAX_TABLE_SIZE)
1351 		return NULL;
1352 
1353 	return vzalloc(counters);
1354 }
1355 EXPORT_SYMBOL(xt_counters_alloc);
1356 
1357 struct xt_table_info *
1358 xt_replace_table(struct xt_table *table,
1359 	      unsigned int num_counters,
1360 	      struct xt_table_info *newinfo,
1361 	      int *error)
1362 {
1363 	struct xt_table_info *private;
1364 	unsigned int cpu;
1365 	int ret;
1366 
1367 	ret = xt_jumpstack_alloc(newinfo);
1368 	if (ret < 0) {
1369 		*error = ret;
1370 		return NULL;
1371 	}
1372 
1373 	/* Do the substitution. */
1374 	local_bh_disable();
1375 	private = table->private;
1376 
1377 	/* Check inside lock: is the old number correct? */
1378 	if (num_counters != private->number) {
1379 		pr_debug("num_counters != table->private->number (%u/%u)\n",
1380 			 num_counters, private->number);
1381 		local_bh_enable();
1382 		*error = -EAGAIN;
1383 		return NULL;
1384 	}
1385 
1386 	newinfo->initial_entries = private->initial_entries;
1387 	/*
1388 	 * Ensure contents of newinfo are visible before assigning to
1389 	 * private.
1390 	 */
1391 	smp_wmb();
1392 	table->private = newinfo;
1393 
1394 	/* make sure all cpus see new ->private value */
1395 	smp_wmb();
1396 
1397 	/*
1398 	 * Even though table entries have now been swapped, other CPU's
1399 	 * may still be using the old entries...
1400 	 */
1401 	local_bh_enable();
1402 
1403 	/* ... so wait for even xt_recseq on all cpus */
1404 	for_each_possible_cpu(cpu) {
1405 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
1406 		u32 seq = raw_read_seqcount(s);
1407 
1408 		if (seq & 1) {
1409 			do {
1410 				cond_resched();
1411 				cpu_relax();
1412 			} while (seq == raw_read_seqcount(s));
1413 		}
1414 	}
1415 
1416 #ifdef CONFIG_AUDIT
1417 	if (audit_enabled) {
1418 		audit_log(audit_context(), GFP_KERNEL,
1419 			  AUDIT_NETFILTER_CFG,
1420 			  "table=%s family=%u entries=%u",
1421 			  table->name, table->af, private->number);
1422 	}
1423 #endif
1424 
1425 	return private;
1426 }
1427 EXPORT_SYMBOL_GPL(xt_replace_table);
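
/*
 * Illustrative sketch (editor's addition, not in the original file): how a
 * ruleset replacement typically drives xt_replace_table().  Real callers
 * (do_replace() in the per-family code) hold the table mutex via
 * xt_find_table_lock() and also harvest the old counters before freeing;
 * those steps are elided here.
 */
static int __maybe_unused example_swap_ruleset(struct xt_table *table,
					       struct xt_table_info *newinfo,
					       unsigned int num_counters)
{
	struct xt_table_info *old;
	int ret = 0;

	old = xt_replace_table(table, num_counters, newinfo, &ret);
	if (!old)
		return ret;	/* e.g. -EAGAIN if num_counters was stale */

	/* all packet-path readers have left 'old' once we get here */
	xt_free_table_info(old);
	return 0;
}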
1428 
1429 struct xt_table *xt_register_table(struct net *net,
1430 				   const struct xt_table *input_table,
1431 				   struct xt_table_info *bootstrap,
1432 				   struct xt_table_info *newinfo)
1433 {
1434 	int ret;
1435 	struct xt_table_info *private;
1436 	struct xt_table *t, *table;
1437 
1438 	/* Don't add one object to multiple lists. */
1439 	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
1440 	if (!table) {
1441 		ret = -ENOMEM;
1442 		goto out;
1443 	}
1444 
1445 	mutex_lock(&xt[table->af].mutex);
1446 	/* Don't autoload: we'd eat our tail... */
1447 	list_for_each_entry(t, &net->xt.tables[table->af], list) {
1448 		if (strcmp(t->name, table->name) == 0) {
1449 			ret = -EEXIST;
1450 			goto unlock;
1451 		}
1452 	}
1453 
1454 	/* Simplifies replace_table code. */
1455 	table->private = bootstrap;
1456 
1457 	if (!xt_replace_table(table, 0, newinfo, &ret))
1458 		goto unlock;
1459 
1460 	private = table->private;
1461 	pr_debug("table->private->number = %u\n", private->number);
1462 
1463 	/* save number of initial entries */
1464 	private->initial_entries = private->number;
1465 
1466 	list_add(&table->list, &net->xt.tables[table->af]);
1467 	mutex_unlock(&xt[table->af].mutex);
1468 	return table;
1469 
1470 unlock:
1471 	mutex_unlock(&xt[table->af].mutex);
1472 	kfree(table);
1473 out:
1474 	return ERR_PTR(ret);
1475 }
1476 EXPORT_SYMBOL_GPL(xt_register_table);
1477 
1478 void *xt_unregister_table(struct xt_table *table)
1479 {
1480 	struct xt_table_info *private;
1481 
1482 	mutex_lock(&xt[table->af].mutex);
1483 	private = table->private;
1484 	list_del(&table->list);
1485 	mutex_unlock(&xt[table->af].mutex);
1486 	kfree(table);
1487 
1488 	return private;
1489 }
1490 EXPORT_SYMBOL_GPL(xt_unregister_table);
1491 
1492 #ifdef CONFIG_PROC_FS
1493 static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1494 {
1495 	struct net *net = seq_file_net(seq);
1496 	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1497 
1498 	mutex_lock(&xt[af].mutex);
1499 	return seq_list_start(&net->xt.tables[af], *pos);
1500 }
1501 
1502 static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1503 {
1504 	struct net *net = seq_file_net(seq);
1505 	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1506 
1507 	return seq_list_next(v, &net->xt.tables[af], pos);
1508 }
1509 
1510 static void xt_table_seq_stop(struct seq_file *seq, void *v)
1511 {
1512 	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1513 
1514 	mutex_unlock(&xt[af].mutex);
1515 }
1516 
1517 static int xt_table_seq_show(struct seq_file *seq, void *v)
1518 {
1519 	struct xt_table *table = list_entry(v, struct xt_table, list);
1520 
1521 	if (*table->name)
1522 		seq_printf(seq, "%s\n", table->name);
1523 	return 0;
1524 }
1525 
1526 static const struct seq_operations xt_table_seq_ops = {
1527 	.start	= xt_table_seq_start,
1528 	.next	= xt_table_seq_next,
1529 	.stop	= xt_table_seq_stop,
1530 	.show	= xt_table_seq_show,
1531 };
1532 
1533 /*
1534  * Traverse state for ip{,6}_{tables,matches} to help cross
1535  * the multi-AF mutexes.
1536  */
1537 struct nf_mttg_trav {
1538 	struct list_head *head, *curr;
1539 	uint8_t class;
1540 };
1541 
1542 enum {
1543 	MTTG_TRAV_INIT,
1544 	MTTG_TRAV_NFP_UNSPEC,
1545 	MTTG_TRAV_NFP_SPEC,
1546 	MTTG_TRAV_DONE,
1547 };
1548 
1549 static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
1550     bool is_target)
1551 {
1552 	static const uint8_t next_class[] = {
1553 		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
1554 		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
1555 	};
1556 	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1557 	struct nf_mttg_trav *trav = seq->private;
1558 
1559 	switch (trav->class) {
1560 	case MTTG_TRAV_INIT:
1561 		trav->class = MTTG_TRAV_NFP_UNSPEC;
1562 		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
1563 		trav->head = trav->curr = is_target ?
1564 			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
1565  		break;
1566 	case MTTG_TRAV_NFP_UNSPEC:
1567 		trav->curr = trav->curr->next;
1568 		if (trav->curr != trav->head)
1569 			break;
1570 		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1571 		mutex_lock(&xt[nfproto].mutex);
1572 		trav->head = trav->curr = is_target ?
1573 			&xt[nfproto].target : &xt[nfproto].match;
1574 		trav->class = next_class[trav->class];
1575 		break;
1576 	case MTTG_TRAV_NFP_SPEC:
1577 		trav->curr = trav->curr->next;
1578 		if (trav->curr != trav->head)
1579 			break;
1580 		/* fall through */
1581 	default:
1582 		return NULL;
1583 	}
1584 
1585 	if (ppos != NULL)
1586 		++*ppos;
1587 	return trav;
1588 }
1589 
1590 static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1591     bool is_target)
1592 {
1593 	struct nf_mttg_trav *trav = seq->private;
1594 	unsigned int j;
1595 
1596 	trav->class = MTTG_TRAV_INIT;
1597 	for (j = 0; j < *pos; ++j)
1598 		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1599 			return NULL;
1600 	return trav;
1601 }
1602 
1603 static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1604 {
1605 	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1606 	struct nf_mttg_trav *trav = seq->private;
1607 
1608 	switch (trav->class) {
1609 	case MTTG_TRAV_NFP_UNSPEC:
1610 		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1611 		break;
1612 	case MTTG_TRAV_NFP_SPEC:
1613 		mutex_unlock(&xt[nfproto].mutex);
1614 		break;
1615 	}
1616 }
1617 
1618 static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
1619 {
1620 	return xt_mttg_seq_start(seq, pos, false);
1621 }
1622 
1623 static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1624 {
1625 	return xt_mttg_seq_next(seq, v, ppos, false);
1626 }
1627 
1628 static int xt_match_seq_show(struct seq_file *seq, void *v)
1629 {
1630 	const struct nf_mttg_trav *trav = seq->private;
1631 	const struct xt_match *match;
1632 
1633 	switch (trav->class) {
1634 	case MTTG_TRAV_NFP_UNSPEC:
1635 	case MTTG_TRAV_NFP_SPEC:
1636 		if (trav->curr == trav->head)
1637 			return 0;
1638 		match = list_entry(trav->curr, struct xt_match, list);
1639 		if (*match->name)
1640 			seq_printf(seq, "%s\n", match->name);
1641 	}
1642 	return 0;
1643 }
1644 
1645 static const struct seq_operations xt_match_seq_ops = {
1646 	.start	= xt_match_seq_start,
1647 	.next	= xt_match_seq_next,
1648 	.stop	= xt_mttg_seq_stop,
1649 	.show	= xt_match_seq_show,
1650 };
1651 
1652 static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1653 {
1654 	return xt_mttg_seq_start(seq, pos, true);
1655 }
1656 
1657 static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1658 {
1659 	return xt_mttg_seq_next(seq, v, ppos, true);
1660 }
1661 
1662 static int xt_target_seq_show(struct seq_file *seq, void *v)
1663 {
1664 	const struct nf_mttg_trav *trav = seq->private;
1665 	const struct xt_target *target;
1666 
1667 	switch (trav->class) {
1668 	case MTTG_TRAV_NFP_UNSPEC:
1669 	case MTTG_TRAV_NFP_SPEC:
1670 		if (trav->curr == trav->head)
1671 			return 0;
1672 		target = list_entry(trav->curr, struct xt_target, list);
1673 		if (*target->name)
1674 			seq_printf(seq, "%s\n", target->name);
1675 	}
1676 	return 0;
1677 }
1678 
1679 static const struct seq_operations xt_target_seq_ops = {
1680 	.start	= xt_target_seq_start,
1681 	.next	= xt_target_seq_next,
1682 	.stop	= xt_mttg_seq_stop,
1683 	.show	= xt_target_seq_show,
1684 };
1685 
1686 #define FORMAT_TABLES	"_tables_names"
1687 #define	FORMAT_MATCHES	"_tables_matches"
1688 #define FORMAT_TARGETS 	"_tables_targets"
1689 
1690 #endif /* CONFIG_PROC_FS */
1691 
1692 /**
1693  * xt_hook_ops_alloc - set up hooks for a new table
1694  * @table:	table with metadata needed to set up hooks
1695  * @fn:		Hook function
1696  *
1697  * This function will create the nf_hook_ops that the x_table needs
1698  * to hand to xt_hook_link_net().
1699  */
1700 struct nf_hook_ops *
1701 xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1702 {
1703 	unsigned int hook_mask = table->valid_hooks;
1704 	uint8_t i, num_hooks = hweight32(hook_mask);
1705 	uint8_t hooknum;
1706 	struct nf_hook_ops *ops;
1707 
1708 	if (!num_hooks)
1709 		return ERR_PTR(-EINVAL);
1710 
1711 	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1712 	if (ops == NULL)
1713 		return ERR_PTR(-ENOMEM);
1714 
1715 	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1716 	     hook_mask >>= 1, ++hooknum) {
1717 		if (!(hook_mask & 1))
1718 			continue;
1719 		ops[i].hook     = fn;
1720 		ops[i].pf       = table->af;
1721 		ops[i].hooknum  = hooknum;
1722 		ops[i].priority = table->priority;
1723 		++i;
1724 	}
1725 
1726 	return ops;
1727 }
1728 EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
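
/*
 * Illustrative sketch (editor's addition, not in the original file): the
 * usual pairing of xt_hook_ops_alloc() with nf_register_net_hooks(), as
 * done by the per-table modules (iptable_filter and friends).  Error
 * handling is trimmed; the function name is made up.
 */
static struct nf_hook_ops * __maybe_unused
example_attach_table(struct net *net, const struct xt_table *table,
		     nf_hookfn *fn)
{
	struct nf_hook_ops *ops;
	int ret;

	ops = xt_hook_ops_alloc(table, fn);
	if (IS_ERR(ops))
		return ops;

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret < 0) {
		kfree(ops);
		return ERR_PTR(ret);
	}
	return ops;
}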
1729 
1730 int xt_proto_init(struct net *net, u_int8_t af)
1731 {
1732 #ifdef CONFIG_PROC_FS
1733 	char buf[XT_FUNCTION_MAXNAMELEN];
1734 	struct proc_dir_entry *proc;
1735 	kuid_t root_uid;
1736 	kgid_t root_gid;
1737 #endif
1738 
1739 	if (af >= ARRAY_SIZE(xt_prefix))
1740 		return -EINVAL;
1741 
1742 
1743 #ifdef CONFIG_PROC_FS
1744 	root_uid = make_kuid(net->user_ns, 0);
1745 	root_gid = make_kgid(net->user_ns, 0);
1746 
1747 	strlcpy(buf, xt_prefix[af], sizeof(buf));
1748 	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1749 	proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
1750 			sizeof(struct seq_net_private),
1751 			(void *)(unsigned long)af);
1752 	if (!proc)
1753 		goto out;
1754 	if (uid_valid(root_uid) && gid_valid(root_gid))
1755 		proc_set_user(proc, root_uid, root_gid);
1756 
1757 	strlcpy(buf, xt_prefix[af], sizeof(buf));
1758 	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1759 	proc = proc_create_seq_private(buf, 0440, net->proc_net,
1760 			&xt_match_seq_ops, sizeof(struct nf_mttg_trav),
1761 			(void *)(unsigned long)af);
1762 	if (!proc)
1763 		goto out_remove_tables;
1764 	if (uid_valid(root_uid) && gid_valid(root_gid))
1765 		proc_set_user(proc, root_uid, root_gid);
1766 
1767 	strlcpy(buf, xt_prefix[af], sizeof(buf));
1768 	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1769 	proc = proc_create_seq_private(buf, 0440, net->proc_net,
1770 			 &xt_target_seq_ops, sizeof(struct nf_mttg_trav),
1771 			 (void *)(unsigned long)af);
1772 	if (!proc)
1773 		goto out_remove_matches;
1774 	if (uid_valid(root_uid) && gid_valid(root_gid))
1775 		proc_set_user(proc, root_uid, root_gid);
1776 #endif
1777 
1778 	return 0;
1779 
1780 #ifdef CONFIG_PROC_FS
1781 out_remove_matches:
1782 	strlcpy(buf, xt_prefix[af], sizeof(buf));
1783 	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1784 	remove_proc_entry(buf, net->proc_net);
1785 
1786 out_remove_tables:
1787 	strlcpy(buf, xt_prefix[af], sizeof(buf));
1788 	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1789 	remove_proc_entry(buf, net->proc_net);
1790 out:
1791 	return -1;
1792 #endif
1793 }
1794 EXPORT_SYMBOL_GPL(xt_proto_init);
1795 
1796 void xt_proto_fini(struct net *net, u_int8_t af)
1797 {
1798 #ifdef CONFIG_PROC_FS
1799 	char buf[XT_FUNCTION_MAXNAMELEN];
1800 
1801 	strlcpy(buf, xt_prefix[af], sizeof(buf));
1802 	strlcat(buf, FORMAT_TABLES, sizeof(buf));
1803 	remove_proc_entry(buf, net->proc_net);
1804 
1805 	strlcpy(buf, xt_prefix[af], sizeof(buf));
1806 	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1807 	remove_proc_entry(buf, net->proc_net);
1808 
1809 	strlcpy(buf, xt_prefix[af], sizeof(buf));
1810 	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1811 	remove_proc_entry(buf, net->proc_net);
1812 #endif /*CONFIG_PROC_FS*/
1813 }
1814 EXPORT_SYMBOL_GPL(xt_proto_fini);
1815 
1816 /**
1817  * xt_percpu_counter_alloc - allocate x_tables rule counter
1818  *
1819  * @state: pointer to xt_percpu allocation state
1820  * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
1821  *
1822  * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
1823  * contain the address of the real (percpu) counter.
1824  *
1825  * Rule evaluation needs to use xt_get_this_cpu_counter() helper
1826  * to fetch the real percpu counter.
1827  *
1828  * To speed up allocation and improve data locality, a 4kb block is
1829  * allocated.  Freeing any counter may free an entire block, so all
1830  * counters allocated using the same state must be freed at the same
1831  * time.
1832  *
1833  * xt_percpu_counter_alloc_state contains the base address of the
1834  * allocated page and the current sub-offset.
1835  *
1836  * returns false on error.
1837  */
1838 bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
1839 			     struct xt_counters *counter)
1840 {
1841 	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
1842 
1843 	if (nr_cpu_ids <= 1)
1844 		return true;
1845 
1846 	if (!state->mem) {
1847 		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
1848 					    XT_PCPU_BLOCK_SIZE);
1849 		if (!state->mem)
1850 			return false;
1851 	}
1852 	counter->pcnt = (__force unsigned long)(state->mem + state->off);
1853 	state->off += sizeof(*counter);
1854 	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
1855 		state->mem = NULL;
1856 		state->off = 0;
1857 	}
1858 	return true;
1859 }
1860 EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1861 
1862 void xt_percpu_counter_free(struct xt_counters *counters)
1863 {
1864 	unsigned long pcnt = counters->pcnt;
1865 
1866 	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
1867 		free_percpu((void __percpu *)pcnt);
1868 }
1869 EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
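
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * allocating per-rule percpu counters from one shared allocation state,
 * as described above xt_percpu_counter_alloc().  A flat array of
 * xt_counters stands in for the counters embedded in the ip(6)t_entry
 * structs of a real ruleset.
 */
static int __maybe_unused
example_alloc_rule_counters(struct xt_counters *counters, unsigned int nrules)
{
	struct xt_percpu_counter_alloc_state state = {};
	unsigned int i;

	for (i = 0; i < nrules; i++) {
		if (!xt_percpu_counter_alloc(&state, &counters[i])) {
			while (i-- > 0)
				xt_percpu_counter_free(&counters[i]);
			return -ENOMEM;
		}
	}
	return 0;	/* free each counter with xt_percpu_counter_free() later */
}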
1870 
1871 static int __net_init xt_net_init(struct net *net)
1872 {
1873 	int i;
1874 
1875 	for (i = 0; i < NFPROTO_NUMPROTO; i++)
1876 		INIT_LIST_HEAD(&net->xt.tables[i]);
1877 	return 0;
1878 }
1879 
1880 static void __net_exit xt_net_exit(struct net *net)
1881 {
1882 	int i;
1883 
1884 	for (i = 0; i < NFPROTO_NUMPROTO; i++)
1885 		WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
1886 }
1887 
1888 static struct pernet_operations xt_net_ops = {
1889 	.init = xt_net_init,
1890 	.exit = xt_net_exit,
1891 };
1892 
1893 static int __init xt_init(void)
1894 {
1895 	unsigned int i;
1896 	int rv;
1897 
1898 	for_each_possible_cpu(i) {
1899 		seqcount_init(&per_cpu(xt_recseq, i));
1900 	}
1901 
1902 	xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
1903 	if (!xt)
1904 		return -ENOMEM;
1905 
1906 	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1907 		mutex_init(&xt[i].mutex);
1908 #ifdef CONFIG_COMPAT
1909 		mutex_init(&xt[i].compat_mutex);
1910 		xt[i].compat_tab = NULL;
1911 #endif
1912 		INIT_LIST_HEAD(&xt[i].target);
1913 		INIT_LIST_HEAD(&xt[i].match);
1914 	}
1915 	rv = register_pernet_subsys(&xt_net_ops);
1916 	if (rv < 0)
1917 		kfree(xt);
1918 	return rv;
1919 }
1920 
1921 static void __exit xt_fini(void)
1922 {
1923 	unregister_pernet_subsys(&xt_net_ops);
1924 	kfree(xt);
1925 }
1926 
1927 module_init(xt_init);
1928 module_exit(xt_fini);
1929 
1930