/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@netfilter.org> */

#ifndef _IP_SET_HASH_GEN_H
#define _IP_SET_HASH_GEN_H

#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/types.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/ipset/ip_set.h>

#define __ipset_dereference(p)		\
	rcu_dereference_protected(p, 1)
#define ipset_dereference_nfnl(p)	\
	rcu_dereference_protected(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
#define ipset_dereference_set(p, set)	\
	rcu_dereference_protected(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
		lockdep_is_held(&(set)->lock))
#define ipset_dereference_bh_nfnl(p)	\
	rcu_dereference_bh_check(p,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))

/* Hashing which uses arrays to resolve clashes. The hash table is resized
 * (doubled) when searching becomes too long.
 * Internally jhash is used with the assumption that the size of the
 * stored data is a multiple of sizeof(u32).
 *
 * Readers and resizing
 *
 * Resizing can be triggered by userspace command only, and those
 * are serialized by the nfnl mutex. During resizing the set is
 * read-locked, so the only possible concurrent operations are
 * the kernel side readers. Those must be protected by proper RCU locking.
 */
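
/* A minimal sketch of the kernel-side reader pattern described above (it is
 * what mtype_test() below does in full):
 *
 *	rcu_read_lock_bh();
 *	t = rcu_dereference_bh(h->table);
 *	... look up in t ...
 *	rcu_read_unlock_bh();
 *
 * Resizing publishes the new table with rcu_assign_pointer() and waits for
 * such readers with synchronize_rcu() before the old table is destroyed.
 */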

/* Number of elements to store in an initial array block */
#define AHASH_INIT_SIZE			2
/* Max number of elements to store in an array block */
#define AHASH_MAX_SIZE			(6 * AHASH_INIT_SIZE)
/* Max number of elements in the array block when tuned */
#define AHASH_MAX_TUNED			64
#define AHASH_MAX(h)			((h)->bucketsize)
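
/* Illustration of the sizes above: a bucket array starts with 2 slots,
 * grows in steps of AHASH_INIT_SIZE, defaults to at most 12 slots, and
 * runtime tuning (IP_SET_HASH_WITH_MULTI) may raise AHASH_MAX(h) up to 64.
 */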

/* A hash bucket */
struct hbucket {
	struct rcu_head rcu;	/* for call_rcu */
	/* Which positions are used in the array */
	DECLARE_BITMAP(used, AHASH_MAX_TUNED);
	u8 size;		/* size of the array */
	u8 pos;			/* position of the first free entry */
	unsigned char value[]	/* the array of the values */
		__aligned(__alignof__(u64));
};

/* Region size for locking == 2^HTABLE_REGION_BITS */
#define HTABLE_REGION_BITS	10
#define ahash_numof_locks(htable_bits)		\
	((htable_bits) < HTABLE_REGION_BITS ? 1	\
		: jhash_size((htable_bits) - HTABLE_REGION_BITS))
#define ahash_sizeof_regions(htable_bits)		\
	(ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
#define ahash_region(n, htable_bits)		\
	((n) % ahash_numof_locks(htable_bits))
#define ahash_bucket_start(h, htable_bits)	\
	((htable_bits) < HTABLE_REGION_BITS ? 0	\
		: (h) * jhash_size(HTABLE_REGION_BITS))
#define ahash_bucket_end(h, htable_bits)	\
	((htable_bits) < HTABLE_REGION_BITS ? jhash_size(htable_bits)	\
		: ((h) + 1) * jhash_size(HTABLE_REGION_BITS))
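
/* Worked example of the region arithmetic above: with htable_bits == 12
 * there are ahash_numof_locks(12) == jhash_size(2) == 4 region locks,
 * region r covers the buckets [r * 1024, (r + 1) * 1024), and
 * ahash_region(key, 12) == key % 4 picks the lock index for a bucket key.
 * Tables smaller than 2^HTABLE_REGION_BITS buckets use a single region.
 */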

struct htable_gc {
	struct delayed_work dwork;
	struct ip_set *set;	/* Set the gc belongs to */
	u32 region;		/* Last gc run position */
};

/* The hash table: the table size stored here in order to make resizing easy */
struct htable {
	atomic_t ref;		/* References for resizing */
	atomic_t uref;		/* References for dumping and gc */
	u8 htable_bits;		/* size of hash table == 2^htable_bits */
	u32 maxelem;		/* Maxelem per region */
	struct ip_set_region *hregion;	/* Region locks and ext sizes */
	struct hbucket __rcu *bucket[]; /* hashtable buckets */
};

#define hbucket(h, i)		((h)->bucket[i])
#define ext_size(n, dsize)	\
	(sizeof(struct hbucket) + (n) * (dsize))

#ifndef IPSET_NET_COUNT
#define IPSET_NET_COUNT		1
#endif

/* Book-keeping of the prefixes added to the set */
struct net_prefixes {
	u32 nets[IPSET_NET_COUNT]; /* number of elements for this cidr */
	u8 cidr[IPSET_NET_COUNT];  /* the cidr value */
};

/* Compute the hash table size */
static size_t
htable_size(u8 hbits)
{
	size_t hsize;

	/* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */
	if (hbits > 31)
		return 0;
	hsize = jhash_size(hbits);
	if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)
	    < hsize)
		return 0;

	return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
}
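
/* Example for htable_size(): htable_size(10) is 1024 * sizeof(struct
 * hbucket *) + sizeof(struct htable), i.e. one bucket pointer per slot plus
 * the table header; hbits > 31 or a pointer array that would overflow
 * INT_MAX yields 0, which callers treat as "cannot grow further".
 */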

#ifdef IP_SET_HASH_WITH_NETS
#if IPSET_NET_COUNT > 1
#define __CIDR(cidr, i)		(cidr[i])
#else
#define __CIDR(cidr, i)		(cidr)
#endif

/* cidr + 1 is stored in net_prefixes to support /0 */
#define NCIDR_PUT(cidr)		((cidr) + 1)
#define NCIDR_GET(cidr)		((cidr) - 1)

#ifdef IP_SET_HASH_WITH_NETS_PACKED
/* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */
#define DCIDR_PUT(cidr)		((cidr) - 1)
#define DCIDR_GET(cidr, i)	(__CIDR(cidr, i) + 1)
#else
#define DCIDR_PUT(cidr)		(cidr)
#define DCIDR_GET(cidr, i)	__CIDR(cidr, i)
#endif

#define INIT_CIDR(cidr, host_mask)	\
	DCIDR_PUT(((cidr) ? NCIDR_GET(cidr) : host_mask))
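
/* Example of the two encodings above: a /0 network is book-kept as
 * NCIDR_PUT(0) == 1, so a zero cidr[] slot can mean "unused"; with
 * IP_SET_HASH_WITH_NETS_PACKED a /24 element stores DCIDR_PUT(24) == 23 in
 * the data entry and DCIDR_GET() restores 24.
 */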

#ifdef IP_SET_HASH_WITH_NET0
/* cidr from 0 to HOST_MASK value and c = cidr + 1 */
#define NLEN			(HOST_MASK + 1)
#define CIDR_POS(c)		((c) - 1)
#else
/* cidr from 1 to HOST_MASK value and c = cidr + 1 */
#define NLEN			HOST_MASK
#define CIDR_POS(c)		((c) - 2)
#endif

#else
#define NLEN			0
#endif /* IP_SET_HASH_WITH_NETS */

#define SET_ELEM_EXPIRED(set, d)	\
	(SET_WITH_TIMEOUT(set) &&	\
	 ip_set_timeout_expired(ext_timeout(d, set)))

#if defined(IP_SET_HASH_WITH_NETMASK) || defined(IP_SET_HASH_WITH_BITMASK)
static const union nf_inet_addr onesmask = {
	.all[0] = 0xffffffff,
	.all[1] = 0xffffffff,
	.all[2] = 0xffffffff,
	.all[3] = 0xffffffff
};

static const union nf_inet_addr zeromask = {};
#endif

#endif /* _IP_SET_HASH_GEN_H */

#ifndef MTYPE
#error "MTYPE is not defined!"
#endif

#ifndef HTYPE
#error "HTYPE is not defined!"
#endif

#ifndef HOST_MASK
#error "HOST_MASK is not defined!"
#endif

/* Family dependent templates */

#undef ahash_data
#undef mtype_data_equal
#undef mtype_do_data_match
#undef mtype_data_set_flags
#undef mtype_data_reset_elem
#undef mtype_data_reset_flags
#undef mtype_data_netmask
#undef mtype_data_list
#undef mtype_data_next
#undef mtype_elem

#undef mtype_ahash_destroy
#undef mtype_ext_cleanup
#undef mtype_add_cidr
#undef mtype_del_cidr
#undef mtype_ahash_memsize
#undef mtype_flush
#undef mtype_destroy
#undef mtype_same_set
#undef mtype_kadt
#undef mtype_uadt

#undef mtype_add
#undef mtype_del
#undef mtype_test_cidrs
#undef mtype_test
#undef mtype_uref
#undef mtype_resize
#undef mtype_ext_size
#undef mtype_resize_ad
#undef mtype_head
#undef mtype_list
#undef mtype_gc_do
#undef mtype_gc
#undef mtype_gc_init
#undef mtype_variant
#undef mtype_data_match

#undef htype
#undef HKEY

#define mtype_data_equal	IPSET_TOKEN(MTYPE, _data_equal)
#ifdef IP_SET_HASH_WITH_NETS
#define mtype_do_data_match	IPSET_TOKEN(MTYPE, _do_data_match)
#else
#define mtype_do_data_match(d)	1
#endif
#define mtype_data_set_flags	IPSET_TOKEN(MTYPE, _data_set_flags)
#define mtype_data_reset_elem	IPSET_TOKEN(MTYPE, _data_reset_elem)
#define mtype_data_reset_flags	IPSET_TOKEN(MTYPE, _data_reset_flags)
#define mtype_data_netmask	IPSET_TOKEN(MTYPE, _data_netmask)
#define mtype_data_list		IPSET_TOKEN(MTYPE, _data_list)
#define mtype_data_next		IPSET_TOKEN(MTYPE, _data_next)
#define mtype_elem		IPSET_TOKEN(MTYPE, _elem)

#define mtype_ahash_destroy	IPSET_TOKEN(MTYPE, _ahash_destroy)
#define mtype_ext_cleanup	IPSET_TOKEN(MTYPE, _ext_cleanup)
#define mtype_add_cidr		IPSET_TOKEN(MTYPE, _add_cidr)
#define mtype_del_cidr		IPSET_TOKEN(MTYPE, _del_cidr)
#define mtype_ahash_memsize	IPSET_TOKEN(MTYPE, _ahash_memsize)
#define mtype_flush		IPSET_TOKEN(MTYPE, _flush)
#define mtype_destroy		IPSET_TOKEN(MTYPE, _destroy)
#define mtype_same_set		IPSET_TOKEN(MTYPE, _same_set)
#define mtype_kadt		IPSET_TOKEN(MTYPE, _kadt)
#define mtype_uadt		IPSET_TOKEN(MTYPE, _uadt)

#define mtype_add		IPSET_TOKEN(MTYPE, _add)
#define mtype_del		IPSET_TOKEN(MTYPE, _del)
#define mtype_test_cidrs	IPSET_TOKEN(MTYPE, _test_cidrs)
#define mtype_test		IPSET_TOKEN(MTYPE, _test)
#define mtype_uref		IPSET_TOKEN(MTYPE, _uref)
#define mtype_resize		IPSET_TOKEN(MTYPE, _resize)
#define mtype_ext_size		IPSET_TOKEN(MTYPE, _ext_size)
#define mtype_resize_ad		IPSET_TOKEN(MTYPE, _resize_ad)
#define mtype_head		IPSET_TOKEN(MTYPE, _head)
#define mtype_list		IPSET_TOKEN(MTYPE, _list)
#define mtype_gc_do		IPSET_TOKEN(MTYPE, _gc_do)
#define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
#define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_variant		IPSET_TOKEN(MTYPE, _variant)
#define mtype_data_match	IPSET_TOKEN(MTYPE, _data_match)

#ifndef HKEY_DATALEN
#define HKEY_DATALEN		sizeof(struct mtype_elem)
#endif

#define htype			MTYPE

#define HKEY(data, initval, htable_bits)			\
({								\
	const u32 *__k = (const u32 *)data;			\
	u32 __l = HKEY_DATALEN / sizeof(u32);			\
								\
	BUILD_BUG_ON(HKEY_DATALEN % sizeof(u32) != 0);		\
								\
	jhash2(__k, __l, initval) & jhash_mask(htable_bits);	\
})
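
/* A minimal usage sketch for HKEY() (the element type here is hypothetical,
 * assuming HKEY_DATALEN == 8, i.e. two u32 words):
 *
 *	struct foo_elem { __be32 ip; __be32 mark; } e;
 *	u32 key = HKEY(&e, h->initval, t->htable_bits);
 *	struct hbucket *n = rcu_dereference_bh(hbucket(t, key));
 *
 * jhash2() consumes HKEY_DATALEN / sizeof(u32) words, which is why the
 * stored data size must be a multiple of sizeof(u32).
 */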

/* The generic hash structure */
struct htype {
	struct htable __rcu *table; /* the hash table */
	struct htable_gc gc;	/* gc workqueue */
	u32 maxelem;		/* max elements in the hash */
	u32 initval;		/* random jhash init value */
#ifdef IP_SET_HASH_WITH_MARKMASK
	u32 markmask;		/* markmask value for mark mask to store */
#endif
	u8 bucketsize;		/* max elements in an array block */
#if defined(IP_SET_HASH_WITH_NETMASK) || defined(IP_SET_HASH_WITH_BITMASK)
	u8 netmask;		/* netmask value for subnets to store */
	union nf_inet_addr bitmask;	/* stores bitmask */
#endif
	struct list_head ad;	/* Resize add|del backlist */
	struct mtype_elem next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_NETS
	struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */
#endif
};

/* ADD|DEL entries saved during resize */
struct mtype_resize_ad {
	struct list_head list;
	enum ipset_adt ad;	/* ADD|DEL element */
	struct mtype_elem d;	/* Element value */
	struct ip_set_ext ext;	/* Extensions for ADD */
	struct ip_set_ext mext;	/* Target extensions for ADD */
	u32 flags;		/* Flags for ADD */
};

#ifdef IP_SET_HASH_WITH_NETS
/* Network cidr size bookkeeping when the hash stores different
 * sized networks. cidr == real cidr + 1 to support /0.
 */
static void
mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
	int i, j;

	spin_lock_bh(&set->lock);
	/* Add in increasing prefix order, so larger cidr first */
	for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) {
		if (j != -1) {
			continue;
		} else if (h->nets[i].cidr[n] < cidr) {
			j = i;
		} else if (h->nets[i].cidr[n] == cidr) {
			h->nets[CIDR_POS(cidr)].nets[n]++;
			goto unlock;
		}
	}
	if (j != -1) {
		for (; i > j; i--)
			h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
	}
	h->nets[i].cidr[n] = cidr;
	h->nets[CIDR_POS(cidr)].nets[n] = 1;
unlock:
	spin_unlock_bh(&set->lock);
}
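
/* Example of the ordering kept by mtype_add_cidr() above (values are stored
 * as real cidr + 1): adding a /24 and then a /16 to an empty IPv4 set leaves
 * nets[].cidr[n] == { 25, 17, 0, ... }, i.e. sorted by decreasing prefix
 * length, so mtype_test_cidrs() tries the most specific networks first.
 */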

static void
mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
	u8 i, j, net_end = NLEN - 1;

	spin_lock_bh(&set->lock);
	for (i = 0; i < NLEN; i++) {
		if (h->nets[i].cidr[n] != cidr)
			continue;
		h->nets[CIDR_POS(cidr)].nets[n]--;
		if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
			goto unlock;
		for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
			h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
		h->nets[j].cidr[n] = 0;
		goto unlock;
	}
unlock:
	spin_unlock_bh(&set->lock);
}
#endif

/* Calculate the actual memory size of the set data */
static size_t
mtype_ahash_memsize(const struct htype *h, const struct htable *t)
{
	return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits);
}

/* Get the ith element from the array block n */
#define ahash_data(n, i, dsize)	\
	((struct mtype_elem *)((n)->value + ((i) * (dsize))))

static void
mtype_ext_cleanup(struct ip_set *set, struct hbucket *n)
{
	int i;

	for (i = 0; i < n->pos; i++)
		if (test_bit(i, n->used))
			ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
}

/* Flush a hash type of set: destroy all elements */
static void
mtype_flush(struct ip_set *set)
{
	struct htype *h = set->data;
	struct htable *t;
	struct hbucket *n;
	u32 r, i;

	t = ipset_dereference_nfnl(h->table);
	for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
		spin_lock_bh(&t->hregion[r].lock);
		for (i = ahash_bucket_start(r, t->htable_bits);
		     i < ahash_bucket_end(r, t->htable_bits); i++) {
			n = __ipset_dereference(hbucket(t, i));
			if (!n)
				continue;
			if (set->extensions & IPSET_EXT_DESTROY)
				mtype_ext_cleanup(set, n);
			/* FIXME: use slab cache */
			rcu_assign_pointer(hbucket(t, i), NULL);
			kfree_rcu(n, rcu);
		}
		t->hregion[r].ext_size = 0;
		t->hregion[r].elements = 0;
		spin_unlock_bh(&t->hregion[r].lock);
	}
#ifdef IP_SET_HASH_WITH_NETS
	memset(h->nets, 0, sizeof(h->nets));
#endif
}

/* Destroy the hashtable part of the set */
static void
mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
{
	struct hbucket *n;
	u32 i;

	for (i = 0; i < jhash_size(t->htable_bits); i++) {
		n = __ipset_dereference(hbucket(t, i));
		if (!n)
			continue;
		if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
			mtype_ext_cleanup(set, n);
		/* FIXME: use slab cache */
		kfree(n);
	}

	ip_set_free(t->hregion);
	ip_set_free(t);
}

/* Destroy a hash type of set */
static void
mtype_destroy(struct ip_set *set)
{
	struct htype *h = set->data;
	struct list_head *l, *lt;

	if (SET_WITH_TIMEOUT(set))
		cancel_delayed_work_sync(&h->gc.dwork);

	mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
	list_for_each_safe(l, lt, &h->ad) {
		list_del(l);
		kfree(l);
	}
	kfree(h);

	set->data = NULL;
}

static bool
mtype_same_set(const struct ip_set *a, const struct ip_set *b)
{
	const struct htype *x = a->data;
	const struct htype *y = b->data;

	/* Resizing changes htable_bits, so we ignore it */
	return x->maxelem == y->maxelem &&
	       a->timeout == b->timeout &&
#if defined(IP_SET_HASH_WITH_NETMASK) || defined(IP_SET_HASH_WITH_BITMASK)
	       nf_inet_addr_cmp(&x->bitmask, &y->bitmask) &&
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	       x->markmask == y->markmask &&
#endif
	       a->extensions == b->extensions;
}

static void
mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r)
{
	struct hbucket *n, *tmp;
	struct mtype_elem *data;
	u32 i, j, d;
	size_t dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
	u8 k;
#endif
	u8 htable_bits = t->htable_bits;

	spin_lock_bh(&t->hregion[r].lock);
	for (i = ahash_bucket_start(r, htable_bits);
	     i < ahash_bucket_end(r, htable_bits); i++) {
		n = __ipset_dereference(hbucket(t, i));
		if (!n)
			continue;
		for (j = 0, d = 0; j < n->pos; j++) {
			if (!test_bit(j, n->used)) {
				d++;
				continue;
			}
			data = ahash_data(n, j, dsize);
			if (!ip_set_timeout_expired(ext_timeout(data, set)))
				continue;
			pr_debug("expired %u/%u\n", i, j);
			clear_bit(j, n->used);
			smp_mb__after_atomic();
#ifdef IP_SET_HASH_WITH_NETS
			for (k = 0; k < IPSET_NET_COUNT; k++)
				mtype_del_cidr(set, h,
					NCIDR_PUT(DCIDR_GET(data->cidr, k)),
					k);
#endif
			t->hregion[r].elements--;
			ip_set_ext_destroy(set, data);
			d++;
		}
		if (d >= AHASH_INIT_SIZE) {
			if (d >= n->size) {
				t->hregion[r].ext_size -=
					ext_size(n->size, dsize);
				rcu_assign_pointer(hbucket(t, i), NULL);
				kfree_rcu(n, rcu);
				continue;
			}
			tmp = kzalloc(sizeof(*tmp) +
				(n->size - AHASH_INIT_SIZE) * dsize,
				GFP_ATOMIC);
			if (!tmp)
				/* Still try to delete expired elements. */
				continue;
			tmp->size = n->size - AHASH_INIT_SIZE;
			for (j = 0, d = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				memcpy(tmp->value + d * dsize,
				       data, dsize);
				set_bit(d, tmp->used);
				d++;
			}
			tmp->pos = d;
			t->hregion[r].ext_size -=
				ext_size(AHASH_INIT_SIZE, dsize);
			rcu_assign_pointer(hbucket(t, i), tmp);
			kfree_rcu(n, rcu);
		}
	}
	spin_unlock_bh(&t->hregion[r].lock);
}

static void
mtype_gc(struct work_struct *work)
{
	struct htable_gc *gc;
	struct ip_set *set;
	struct htype *h;
	struct htable *t;
	u32 r, numof_locks;
	unsigned int next_run;

	gc = container_of(work, struct htable_gc, dwork.work);
	set = gc->set;
	h = set->data;

	spin_lock_bh(&set->lock);
	t = ipset_dereference_set(h->table, set);
	atomic_inc(&t->uref);
	numof_locks = ahash_numof_locks(t->htable_bits);
	r = gc->region++;
	if (r >= numof_locks)
		r = gc->region = 0;
	next_run = (IPSET_GC_PERIOD(set->timeout) * HZ) / numof_locks;
	if (next_run < HZ/10)
		next_run = HZ/10;
	spin_unlock_bh(&set->lock);

	mtype_gc_do(set, h, t, r);

	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by expire: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}

	queue_delayed_work(system_power_efficient_wq, &gc->dwork, next_run);
}
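
/* Pacing note for mtype_gc(): each work invocation cleans exactly one
 * region, and next_run is the GC period divided by the number of regions
 * (floored at HZ/10), so a full sweep over all regions takes roughly one
 * IPSET_GC_PERIOD(timeout) interval regardless of the table size.
 */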

static void
mtype_gc_init(struct htable_gc *gc)
{
	INIT_DEFERRABLE_WORK(&gc->dwork, mtype_gc);
	queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
}

static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags);
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags);

/* Resize a hash: create a new hash table with double the hashsize and
 * insert the elements into it. Repeat until we succeed or fail due to
 * memory pressure.
 */
static int
mtype_resize(struct ip_set *set, bool retried)
{
	struct htype *h = set->data;
	struct htable *t, *orig;
	u8 htable_bits;
	size_t hsize, dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
	u8 flags;
	struct mtype_elem *tmp;
#endif
	struct mtype_elem *data;
	struct mtype_elem *d;
	struct hbucket *n, *m;
	struct list_head *l, *lt;
	struct mtype_resize_ad *x;
	u32 i, j, r, nr, key;
	int ret;

#ifdef IP_SET_HASH_WITH_NETS
	tmp = kmalloc(dsize, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
#endif
	orig = ipset_dereference_bh_nfnl(h->table);
	htable_bits = orig->htable_bits;

retry:
	ret = 0;
	htable_bits++;
	if (!htable_bits)
		goto hbwarn;
	hsize = htable_size(htable_bits);
	if (!hsize)
		goto hbwarn;
	t = ip_set_alloc(hsize);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
	if (!t->hregion) {
		ip_set_free(t);
		ret = -ENOMEM;
		goto out;
	}
	t->htable_bits = htable_bits;
	t->maxelem = h->maxelem / ahash_numof_locks(htable_bits);
	for (i = 0; i < ahash_numof_locks(htable_bits); i++)
		spin_lock_init(&t->hregion[i].lock);

	/* There can't be another parallel resizing,
	 * but dumping, gc, kernel side add/del are possible
	 */
	orig = ipset_dereference_bh_nfnl(h->table);
	atomic_set(&orig->ref, 1);
	atomic_inc(&orig->uref);
	pr_debug("attempt to resize set %s from %u to %u, t %p\n",
		 set->name, orig->htable_bits, htable_bits, orig);
	for (r = 0; r < ahash_numof_locks(orig->htable_bits); r++) {
		/* Expire may replace a hbucket with another one */
		rcu_read_lock_bh();
		for (i = ahash_bucket_start(r, orig->htable_bits);
		     i < ahash_bucket_end(r, orig->htable_bits); i++) {
			n = __ipset_dereference(hbucket(orig, i));
			if (!n)
				continue;
			for (j = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				if (SET_ELEM_EXPIRED(set, data))
					continue;
#ifdef IP_SET_HASH_WITH_NETS
				/* We have readers running parallel with us,
				 * so the live data cannot be modified.
				 */
				flags = 0;
				memcpy(tmp, data, dsize);
				data = tmp;
				mtype_data_reset_flags(data, &flags);
#endif
				key = HKEY(data, h->initval, htable_bits);
				m = __ipset_dereference(hbucket(t, key));
				nr = ahash_region(key, htable_bits);
				if (!m) {
					m = kzalloc(sizeof(*m) +
					    AHASH_INIT_SIZE * dsize,
					    GFP_ATOMIC);
					if (!m) {
						ret = -ENOMEM;
						goto cleanup;
					}
					m->size = AHASH_INIT_SIZE;
					t->hregion[nr].ext_size +=
						ext_size(AHASH_INIT_SIZE,
							 dsize);
					RCU_INIT_POINTER(hbucket(t, key), m);
				} else if (m->pos >= m->size) {
					struct hbucket *ht;

					if (m->size >= AHASH_MAX(h)) {
						ret = -EAGAIN;
					} else {
						ht = kzalloc(sizeof(*ht) +
						(m->size + AHASH_INIT_SIZE)
						* dsize,
						GFP_ATOMIC);
						if (!ht)
							ret = -ENOMEM;
					}
					if (ret < 0)
						goto cleanup;
					memcpy(ht, m, sizeof(struct hbucket) +
					       m->size * dsize);
					ht->size = m->size + AHASH_INIT_SIZE;
					t->hregion[nr].ext_size +=
						ext_size(AHASH_INIT_SIZE,
							 dsize);
					kfree(m);
					m = ht;
					RCU_INIT_POINTER(hbucket(t, key), ht);
				}
				d = ahash_data(m, m->pos, dsize);
				memcpy(d, data, dsize);
				set_bit(m->pos++, m->used);
				t->hregion[nr].elements++;
#ifdef IP_SET_HASH_WITH_NETS
				mtype_data_reset_flags(d, &flags);
#endif
			}
		}
		rcu_read_unlock_bh();
	}

	/* There can't be any other writer. */
	rcu_assign_pointer(h->table, t);

	/* Give time to other readers of the set */
	synchronize_rcu();

	pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
		 orig->htable_bits, orig, t->htable_bits, t);
	/* Add/delete elements processed by the SET target during resize.
	 * Kernel-side add cannot trigger a resize and userspace actions
	 * are serialized by the mutex.
	 */
	list_for_each_safe(l, lt, &h->ad) {
		x = list_entry(l, struct mtype_resize_ad, list);
		if (x->ad == IPSET_ADD) {
			mtype_add(set, &x->d, &x->ext, &x->mext, x->flags);
		} else {
			mtype_del(set, &x->d, NULL, NULL, 0);
		}
		list_del(l);
		kfree(l);
	}
	/* If there's nobody else using the table, destroy it */
	if (atomic_dec_and_test(&orig->uref)) {
		pr_debug("Table destroy by resize %p\n", orig);
		mtype_ahash_destroy(set, orig, false);
	}

out:
#ifdef IP_SET_HASH_WITH_NETS
	kfree(tmp);
#endif
	return ret;

cleanup:
	rcu_read_unlock_bh();
	atomic_set(&orig->ref, 0);
	atomic_dec(&orig->uref);
	mtype_ahash_destroy(set, t, false);
	if (ret == -EAGAIN)
		goto retry;
	goto out;

hbwarn:
	/* In case we have plenty of memory :-) */
	pr_warn("Cannot increase the hashsize of set %s further\n", set->name);
	ret = -IPSET_ERR_HASH_FULL;
	goto out;
}

/* Get the current number of elements and ext_size in the set */
static void
mtype_ext_size(struct ip_set *set, u32 *elements, size_t *ext_size)
{
	struct htype *h = set->data;
	const struct htable *t;
	u32 i, j, r;
	struct hbucket *n;
	struct mtype_elem *data;

	t = rcu_dereference_bh(h->table);
	for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
		for (i = ahash_bucket_start(r, t->htable_bits);
		     i < ahash_bucket_end(r, t->htable_bits); i++) {
			n = rcu_dereference_bh(hbucket(t, i));
			if (!n)
				continue;
			for (j = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, set->dsize);
				if (!SET_ELEM_EXPIRED(set, data))
					(*elements)++;
			}
		}
		*ext_size += t->hregion[r].ext_size;
	}
}

/* Add an element to a hash and update the internal counters when it
 * succeeds; otherwise report the proper error code.
 */
static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	const struct mtype_elem *d = value;
	struct mtype_elem *data;
	struct hbucket *n, *old = ERR_PTR(-ENOENT);
	int i, j = -1, ret;
	bool flag_exist = flags & IPSET_FLAG_EXIST;
	bool deleted = false, forceadd = false, reuse = false;
	u32 r, key, multi = 0, elements, maxelem;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	key = HKEY(value, h->initval, t->htable_bits);
	r = ahash_region(key, t->htable_bits);
	atomic_inc(&t->uref);
	elements = t->hregion[r].elements;
	maxelem = t->maxelem;
	if (elements >= maxelem) {
		u32 e;
		if (SET_WITH_TIMEOUT(set)) {
			rcu_read_unlock_bh();
			mtype_gc_do(set, h, t, r);
			rcu_read_lock_bh();
		}
		maxelem = h->maxelem;
		elements = 0;
		for (e = 0; e < ahash_numof_locks(t->htable_bits); e++)
			elements += t->hregion[e].elements;
		if (elements >= maxelem && SET_WITH_FORCEADD(set))
			forceadd = true;
	}
	rcu_read_unlock_bh();

	spin_lock_bh(&t->hregion[r].lock);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n) {
		if (forceadd || elements >= maxelem)
			goto set_full;
		old = NULL;
		n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
			    GFP_ATOMIC);
		if (!n) {
			ret = -ENOMEM;
			goto unlock;
		}
		n->size = AHASH_INIT_SIZE;
		t->hregion[r].ext_size +=
			ext_size(AHASH_INIT_SIZE, set->dsize);
		goto copy_elem;
	}
	for (i = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used)) {
			/* Reuse first deleted entry */
			if (j == -1) {
				deleted = reuse = true;
				j = i;
			}
			continue;
		}
		data = ahash_data(n, i, set->dsize);
		if (mtype_data_equal(data, d, &multi)) {
			if (flag_exist || SET_ELEM_EXPIRED(set, data)) {
				/* Just the extensions could be overwritten */
				j = i;
				goto overwrite_extensions;
			}
			ret = -IPSET_ERR_EXIST;
			goto unlock;
		}
		/* Reuse first timed out entry */
		if (SET_ELEM_EXPIRED(set, data) && j == -1) {
			j = i;
			reuse = true;
		}
	}
	if (reuse || forceadd) {
		if (j == -1)
			j = 0;
		data = ahash_data(n, j, set->dsize);
		if (!deleted) {
#ifdef IP_SET_HASH_WITH_NETS
			for (i = 0; i < IPSET_NET_COUNT; i++)
				mtype_del_cidr(set, h,
					NCIDR_PUT(DCIDR_GET(data->cidr, i)),
					i);
#endif
			ip_set_ext_destroy(set, data);
			t->hregion[r].elements--;
		}
		goto copy_data;
	}
	if (elements >= maxelem)
		goto set_full;
	/* Create a new slot */
	if (n->pos >= n->size) {
#ifdef IP_SET_HASH_WITH_MULTI
		if (h->bucketsize >= AHASH_MAX_TUNED)
			goto set_full;
		else if (h->bucketsize <= multi)
			h->bucketsize += AHASH_INIT_SIZE;
#endif
		if (n->size >= AHASH_MAX(h)) {
			/* Trigger rehashing */
			mtype_data_next(&h->next, d);
			ret = -EAGAIN;
			goto resize;
		}
		old = n;
		n = kzalloc(sizeof(*n) +
			    (old->size + AHASH_INIT_SIZE) * set->dsize,
			    GFP_ATOMIC);
		if (!n) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(n, old, sizeof(struct hbucket) +
		       old->size * set->dsize);
		n->size = old->size + AHASH_INIT_SIZE;
		t->hregion[r].ext_size +=
			ext_size(AHASH_INIT_SIZE, set->dsize);
	}

copy_elem:
	j = n->pos++;
	data = ahash_data(n, j, set->dsize);
copy_data:
	t->hregion[r].elements++;
#ifdef IP_SET_HASH_WITH_NETS
	for (i = 0; i < IPSET_NET_COUNT; i++)
		mtype_add_cidr(set, h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
#endif
	memcpy(data, d, sizeof(struct mtype_elem));
overwrite_extensions:
#ifdef IP_SET_HASH_WITH_NETS
	mtype_data_set_flags(data, flags);
#endif
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(data, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(set, ext_comment(data, set), ext);
	if (SET_WITH_SKBINFO(set))
		ip_set_init_skbinfo(ext_skbinfo(data, set), ext);
	/* Must come last for the case when timed out entry is reused */
	if (SET_WITH_TIMEOUT(set))
		ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
	smp_mb__before_atomic();
	set_bit(j, n->used);
	if (old != ERR_PTR(-ENOENT)) {
		rcu_assign_pointer(hbucket(t, key), n);
		if (old)
			kfree_rcu(old, rcu);
	}
	ret = 0;
resize:
	spin_unlock_bh(&t->hregion[r].lock);
	if (atomic_read(&t->ref) && ext->target) {
		/* Resize is in process and kernel side add, save values */
		struct mtype_resize_ad *x;

		x = kzalloc(sizeof(struct mtype_resize_ad), GFP_ATOMIC);
		if (!x)
			/* Don't bother */
			goto out;
		x->ad = IPSET_ADD;
		memcpy(&x->d, value, sizeof(struct mtype_elem));
		memcpy(&x->ext, ext, sizeof(struct ip_set_ext));
		memcpy(&x->mext, mext, sizeof(struct ip_set_ext));
		x->flags = flags;
		spin_lock_bh(&set->lock);
		list_add_tail(&x->list, &h->ad);
		spin_unlock_bh(&set->lock);
	}
	goto out;

set_full:
	if (net_ratelimit())
		pr_warn("Set %s is full, maxelem %u reached\n",
			set->name, maxelem);
	ret = -IPSET_ERR_HASH_FULL;
unlock:
	spin_unlock_bh(&t->hregion[r].lock);
out:
	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by add: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}
	return ret;
}

/* Delete an element from the hash and free up space if possible.
 */
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	const struct mtype_elem *d = value;
	struct mtype_elem *data;
	struct hbucket *n;
	struct mtype_resize_ad *x = NULL;
	int i, j, k, r, ret = -IPSET_ERR_EXIST;
	u32 key, multi = 0;
	size_t dsize = set->dsize;

	/* Userspace add and resize are excluded by the mutex.
	 * Kernelspace add does not trigger a resize.
	 */
	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	key = HKEY(value, h->initval, t->htable_bits);
	r = ahash_region(key, t->htable_bits);
	atomic_inc(&t->uref);
	rcu_read_unlock_bh();

	spin_lock_bh(&t->hregion[r].lock);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n)
		goto out;
	for (i = 0, k = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used)) {
			k++;
			continue;
		}
		data = ahash_data(n, i, dsize);
		if (!mtype_data_equal(data, d, &multi))
			continue;
		if (SET_ELEM_EXPIRED(set, data))
			goto out;

		ret = 0;
		clear_bit(i, n->used);
		smp_mb__after_atomic();
		if (i + 1 == n->pos)
			n->pos--;
		t->hregion[r].elements--;
#ifdef IP_SET_HASH_WITH_NETS
		for (j = 0; j < IPSET_NET_COUNT; j++)
			mtype_del_cidr(set, h,
				       NCIDR_PUT(DCIDR_GET(d->cidr, j)), j);
#endif
		ip_set_ext_destroy(set, data);

		if (atomic_read(&t->ref) && ext->target) {
			/* Resize is in process and kernel side del,
			 * save values
			 */
			x = kzalloc(sizeof(struct mtype_resize_ad),
				    GFP_ATOMIC);
			if (x) {
				x->ad = IPSET_DEL;
				memcpy(&x->d, value,
				       sizeof(struct mtype_elem));
				x->flags = flags;
			}
		}
		for (; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				k++;
		}
		if (n->pos == 0 && k == 0) {
			t->hregion[r].ext_size -= ext_size(n->size, dsize);
			rcu_assign_pointer(hbucket(t, key), NULL);
			kfree_rcu(n, rcu);
		} else if (k >= AHASH_INIT_SIZE) {
			struct hbucket *tmp = kzalloc(sizeof(*tmp) +
					(n->size - AHASH_INIT_SIZE) * dsize,
					GFP_ATOMIC);
			if (!tmp)
				goto out;
			tmp->size = n->size - AHASH_INIT_SIZE;
			for (j = 0, k = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				memcpy(tmp->value + k * dsize, data, dsize);
				set_bit(k, tmp->used);
				k++;
			}
			tmp->pos = k;
			t->hregion[r].ext_size -=
				ext_size(AHASH_INIT_SIZE, dsize);
			rcu_assign_pointer(hbucket(t, key), tmp);
			kfree_rcu(n, rcu);
		}
		goto out;
	}

out:
	spin_unlock_bh(&t->hregion[r].lock);
	if (x) {
		spin_lock_bh(&set->lock);
		list_add(&x->list, &h->ad);
		spin_unlock_bh(&set->lock);
	}
	if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
		pr_debug("Table destroy after resize by del: %p\n", t);
		mtype_ahash_destroy(set, t, false);
	}
	return ret;
}

static int
mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
		 struct ip_set_ext *mext, struct ip_set *set, u32 flags)
{
	if (!ip_set_match_extensions(set, ext, mext, flags, data))
		return 0;
	/* nomatch entries return -ENOTEMPTY */
	return mtype_do_data_match(data);
}

#ifdef IP_SET_HASH_WITH_NETS
/* Special test function which takes into account the different network
 * sizes added to the set
 */
static int
mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
		 const struct ip_set_ext *ext,
		 struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t = rcu_dereference_bh(h->table);
	struct hbucket *n;
	struct mtype_elem *data;
#if IPSET_NET_COUNT == 2
	struct mtype_elem orig = *d;
	int ret, i, j = 0, k;
#else
	int ret, i, j = 0;
#endif
	u32 key, multi = 0;

	pr_debug("test by nets\n");
	for (; j < NLEN && h->nets[j].cidr[0] && !multi; j++) {
#if IPSET_NET_COUNT == 2
		mtype_data_reset_elem(d, &orig);
		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]), false);
		for (k = 0; k < NLEN && h->nets[k].cidr[1] && !multi;
		     k++) {
			mtype_data_netmask(d, NCIDR_GET(h->nets[k].cidr[1]),
					   true);
#else
		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]));
#endif
		key = HKEY(d, h->initval, t->htable_bits);
		n = rcu_dereference_bh(hbucket(t, key));
		if (!n)
			continue;
		for (i = 0; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				continue;
			data = ahash_data(n, i, set->dsize);
			if (!mtype_data_equal(data, d, &multi))
				continue;
			ret = mtype_data_match(data, ext, mext, set, flags);
			if (ret != 0)
				return ret;
#ifdef IP_SET_HASH_WITH_MULTI
			/* No match, reset multiple match flag */
			multi = 0;
#endif
		}
#if IPSET_NET_COUNT == 2
		}
#endif
	}
	return 0;
}
#endif

/* Test whether the element is added to the set */
static int
mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	   struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	struct mtype_elem *d = value;
	struct hbucket *n;
	struct mtype_elem *data;
	int i, ret = 0;
	u32 key, multi = 0;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
#ifdef IP_SET_HASH_WITH_NETS
	/* If we test an IP address and not a network address,
	 * try all possible network sizes
	 */
	for (i = 0; i < IPSET_NET_COUNT; i++)
		if (DCIDR_GET(d->cidr, i) != HOST_MASK)
			break;
	if (i == IPSET_NET_COUNT) {
		ret = mtype_test_cidrs(set, d, ext, mext, flags);
		goto out;
	}
#endif

	key = HKEY(d, h->initval, t->htable_bits);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used))
			continue;
		data = ahash_data(n, i, set->dsize);
		if (!mtype_data_equal(data, d, &multi))
			continue;
		ret = mtype_data_match(data, ext, mext, set, flags);
		if (ret != 0)
			goto out;
	}
out:
	rcu_read_unlock_bh();
	return ret;
}

/* Reply a HEADER request: fill out the header part of the set */
static int
mtype_head(struct ip_set *set, struct sk_buff *skb)
{
	struct htype *h = set->data;
	const struct htable *t;
	struct nlattr *nested;
	size_t memsize;
	u32 elements = 0;
	size_t ext_size = 0;
	u8 htable_bits;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->table);
	mtype_ext_size(set, &elements, &ext_size);
	memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size;
	htable_bits = t->htable_bits;
	rcu_read_unlock_bh();

	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
	if (!nested)
		goto nla_put_failure;
	if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
			  htonl(jhash_size(htable_bits))) ||
	    nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
		goto nla_put_failure;
#ifdef IP_SET_HASH_WITH_BITMASK
	/* If netmask is set to anything other than HOST_MASK we know that
	 * the user supplied netmask and not bitmask. These two are mutually
	 * exclusive.
	 */
	if (h->netmask == HOST_MASK && !nf_inet_addr_cmp(&onesmask, &h->bitmask)) {
		if (set->family == NFPROTO_IPV4) {
			if (nla_put_ipaddr4(skb, IPSET_ATTR_BITMASK, h->bitmask.ip))
				goto nla_put_failure;
		} else if (set->family == NFPROTO_IPV6) {
			if (nla_put_ipaddr6(skb, IPSET_ATTR_BITMASK, &h->bitmask.in6))
				goto nla_put_failure;
		}
	}
#endif
#ifdef IP_SET_HASH_WITH_NETMASK
	if (h->netmask != HOST_MASK && nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
		goto nla_put_failure;
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
		goto nla_put_failure;
#endif
	if (set->flags & IPSET_CREATE_FLAG_BUCKETSIZE) {
		if (nla_put_u8(skb, IPSET_ATTR_BUCKETSIZE, h->bucketsize) ||
		    nla_put_net32(skb, IPSET_ATTR_INITVAL, htonl(h->initval)))
			goto nla_put_failure;
	}
	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
		goto nla_put_failure;
	if (unlikely(ip_set_put_flags(skb, set)))
		goto nla_put_failure;
	nla_nest_end(skb, nested);

	return 0;
nla_put_failure:
	return -EMSGSIZE;
}

/* Make it possible to run dumping in parallel with resizing */
static void
mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
{
	struct htype *h = set->data;
	struct htable *t;

	if (start) {
		rcu_read_lock_bh();
		t = ipset_dereference_bh_nfnl(h->table);
		atomic_inc(&t->uref);
		cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
		rcu_read_unlock_bh();
	} else if (cb->args[IPSET_CB_PRIVATE]) {
		t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
		if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
			pr_debug("Table destroy after resize by dump: %p\n",
				 t);
			mtype_ahash_destroy(set, t, false);
		}
		cb->args[IPSET_CB_PRIVATE] = 0;
	}
}

/* Reply a LIST/SAVE request: dump the elements of the specified set */
static int
mtype_list(const struct ip_set *set,
	   struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct htable *t;
	struct nlattr *atd, *nested;
	const struct hbucket *n;
	const struct mtype_elem *e;
	u32 first = cb->args[IPSET_CB_ARG0];
	/* We assume that one hash bucket fits into one page */
	void *incomplete;
	int i, ret = 0;

	atd = nla_nest_start(skb, IPSET_ATTR_ADT);
	if (!atd)
		return -EMSGSIZE;

	pr_debug("list hash set %s\n", set->name);
	t = (const struct htable *)cb->args[IPSET_CB_PRIVATE];
	/* Expire may replace a hbucket with another one */
	rcu_read_lock();
	for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
	     cb->args[IPSET_CB_ARG0]++) {
		cond_resched_rcu();
		incomplete = skb_tail_pointer(skb);
		n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
		pr_debug("cb->arg bucket: %lu, t %p n %p\n",
			 cb->args[IPSET_CB_ARG0], t, n);
		if (!n)
			continue;
		for (i = 0; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				continue;
			e = ahash_data(n, i, set->dsize);
			if (SET_ELEM_EXPIRED(set, e))
				continue;
			pr_debug("list hash %lu hbucket %p i %u, data %p\n",
				 cb->args[IPSET_CB_ARG0], n, i, e);
			nested = nla_nest_start(skb, IPSET_ATTR_DATA);
			if (!nested) {
				if (cb->args[IPSET_CB_ARG0] == first) {
					nla_nest_cancel(skb, atd);
					ret = -EMSGSIZE;
					goto out;
				}
				goto nla_put_failure;
			}
			if (mtype_data_list(skb, e))
				goto nla_put_failure;
			if (ip_set_put_extensions(skb, set, e, true))
				goto nla_put_failure;
			nla_nest_end(skb, nested);
		}
	}
	nla_nest_end(skb, atd);
	/* Set listing finished */
	cb->args[IPSET_CB_ARG0] = 0;

	goto out;

nla_put_failure:
	nlmsg_trim(skb, incomplete);
	if (unlikely(first == cb->args[IPSET_CB_ARG0])) {
		pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n",
			set->name);
		cb->args[IPSET_CB_ARG0] = 0;
		ret = -EMSGSIZE;
	} else {
		nla_nest_end(skb, atd);
	}
out:
	rcu_read_unlock();
	return ret;
}

static int
IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
			  const struct xt_action_param *par,
			  enum ipset_adt adt, struct ip_set_adt_opt *opt);

static int
IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
			  enum ipset_adt adt, u32 *lineno, u32 flags,
			  bool retried);

static const struct ip_set_type_variant mtype_variant = {
	.kadt	= mtype_kadt,
	.uadt	= mtype_uadt,
	.adt	= {
		[IPSET_ADD] = mtype_add,
		[IPSET_DEL] = mtype_del,
		[IPSET_TEST] = mtype_test,
	},
	.destroy = mtype_destroy,
	.flush	= mtype_flush,
	.head	= mtype_head,
	.list	= mtype_list,
	.uref	= mtype_uref,
	.resize	= mtype_resize,
	.same_set = mtype_same_set,
	.region_lock = true,
};

#ifdef IP_SET_EMIT_CREATE
static int
IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
			    struct nlattr *tb[], u32 flags)
{
	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
#ifdef IP_SET_HASH_WITH_MARKMASK
	u32 markmask;
#endif
	u8 hbits;
#if defined(IP_SET_HASH_WITH_NETMASK) || defined(IP_SET_HASH_WITH_BITMASK)
	int ret __attribute__((unused)) = 0;
	u8 netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
	union nf_inet_addr bitmask = onesmask;
#endif
	size_t hsize;
	struct htype *h;
	struct htable *t;
	u32 i;

	pr_debug("Create set %s with family %s\n",
		 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");

#ifdef IP_SET_PROTO_UNDEF
	if (set->family != NFPROTO_UNSPEC)
		return -IPSET_ERR_INVALID_FAMILY;
#else
	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
		return -IPSET_ERR_INVALID_FAMILY;
#endif

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

#ifdef IP_SET_HASH_WITH_MARKMASK
	/* Separated condition in order to avoid directive in argument list */
	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK)))
		return -IPSET_ERR_PROTOCOL;

	markmask = 0xffffffff;
	if (tb[IPSET_ATTR_MARKMASK]) {
		markmask = ntohl(nla_get_be32(tb[IPSET_ATTR_MARKMASK]));
		if (markmask == 0)
			return -IPSET_ERR_INVALID_MARKMASK;
	}
#endif

#ifdef IP_SET_HASH_WITH_NETMASK
	if (tb[IPSET_ATTR_NETMASK]) {
		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);

		if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
		    (set->family == NFPROTO_IPV6 && netmask > 128) ||
		    netmask == 0)
			return -IPSET_ERR_INVALID_NETMASK;

		/* we convert netmask to bitmask and store it */
		if (set->family == NFPROTO_IPV4)
			bitmask.ip = ip_set_netmask(netmask);
		else
			ip6_netmask(&bitmask, netmask);
	}
#endif

#ifdef IP_SET_HASH_WITH_BITMASK
	if (tb[IPSET_ATTR_BITMASK]) {
		/* bitmask and netmask do the same thing, allow only one of
		 * these options
		 */
		if (tb[IPSET_ATTR_NETMASK])
			return -IPSET_ERR_BITMASK_NETMASK_EXCL;

		if (set->family == NFPROTO_IPV4) {
			ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_BITMASK], &bitmask.ip);
			if (ret || !bitmask.ip)
				return -IPSET_ERR_INVALID_NETMASK;
		} else if (set->family == NFPROTO_IPV6) {
			ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_BITMASK], &bitmask);
			if (ret || ipv6_addr_any(&bitmask.in6))
				return -IPSET_ERR_INVALID_NETMASK;
		}

		if (nf_inet_addr_cmp(&bitmask, &zeromask))
			return -IPSET_ERR_INVALID_NETMASK;
	}
#endif

	if (tb[IPSET_ATTR_HASHSIZE]) {
		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
		if (hashsize < IPSET_MIMINAL_HASHSIZE)
			hashsize = IPSET_MIMINAL_HASHSIZE;
	}

	if (tb[IPSET_ATTR_MAXELEM])
		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);

	hsize = sizeof(*h);
	h = kzalloc(hsize, GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	/* Compute htable_bits from the user input parameter hashsize.
	 * Assume that hashsize == 2^htable_bits,
	 * otherwise round up to the first 2^n value.
	 */
	hbits = fls(hashsize - 1);
	hsize = htable_size(hbits);
	if (hsize == 0) {
		kfree(h);
		return -ENOMEM;
	}
	t = ip_set_alloc(hsize);
	if (!t) {
		kfree(h);
		return -ENOMEM;
	}
	t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
	if (!t->hregion) {
		ip_set_free(t);
		kfree(h);
		return -ENOMEM;
	}
	h->gc.set = set;
	for (i = 0; i < ahash_numof_locks(hbits); i++)
		spin_lock_init(&t->hregion[i].lock);
	h->maxelem = maxelem;
#if defined(IP_SET_HASH_WITH_NETMASK) || defined(IP_SET_HASH_WITH_BITMASK)
	h->bitmask = bitmask;
	h->netmask = netmask;
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	h->markmask = markmask;
#endif
	if (tb[IPSET_ATTR_INITVAL])
		h->initval = ntohl(nla_get_be32(tb[IPSET_ATTR_INITVAL]));
	else
		get_random_bytes(&h->initval, sizeof(h->initval));
	h->bucketsize = AHASH_MAX_SIZE;
	if (tb[IPSET_ATTR_BUCKETSIZE]) {
		h->bucketsize = nla_get_u8(tb[IPSET_ATTR_BUCKETSIZE]);
		if (h->bucketsize < AHASH_INIT_SIZE)
			h->bucketsize = AHASH_INIT_SIZE;
		else if (h->bucketsize > AHASH_MAX_SIZE)
			h->bucketsize = AHASH_MAX_SIZE;
		else if (h->bucketsize % 2)
			h->bucketsize += 1;
	}
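
	/* Example of the clamping above: a userspace-supplied bucketsize of 1
	 * becomes AHASH_INIT_SIZE (2), 7 is rounded up to 8 to keep it even,
	 * and anything above AHASH_MAX_SIZE (12) is capped at 12.
	 */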
	t->htable_bits = hbits;
	t->maxelem = h->maxelem / ahash_numof_locks(hbits);
	RCU_INIT_POINTER(h->table, t);

	INIT_LIST_HEAD(&h->ad);
	set->data = h;
#ifndef IP_SET_PROTO_UNDEF
	if (set->family == NFPROTO_IPV4) {
#endif
		set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
		set->dsize = ip_set_elem_len(set, tb,
			sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
			__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
#ifndef IP_SET_PROTO_UNDEF
	} else {
		set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
		set->dsize = ip_set_elem_len(set, tb,
			sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
			__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
	}
#endif
	set->timeout = IPSET_NO_TIMEOUT;
	if (tb[IPSET_ATTR_TIMEOUT]) {
		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
#ifndef IP_SET_PROTO_UNDEF
		if (set->family == NFPROTO_IPV4)
#endif
			IPSET_TOKEN(HTYPE, 4_gc_init)(&h->gc);
#ifndef IP_SET_PROTO_UNDEF
		else
			IPSET_TOKEN(HTYPE, 6_gc_init)(&h->gc);
#endif
	}
	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
		 set->name, jhash_size(t->htable_bits),
		 t->htable_bits, h->maxelem, set->data, t);

	return 0;
}
#endif /* IP_SET_EMIT_CREATE */

#undef HKEY_DATALEN