// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

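/* Per-set private data: an rbtree protected by a writer-side rwlock and a
 * seqcount. Lookups walk the tree locklessly under RCU and use the
 * seqcount to detect concurrent writers; expired elements are reaped by
 * the delayed gc_work.
 */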
struct nft_rbtree {
        struct rb_root          root;
        rwlock_t                lock;
        seqcount_rwlock_t       count;
        struct delayed_work     gc_work;
};

struct nft_rbtree_elem {
        struct rb_node          node;
        struct nft_set_ext      ext;
};

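/* An interval is represented by two elements: a start element carrying the
 * lower boundary in its key, and an end element whose NFT_SET_EXT_FLAGS
 * extension has NFT_SET_ELEM_INTERVAL_END set. Elements without a flags
 * extension are start elements.
 */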
static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
        return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
               (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
        return !nft_rbtree_interval_end(rbe);
}

static int nft_rbtree_cmp(const struct nft_set *set,
                          const struct nft_rbtree_elem *e1,
                          const struct nft_rbtree_elem *e2)
{
        return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
                      set->klen);
}

static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
{
        return nft_set_elem_expired(&rbe->ext) ||
               nft_set_elem_is_dead(&rbe->ext);
}

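/* Lockless lookup. Note that the tree is kept in *descending* key order:
 * keys greater than a node's live in its left subtree, so rb_first() holds
 * the highest key. The walk aborts (and the caller retries) whenever the
 * seqcount indicates a concurrent writer. "interval" remembers the closest
 * element with a smaller key, which provides the match for interval sets
 * when no exact key is found.
 */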
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
                                const u32 *key, const struct nft_set_ext **ext,
                                unsigned int seq)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        const struct nft_rbtree_elem *rbe, *interval = NULL;
        u8 genmask = nft_genmask_cur(net);
        const struct rb_node *parent;
        int d;

        parent = rcu_dereference_raw(priv->root.rb_node);
        while (parent != NULL) {
                if (read_seqcount_retry(&priv->count, seq))
                        return false;

                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
                if (d < 0) {
                        parent = rcu_dereference_raw(parent->rb_left);
                        if (interval &&
                            !nft_rbtree_cmp(set, rbe, interval) &&
                            nft_rbtree_interval_end(rbe) &&
                            nft_rbtree_interval_start(interval))
                                continue;
                        interval = rbe;
                } else if (d > 0) {
                        parent = rcu_dereference_raw(parent->rb_right);
                } else {
                        if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = rcu_dereference_raw(parent->rb_left);
                                continue;
                        }

                        if (nft_rbtree_elem_expired(rbe))
                                return false;

                        if (nft_rbtree_interval_end(rbe)) {
                                if (nft_set_is_anonymous(set))
                                        return false;
                                parent = rcu_dereference_raw(parent->rb_left);
                                interval = NULL;
                                continue;
                        }

                        *ext = &rbe->ext;
                        return true;
                }
        }

        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
            !nft_rbtree_elem_expired(interval) &&
            nft_rbtree_interval_start(interval)) {
                *ext = &interval->ext;
                return true;
        }

        return false;
}

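/* Fast path: lockless lookup first; if it raced with a writer without
 * finding a match, retry once more under the read lock for a stable view.
 */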
INDIRECT_CALLABLE_SCOPE
bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
                       const u32 *key, const struct nft_set_ext **ext)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        unsigned int seq = read_seqcount_begin(&priv->count);
        bool ret;

        ret = __nft_rbtree_lookup(net, set, key, ext, seq);
        if (ret || !read_seqcount_retry(&priv->count, seq))
                return ret;

        read_lock_bh(&priv->lock);
        seq = read_seqcount_begin(&priv->count);
        ret = __nft_rbtree_lookup(net, set, key, ext, seq);
        read_unlock_bh(&priv->lock);

        return ret;
}

static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
                             const u32 *key, struct nft_rbtree_elem **elem,
                             unsigned int seq, unsigned int flags, u8 genmask)
{
        struct nft_rbtree_elem *rbe, *interval = NULL;
        struct nft_rbtree *priv = nft_set_priv(set);
        const struct rb_node *parent;
        const void *this;
        int d;

        parent = rcu_dereference_raw(priv->root.rb_node);
        while (parent != NULL) {
                if (read_seqcount_retry(&priv->count, seq))
                        return false;

                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                this = nft_set_ext_key(&rbe->ext);
                d = memcmp(this, key, set->klen);
                if (d < 0) {
                        parent = rcu_dereference_raw(parent->rb_left);
                        if (!(flags & NFT_SET_ELEM_INTERVAL_END))
                                interval = rbe;
                } else if (d > 0) {
                        parent = rcu_dereference_raw(parent->rb_right);
                        if (flags & NFT_SET_ELEM_INTERVAL_END)
                                interval = rbe;
                } else {
                        if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = rcu_dereference_raw(parent->rb_left);
                                continue;
                        }

                        if (nft_set_elem_expired(&rbe->ext))
                                return false;

                        if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
                            (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
                            (flags & NFT_SET_ELEM_INTERVAL_END)) {
                                *elem = rbe;
                                return true;
                        }

                        if (nft_rbtree_interval_end(rbe))
                                interval = NULL;

                        parent = rcu_dereference_raw(parent->rb_left);
                }
        }

        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
            !nft_set_elem_expired(&interval->ext) &&
            ((!nft_rbtree_interval_end(interval) &&
              !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
             (nft_rbtree_interval_end(interval) &&
              (flags & NFT_SET_ELEM_INTERVAL_END)))) {
                *elem = interval;
                return true;
        }

        return false;
}

static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
                            const struct nft_set_elem *elem, unsigned int flags)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        unsigned int seq = read_seqcount_begin(&priv->count);
        struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
        const u32 *key = (const u32 *)&elem->key.val;
        u8 genmask = nft_genmask_cur(net);
        bool ret;

        ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
        if (ret || !read_seqcount_retry(&priv->count, seq))
                return rbe;

        read_lock_bh(&priv->lock);
        seq = read_seqcount_begin(&priv->count);
        ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
        if (!ret)
                rbe = ERR_PTR(-ENOENT);
        read_unlock_bh(&priv->lock);

        return rbe;
}

static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
                                 struct nft_rbtree *priv,
                                 struct nft_rbtree_elem *rbe)
{
        struct nft_set_elem elem = {
                .priv = rbe,
        };

        nft_setelem_data_deactivate(net, set, &elem);
        rb_erase(&rbe->node, &priv->root);
}

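/* Synchronous gc of an expired element found while inserting: remove the
 * element itself and, if one exists, the active end element that closes
 * its interval, queueing both for destruction. The removed end element is
 * returned so that the caller can notice when one of its overlap
 * candidates has just been collected.
 */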
static const struct nft_rbtree_elem *
nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
                   struct nft_rbtree_elem *rbe, u8 genmask)
{
        struct nft_set *set = (struct nft_set *)__set;
        struct rb_node *prev = rb_prev(&rbe->node);
        struct net *net = read_pnet(&set->net);
        struct nft_rbtree_elem *rbe_prev;
        struct nft_trans_gc *gc;

        gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
        if (!gc)
                return ERR_PTR(-ENOMEM);

        /* Search for the end interval coming before this element.
         * End intervals don't carry a timeout extension; they are
         * coupled with the interval start element.
         */
        while (prev) {
                rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
                if (nft_rbtree_interval_end(rbe_prev) &&
                    nft_set_elem_active(&rbe_prev->ext, genmask))
                        break;

                prev = rb_prev(prev);
        }

        rbe_prev = NULL;
        if (prev) {
                rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
                nft_rbtree_gc_remove(net, set, priv, rbe_prev);

                /* There is always room in this trans gc for this element:
                 * the memory allocation never actually happens, hence the
                 * WARN splat if it ever did. No need to set
                 * NFT_SET_ELEM_DEAD_BIT, this is synchronous gc which
                 * never fails.
                 */
                gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
                if (WARN_ON_ONCE(!gc))
                        return ERR_PTR(-ENOMEM);

                nft_trans_gc_elem_add(gc, rbe_prev);
        }

        nft_rbtree_gc_remove(net, set, priv, rbe);
        gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
        if (WARN_ON_ONCE(!gc))
                return ERR_PTR(-ENOMEM);

        nft_trans_gc_elem_add(gc, rbe);

        nft_trans_gc_queue_sync_done(gc);

        return rbe_prev;
}

static bool nft_rbtree_update_first(const struct nft_set *set,
                                    struct nft_rbtree_elem *rbe,
                                    struct rb_node *first)
{
        struct nft_rbtree_elem *first_elem;

        first_elem = rb_entry(first, struct nft_rbtree_elem, node);
        /* this element is closest to where the new element is to be inserted:
         * update the first element for the node list path.
         */
        if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
                return true;

        return false;
}

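/* Overlap detection on insertion. As a rough sketch (the end-key encoding
 * is chosen by userspace, so the notation below is only illustrative),
 * two disjoint ranges r1 < r2 appear in the descending rb_next() walk as
 *
 *      end(r2), start(r2), end(r1), start(r1)
 *
 * so the new element's closest active greater-or-equal (rbe_ge) and
 * less-or-equal (rbe_le) neighbours are enough to decide whether it
 * overlaps an existing range.
 */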
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                               struct nft_rbtree_elem *new,
                               struct nft_set_ext **ext)
{
        struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
        struct rb_node *node, *next, *parent, **p, *first = NULL;
        struct nft_rbtree *priv = nft_set_priv(set);
        u8 cur_genmask = nft_genmask_cur(net);
        u8 genmask = nft_genmask_next(net);
        int d;

        /* Descend the tree to search for an existing element whose key is
         * greater than the new element's. That node is the starting point
         * for walking the ordered list of elements to find possible
         * overlaps.
         */
        parent = NULL;
        p = &priv->root.rb_node;
        while (*p != NULL) {
                parent = *p;
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
                d = nft_rbtree_cmp(set, rbe, new);

                if (d < 0) {
                        p = &parent->rb_left;
                } else if (d > 0) {
                        if (!first ||
                            nft_rbtree_update_first(set, rbe, first))
                                first = &rbe->node;

                        p = &parent->rb_right;
                } else {
                        if (nft_rbtree_interval_end(rbe))
                                p = &parent->rb_left;
                        else
                                p = &parent->rb_right;
                }
        }

        if (!first)
                first = rb_first(&priv->root);

        /* Detect overlap by going through the list of valid tree nodes.
         * Values stored in the tree are in reversed order, starting from
         * highest to lowest value.
         */
        for (node = first; node != NULL; node = next) {
                next = rb_next(node);

                rbe = rb_entry(node, struct nft_rbtree_elem, node);

                if (!nft_set_elem_active(&rbe->ext, genmask))
                        continue;

                /* perform garbage collection to avoid bogus overlap reports
                 * but skip new elements in this transaction.
                 */
                if (nft_set_elem_expired(&rbe->ext) &&
                    nft_set_elem_active(&rbe->ext, cur_genmask)) {
                        const struct nft_rbtree_elem *removed_end;

                        removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
                        if (IS_ERR(removed_end))
                                return PTR_ERR(removed_end);

                        if (removed_end == rbe_le || removed_end == rbe_ge)
                                return -EAGAIN;

                        continue;
                }

                d = nft_rbtree_cmp(set, rbe, new);
                if (d == 0) {
                        /* Matching end element: no need to look for an
                         * overlapping greater or equal element.
                         */
                        if (nft_rbtree_interval_end(rbe)) {
                                rbe_le = rbe;
                                break;
                        }

                        /* first element that is greater or equal to key value. */
                        if (!rbe_ge) {
                                rbe_ge = rbe;
                                continue;
                        }

                        /* this is a closer greater-or-equal element; update it. */
                        if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
                                rbe_ge = rbe;
                                continue;
                        }

                        /* element is equal to the key value; make sure the
                         * flags are the same: an existing greater-or-equal
                         * start element must not be replaced by a
                         * greater-or-equal end element.
                         */
                        if ((nft_rbtree_interval_start(new) &&
                             nft_rbtree_interval_start(rbe_ge)) ||
                            (nft_rbtree_interval_end(new) &&
                             nft_rbtree_interval_end(rbe_ge))) {
                                rbe_ge = rbe;
                                continue;
                        }
                } else if (d > 0) {
                        /* annotate element greater than the new element. */
                        rbe_ge = rbe;
                        continue;
                } else if (d < 0) {
                        /* annotate element less than the new element. */
                        rbe_le = rbe;
                        break;
                }
        }

        /* - new start element matching existing start element: full overlap
         *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
         */
        if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
            nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
                *ext = &rbe_ge->ext;
                return -EEXIST;
        }

        /* - new end element matching existing end element: full overlap
         *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
         */
        if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
            nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
                *ext = &rbe_le->ext;
                return -EEXIST;
        }

        /* - new start element with existing closest, less or equal key value
         *   being a start element: partial overlap, reported as -ENOTEMPTY.
         *   Anonymous sets allow for two consecutive start elements since
         *   they are constant, skip them to avoid bogus overlap reports.
         */
        if (!nft_set_is_anonymous(set) && rbe_le &&
            nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
                return -ENOTEMPTY;

        /* - new end element with existing closest, less or equal key value
         *   being an end element: partial overlap, reported as -ENOTEMPTY.
         */
        if (rbe_le &&
            nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
                return -ENOTEMPTY;

        /* - new end element with existing closest, greater or equal key value
         *   being an end element: partial overlap, reported as -ENOTEMPTY.
         */
        if (rbe_ge &&
            nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
                return -ENOTEMPTY;

        /* Accepted element: pick insertion point depending on key value */
        parent = NULL;
        p = &priv->root.rb_node;
        while (*p != NULL) {
                parent = *p;
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
                d = nft_rbtree_cmp(set, rbe, new);

                if (d < 0)
                        p = &parent->rb_left;
                else if (d > 0)
                        p = &parent->rb_right;
                else if (nft_rbtree_interval_end(rbe))
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        rb_link_node_rcu(&new->node, parent, p);
        rb_insert_color(&new->node, &priv->root);
        return 0;
}

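/* Serialized insertion: the write lock and seqcount exclude lockless
 * readers while the tree is modified. __nft_rbtree_insert() returns
 * -EAGAIN when synchronous gc removed one of the overlap candidates it
 * had already recorded, in which case the descent is simply redone.
 */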
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                             const struct nft_set_elem *elem,
                             struct nft_set_ext **ext)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe = elem->priv;
        int err;

        do {
                if (fatal_signal_pending(current))
                        return -EINTR;

                cond_resched();

                write_lock_bh(&priv->lock);
                write_seqcount_begin(&priv->count);
                err = __nft_rbtree_insert(net, set, rbe, ext);
                write_seqcount_end(&priv->count);
                write_unlock_bh(&priv->lock);
        } while (err == -EAGAIN);

        return err;
}

static void nft_rbtree_remove(const struct net *net,
                              const struct nft_set *set,
                              const struct nft_set_elem *elem)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe = elem->priv;

        write_lock_bh(&priv->lock);
        write_seqcount_begin(&priv->count);
        rb_erase(&rbe->node, &priv->root);
        write_seqcount_end(&priv->count);
        write_unlock_bh(&priv->lock);
}

static void nft_rbtree_activate(const struct net *net,
                                const struct nft_set *set,
                                const struct nft_set_elem *elem)
{
        struct nft_rbtree_elem *rbe = elem->priv;

        nft_set_elem_change_active(net, set, &rbe->ext);
}

static bool nft_rbtree_flush(const struct net *net,
                             const struct nft_set *set, void *priv)
{
        struct nft_rbtree_elem *rbe = priv;

        nft_set_elem_change_active(net, set, &rbe->ext);

        return true;
}

static void *nft_rbtree_deactivate(const struct net *net,
                                   const struct nft_set *set,
                                   const struct nft_set_elem *elem)
{
        const struct nft_rbtree *priv = nft_set_priv(set);
        const struct rb_node *parent = priv->root.rb_node;
        struct nft_rbtree_elem *rbe, *this = elem->priv;
        u8 genmask = nft_genmask_next(net);
        int d;

        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
                           set->klen);
                if (d < 0) {
                        parent = parent->rb_left;
                } else if (d > 0) {
                        parent = parent->rb_right;
                } else {
                        if (nft_rbtree_interval_end(rbe) &&
                            nft_rbtree_interval_start(this)) {
                                parent = parent->rb_left;
                                continue;
                        } else if (nft_rbtree_interval_start(rbe) &&
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
                        } else if (nft_set_elem_expired(&rbe->ext)) {
                                break;
                        } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = parent->rb_left;
                                continue;
                        }
                        nft_rbtree_flush(net, set, rbe);
                        return rbe;
                }
        }
        return NULL;
}

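/* Iterate over all elements, in descending key order, under the read
 * lock, honouring iter->skip and the iterator's genmask.
 */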
static void nft_rbtree_walk(const struct nft_ctx *ctx,
                            struct nft_set *set,
                            struct nft_set_iter *iter)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct nft_set_elem elem;
        struct rb_node *node;

        read_lock_bh(&priv->lock);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
                rbe = rb_entry(node, struct nft_rbtree_elem, node);

                if (iter->count < iter->skip)
                        goto cont;
                if (!nft_set_elem_active(&rbe->ext, iter->genmask))
                        goto cont;

                elem.priv = rbe;

                iter->err = iter->fn(ctx, set, iter, &elem);
                if (iter->err < 0) {
                        read_unlock_bh(&priv->lock);
                        return;
                }
cont:
                iter->count++;
        }
        read_unlock_bh(&priv->lock);
}

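/* Asynchronous gc worker: expired start elements and their paired end
 * elements are marked dead and queued for destruction via trans gc. The
 * run is abandoned and retried later if the ruleset generation (gc_seq)
 * changes underneath the walk.
 */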
static void nft_rbtree_gc(struct work_struct *work)
{
        struct nft_rbtree_elem *rbe, *rbe_end = NULL;
        struct nftables_pernet *nft_net;
        struct nft_rbtree *priv;
        struct nft_trans_gc *gc;
        struct rb_node *node;
        struct nft_set *set;
        unsigned int gc_seq;
        struct net *net;

        priv = container_of(work, struct nft_rbtree, gc_work.work);
        set = nft_set_container_of(priv);
        net = read_pnet(&set->net);
        nft_net = nft_pernet(net);
        gc_seq = READ_ONCE(nft_net->gc_seq);

        if (nft_set_gc_is_pending(set))
                goto done;

        gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
        if (!gc)
                goto done;

        read_lock_bh(&priv->lock);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {

                /* Ruleset has been updated, try later. */
                if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
                        nft_trans_gc_destroy(gc);
                        gc = NULL;
                        goto try_later;
                }

                rbe = rb_entry(node, struct nft_rbtree_elem, node);

                if (nft_set_elem_is_dead(&rbe->ext))
                        goto dead_elem;

                /* elements are reversed in the rbtree for historical reasons,
                 * from highest to lowest value; that is why the end element
                 * is always visited before the start element.
                 */
                if (nft_rbtree_interval_end(rbe)) {
                        rbe_end = rbe;
                        continue;
                }
                if (!nft_set_elem_expired(&rbe->ext))
                        continue;

                nft_set_elem_dead(&rbe->ext);

                if (!rbe_end)
                        continue;

                nft_set_elem_dead(&rbe_end->ext);

                gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
                if (!gc)
                        goto try_later;

                nft_trans_gc_elem_add(gc, rbe_end);
                rbe_end = NULL;
dead_elem:
                gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
                if (!gc)
                        goto try_later;

                nft_trans_gc_elem_add(gc, rbe);
        }

        gc = nft_trans_gc_catchall_async(gc, gc_seq);

try_later:
        read_unlock_bh(&priv->lock);

        if (gc)
                nft_trans_gc_queue_async_done(gc);
done:
        queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                           nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
                               const struct nft_set_desc *desc)
{
        return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
                           const struct nft_set_desc *desc,
                           const struct nlattr * const nla[])
{
        struct nft_rbtree *priv = nft_set_priv(set);

        rwlock_init(&priv->lock);
        seqcount_rwlock_init(&priv->count, &priv->lock);
        priv->root = RB_ROOT;

        INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
        if (set->flags & NFT_SET_TIMEOUT)
                queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                                   nft_set_gc_interval(set));

        return 0;
}

static void nft_rbtree_destroy(const struct nft_ctx *ctx,
                               const struct nft_set *set)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct rb_node *node;

        cancel_delayed_work_sync(&priv->gc_work);
        rcu_barrier();
        while ((node = priv->root.rb_node) != NULL) {
                rb_erase(node, &priv->root);
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
                nf_tables_set_elem_destroy(ctx, set, rbe);
        }
}

static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
                                struct nft_set_estimate *est)
{
        if (desc->field_count > 1)
                return false;

        if (desc->size)
                est->size = sizeof(struct nft_rbtree) +
                            desc->size * sizeof(struct nft_rbtree_elem);
        else
                est->size = ~0;

        est->lookup = NFT_SET_CLASS_O_LOG_N;
        est->space = NFT_SET_CLASS_O_N;

        return true;
}

const struct nft_set_type nft_set_rbtree_type = {
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
        .ops            = {
                .privsize       = nft_rbtree_privsize,
                .elemsize       = offsetof(struct nft_rbtree_elem, ext),
                .estimate       = nft_rbtree_estimate,
                .init           = nft_rbtree_init,
                .destroy        = nft_rbtree_destroy,
                .insert         = nft_rbtree_insert,
                .remove         = nft_rbtree_remove,
                .deactivate     = nft_rbtree_deactivate,
                .flush          = nft_rbtree_flush,
                .activate       = nft_rbtree_activate,
                .lookup         = nft_rbtree_lookup,
                .walk           = nft_rbtree_walk,
                .get            = nft_rbtree_get,
        },
};