/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
 *                         Patrick Schaaf <bof@bof.de>
 *                         Martin Josefsson <gandalf@wlug.westbo.se>
 * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
 */
#ifndef _IP_SET_H
#define _IP_SET_H

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <uapi/linux/netfilter/ipset/ip_set.h>

#define _IP_SET_MODULE_DESC(a, b, c) \
	MODULE_DESCRIPTION(a " type of IP sets, revisions " b "-" c)
#define IP_SET_MODULE_DESC(a, b, c) \
	_IP_SET_MODULE_DESC(a, __stringify(b), __stringify(c))
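
/* Editor's illustration (hypothetical module, not from the original source):
 * a "hash:ip" type module with revisions 0..4 would use
 *
 *	IP_SET_MODULE_DESC("hash:ip", 0, 4);
 *
 * which, via __stringify(), expands to
 *
 *	MODULE_DESCRIPTION("hash:ip type of IP sets, revisions 0-4");
 */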

/* Set features */
enum ip_set_feature {
	IPSET_TYPE_IP_FLAG = 0,
	IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG),
	IPSET_TYPE_PORT_FLAG = 1,
	IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG),
	IPSET_TYPE_MAC_FLAG = 2,
	IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG),
	IPSET_TYPE_IP2_FLAG = 3,
	IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
	IPSET_TYPE_NAME_FLAG = 4,
	IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
	IPSET_TYPE_IFACE_FLAG = 5,
	IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG),
	IPSET_TYPE_MARK_FLAG = 6,
	IPSET_TYPE_MARK = (1 << IPSET_TYPE_MARK_FLAG),
	IPSET_TYPE_NOMATCH_FLAG = 7,
	IPSET_TYPE_NOMATCH = (1 << IPSET_TYPE_NOMATCH_FLAG),
	/* Strictly speaking not a feature, but a flag for dumping:
	 * this settype must be dumped last */
	IPSET_DUMP_LAST_FLAG = 8,
	IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
};

/* Set extensions */
enum ip_set_extension {
	IPSET_EXT_BIT_TIMEOUT = 0,
	IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
	IPSET_EXT_BIT_COUNTER = 1,
	IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
	IPSET_EXT_BIT_COMMENT = 2,
	IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
	IPSET_EXT_BIT_SKBINFO = 3,
	IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO),
	/* Mark set with an extension which needs to call destroy */
	IPSET_EXT_BIT_DESTROY = 7,
	IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
};

#define SET_WITH_TIMEOUT(s)	((s)->extensions & IPSET_EXT_TIMEOUT)
#define SET_WITH_COUNTER(s)	((s)->extensions & IPSET_EXT_COUNTER)
#define SET_WITH_COMMENT(s)	((s)->extensions & IPSET_EXT_COMMENT)
#define SET_WITH_SKBINFO(s)	((s)->extensions & IPSET_EXT_SKBINFO)
#define SET_WITH_FORCEADD(s)	((s)->flags & IPSET_CREATE_FLAG_FORCEADD)

/* Extension id, in size order */
enum ip_set_ext_id {
	IPSET_EXT_ID_COUNTER = 0,
	IPSET_EXT_ID_TIMEOUT,
	IPSET_EXT_ID_SKBINFO,
	IPSET_EXT_ID_COMMENT,
	IPSET_EXT_ID_MAX,
};

struct ip_set;

/* Extension type */
struct ip_set_ext_type {
	/* Destroy extension private data (can be NULL) */
	void (*destroy)(struct ip_set *set, void *ext);
	enum ip_set_extension type;
	enum ipset_cadt_flags flag;
	/* Size and minimal alignment */
	u8 len;
	u8 align;
};

extern const struct ip_set_ext_type ip_set_extensions[];

struct ip_set_counter {
	atomic64_t bytes;
	atomic64_t packets;
};

struct ip_set_comment_rcu {
	struct rcu_head rcu;
	char str[0];
};

struct ip_set_comment {
	struct ip_set_comment_rcu __rcu *c;
};

struct ip_set_skbinfo {
	u32 skbmark;
	u32 skbmarkmask;
	u32 skbprio;
	u16 skbqueue;
	u16 __pad;
};

struct ip_set_ext {
	struct ip_set_skbinfo skbinfo;
	u64 packets;
	u64 bytes;
	char *comment;
	u32 timeout;
	u8 packets_op;
	u8 bytes_op;
};

struct ip_set;

#define ext_timeout(e, s)	\
	((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, s)	\
	((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
#define ext_comment(e, s)	\
	((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
#define ext_skbinfo(e, s)	\
	((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
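
/* Editor's sketch of how the accessors above are meant to be used: given a
 * pointer to an element stored in a set, the per-set offset table locates
 * each enabled extension behind the fixed-size element data.  Names below
 * are hypothetical, for illustration only:
 *
 *	void *elem = ...;			// element stored in the set
 *	if (SET_WITH_COUNTER(set))
 *		ip_set_update_counter(ext_counter(elem, set), ext, flags);
 */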

typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
			   const struct ip_set_ext *ext,
			   struct ip_set_ext *mext, u32 cmdflags);

/* Kernel API function options */
struct ip_set_adt_opt {
	u8 family;		/* Actual protocol family */
	u8 dim;			/* Dimension of match/target */
	u8 flags;		/* Direction and negation flags */
	u32 cmdflags;		/* Command-like flags */
	struct ip_set_ext ext;	/* Extensions */
};

/* Set type, variant-specific part */
struct ip_set_type_variant {
	/* Kernelspace: test/add/del entries
	 * returns negative error code,
	 * zero for no match/success to add/delete
	 * positive for matching element */
	int (*kadt)(struct ip_set *set, const struct sk_buff *skb,
		    const struct xt_action_param *par,
		    enum ipset_adt adt, struct ip_set_adt_opt *opt);

	/* Userspace: test/add/del entries
	 * returns negative error code,
	 * zero for no match/success to add/delete
	 * positive for matching element */
	int (*uadt)(struct ip_set *set, struct nlattr *tb[],
		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);

	/* Low level add/del/test functions */
	ipset_adtfn adt[IPSET_ADT_MAX];

	/* When adding entries and set is full, try to resize the set */
	int (*resize)(struct ip_set *set, bool retried);
	/* Destroy the set */
	void (*destroy)(struct ip_set *set);
	/* Flush the elements */
	void (*flush)(struct ip_set *set);
	/* Expire entries before listing */
	void (*expire)(struct ip_set *set);
	/* List set header data */
	int (*head)(struct ip_set *set, struct sk_buff *skb);
	/* List elements */
	int (*list)(const struct ip_set *set, struct sk_buff *skb,
		    struct netlink_callback *cb);
	/* Keep listing private when resizing runs parallel */
	void (*uref)(struct ip_set *set, struct netlink_callback *cb,
		     bool start);

	/* Return true if "b" set is the same as "a"
	 * according to the create set parameters */
	bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
};

/* The core set type structure */
struct ip_set_type {
	struct list_head list;

	/* Typename */
	char name[IPSET_MAXNAMELEN];
	/* Protocol version */
	u8 protocol;
	/* Set type dimension */
	u8 dimension;
	/*
	 * Supported family: may be NFPROTO_UNSPEC for both
	 * NFPROTO_IPV4/NFPROTO_IPV6.
	 */
	u8 family;
	/* Type revisions */
	u8 revision_min, revision_max;
	/* Set features to control swapping */
	u16 features;

	/* Create set */
	int (*create)(struct net *net, struct ip_set *set,
		      struct nlattr *tb[], u32 flags);

	/* Attribute policies */
	const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
	const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1];

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;
};

/* register and unregister set type */
extern int ip_set_type_register(struct ip_set_type *set_type);
extern void ip_set_type_unregister(struct ip_set_type *set_type);
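
/* Editor's sketch (hypothetical names, for illustration only) of how a set
 * type module typically wires these together in its init/exit functions:
 *
 *	static struct ip_set_type foo_type __read_mostly = {
 *		.name		= "hash:foo",
 *		.protocol	= IPSET_PROTOCOL,
 *		.features	= IPSET_TYPE_IP,
 *		.dimension	= IPSET_DIM_ONE,
 *		.family		= NFPROTO_UNSPEC,
 *		.create		= foo_create,
 *		.me		= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return ip_set_type_register(&foo_type);
 *	}
 *
 *	static void __exit foo_fini(void)
 *	{
 *		ip_set_type_unregister(&foo_type);
 *	}
 */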

/* A generic IP set */
struct ip_set {
	/* The name of the set */
	char name[IPSET_MAXNAMELEN];
	/* Lock protecting the set data */
	spinlock_t lock;
	/* References to the set */
	u32 ref;
	/* References to the set for netlink events like dump,
	 * ref can be swapped out by ip_set_swap
	 */
	u32 ref_netlink;
	/* The core set type */
	struct ip_set_type *type;
	/* The type variant doing the real job */
	const struct ip_set_type_variant *variant;
	/* The actual INET family of the set */
	u8 family;
	/* The type revision */
	u8 revision;
	/* Extensions */
	u8 extensions;
	/* Create flags */
	u8 flags;
	/* Default timeout value, if enabled */
	u32 timeout;
	/* Number of elements (vs timeout) */
	u32 elements;
	/* Size of the dynamic extensions (vs timeout) */
	size_t ext_size;
	/* Element data size */
	size_t dsize;
	/* Offsets to extensions in elements */
	size_t offset[IPSET_EXT_ID_MAX];
	/* The type specific data */
	void *data;
};

static inline void
ip_set_ext_destroy(struct ip_set *set, void *data)
{
	/* Check that the extension is enabled for the set and
	 * call its destroy function for its extension part in data.
	 */
	if (SET_WITH_COMMENT(set))
		ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
			set, ext_comment(data, set));
}

static inline int
ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
{
	u32 cadt_flags = 0;

	if (SET_WITH_TIMEOUT(set))
		if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
					   htonl(set->timeout))))
			return -EMSGSIZE;
	if (SET_WITH_COUNTER(set))
		cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
	if (SET_WITH_COMMENT(set))
		cadt_flags |= IPSET_FLAG_WITH_COMMENT;
	if (SET_WITH_SKBINFO(set))
		cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
	if (SET_WITH_FORCEADD(set))
		cadt_flags |= IPSET_FLAG_WITH_FORCEADD;

	if (!cadt_flags)
		return 0;
	return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
}

/* Netlink CB args */
enum {
	IPSET_CB_NET = 0,	/* net namespace */
	IPSET_CB_PROTO,		/* ipset protocol */
	IPSET_CB_DUMP,		/* dump single set/all sets */
	IPSET_CB_INDEX,		/* set index */
	IPSET_CB_PRIVATE,	/* set private data */
	IPSET_CB_ARG0,		/* type specific */
};

/* register and unregister set references */
extern ip_set_id_t ip_set_get_byname(struct net *net,
				     const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);

/* API for iptables set match, and SET target */

extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
		      const struct xt_action_param *par,
		      struct ip_set_adt_opt *opt);
extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
		      const struct xt_action_param *par,
		      struct ip_set_adt_opt *opt);
extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
		       const struct xt_action_param *par,
		       struct ip_set_adt_opt *opt);
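
/* Editor's example of the intended call pattern from an iptables match;
 * names and values are illustrative, not taken from xt_set.c:
 *
 *	struct ip_set_adt_opt opt = {
 *		.family	= xt_family(par),
 *		.dim	= IPSET_DIM_ONE,
 *		.flags	= IPSET_DIM_ONE_SRC,
 *		.ext.timeout = IPSET_NO_TIMEOUT,  // fall back to the set default
 *	};
 *
 *	ret = ip_set_test(info->index, skb, par, &opt);
 */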

/* Utility functions */
extern void *ip_set_alloc(size_t size);
extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
			      size_t len, size_t align);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
				 struct ip_set_ext *ext);
extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
				 const void *e, bool active);
extern bool ip_set_match_extensions(struct ip_set *set,
				    const struct ip_set_ext *ext,
				    struct ip_set_ext *mext,
				    u32 flags, void *data);

static inline int
ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
{
	__be32 ip;
	int ret = ip_set_get_ipaddr4(nla, &ip);

	if (ret)
		return ret;
	*ipaddr = ntohl(ip);
	return 0;
}

/* Ignore IPSET_ERR_EXIST errors if asked to do so? */
static inline bool
ip_set_eexist(int ret, u32 flags)
{
	return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
}

/* Match elements marked with nomatch */
static inline bool
ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set)
{
	return adt == IPSET_TEST &&
	       (set->type->features & IPSET_TYPE_NOMATCH) &&
	       ((flags >> 16) & IPSET_FLAG_NOMATCH) &&
	       (ret > 0 || ret == -ENOTEMPTY);
}

/* Check the NLA_F_NET_BYTEORDER flag */
static inline bool
ip_set_attr_netorder(struct nlattr *tb[], int type)
{
	return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
}

static inline bool
ip_set_optattr_netorder(struct nlattr *tb[], int type)
{
	return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
}

/* Useful converters */
static inline u32
ip_set_get_h32(const struct nlattr *attr)
{
	return ntohl(nla_get_be32(attr));
}

static inline u16
ip_set_get_h16(const struct nlattr *attr)
{
	return ntohs(nla_get_be16(attr));
}

static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
{
	struct nlattr *__nested = nla_nest_start(skb, type);
	int ret;

	if (!__nested)
		return -EMSGSIZE;
	ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
	if (!ret)
		nla_nest_end(skb, __nested);
	return ret;
}

static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
				  const struct in6_addr *ipaddrptr)
{
	struct nlattr *__nested = nla_nest_start(skb, type);
	int ret;

	if (!__nested)
		return -EMSGSIZE;
	ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
	if (!ret)
		nla_nest_end(skb, __nested);
	return ret;
}

/* Get address from skbuff */
static inline __be32
ip4addr(const struct sk_buff *skb, bool src)
{
	return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
}

static inline void
ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
{
	*addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
}

static inline void
ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
{
	memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
	       sizeof(*addr));
}

/* Calculate the bytes required to store the inclusive range of a-b */
static inline int
bitmap_bytes(u32 a, u32 b)
{
	return 4 * ((((b - a + 8) / 8) + 3) / 4);
}
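
/* Worked example (editor's note): the inclusive range 0..15 needs 16 bits,
 * i.e. (15 - 0 + 8) / 8 = 2 bytes, which the expression above rounds up to
 * the next multiple of 4: bitmap_bytes(0, 15) == 4, bitmap_bytes(0, 255) == 32.
 */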

/* How often should the gc be run by default */
#define IPSET_GC_TIME			(3 * 60)

/* Timeout period depending on the timeout value of the given set */
#define IPSET_GC_PERIOD(timeout) \
	((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)
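
/* Editor's note, worked example: with a set timeout of 600 seconds the gc
 * runs every min(600/3, 180) = 180 seconds; with a 30 second timeout every
 * 10 seconds; below 3 seconds the expression falls back to a 1 second period.
 */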

/* Entry is set with no timeout value */
#define IPSET_ELEM_PERMANENT	0

/* Set is defined with timeout support: timeout value may be 0 */
#define IPSET_NO_TIMEOUT	UINT_MAX

/* Max timeout value, see msecs_to_jiffies() in jiffies.h */
#define IPSET_MAX_TIMEOUT	(UINT_MAX >> 1)/MSEC_PER_SEC

#define ip_set_adt_opt_timeout(opt, set)	\
((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)

static inline unsigned int
ip_set_timeout_uget(struct nlattr *tb)
{
	unsigned int timeout = ip_set_get_h32(tb);

	/* Normalize to fit into jiffies */
	if (timeout > IPSET_MAX_TIMEOUT)
		timeout = IPSET_MAX_TIMEOUT;

	return timeout;
}

static inline bool
ip_set_timeout_expired(const unsigned long *t)
{
	return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
}

static inline void
ip_set_timeout_set(unsigned long *timeout, u32 value)
{
	unsigned long t;

	if (!value) {
		*timeout = IPSET_ELEM_PERMANENT;
		return;
	}

	t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
	if (t == IPSET_ELEM_PERMANENT)
		/* Bingo! :-) */
		t--;
	*timeout = t;
}

static inline u32
ip_set_timeout_get(const unsigned long *timeout)
{
	u32 t;

	if (*timeout == IPSET_ELEM_PERMANENT)
		return 0;

	t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
	/* Zero value in userspace means no timeout */
	return t == 0 ? 1 : t;
}
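
/* Editor's note on the conventions above: a stored value of
 * IPSET_ELEM_PERMANENT (0) means "no expiry", so ip_set_timeout_set()
 * nudges a computed deadline that happens to equal 0 back by one jiffy,
 * and ip_set_timeout_get() reports at least 1 second for any entry that
 * still has a timeout, since 0 means "permanent" towards userspace.
 */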

static inline char*
ip_set_comment_uget(struct nlattr *tb)
{
	return nla_data(tb);
}

/* Called from uadd only, protected by the set spinlock.
 * The kadt functions don't use the comment extensions in any way.
 */
static inline void
ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
		    const struct ip_set_ext *ext)
{
	struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
	size_t len = ext->comment ? strlen(ext->comment) : 0;

	if (unlikely(c)) {
		set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
		kfree_rcu(c, rcu);
		rcu_assign_pointer(comment->c, NULL);
	}
	if (!len)
		return;
	if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
		len = IPSET_MAX_COMMENT_SIZE;
	c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
	if (unlikely(!c))
		return;
	strlcpy(c->str, ext->comment, len + 1);
	set->ext_size += sizeof(*c) + strlen(c->str) + 1;
	rcu_assign_pointer(comment->c, c);
}

/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
	struct ip_set_comment_rcu *c = rcu_dereference(comment->c);

	if (!c)
		return 0;
	return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
}

/* Called from uadd/udel, flush or the garbage collectors protected
 * by the set spinlock.
 * Called when the set is destroyed and when there can't be any user
 * of the set data anymore.
 */
static inline void
ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment)
{
	struct ip_set_comment_rcu *c;

	c = rcu_dereference_protected(comment->c, 1);
	if (unlikely(!c))
		return;
	set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
	kfree_rcu(c, rcu);
	rcu_assign_pointer(comment->c, NULL);
}

static inline void
ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
{
	atomic64_add((long long)bytes, &(counter)->bytes);
}

static inline void
ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
{
	atomic64_add((long long)packets, &(counter)->packets);
}

static inline u64
ip_set_get_bytes(const struct ip_set_counter *counter)
{
	return (u64)atomic64_read(&(counter)->bytes);
}

static inline u64
ip_set_get_packets(const struct ip_set_counter *counter)
{
	return (u64)atomic64_read(&(counter)->packets);
}

static inline bool
ip_set_match_counter(u64 counter, u64 match, u8 op)
{
	switch (op) {
	case IPSET_COUNTER_NONE:
		return true;
	case IPSET_COUNTER_EQ:
		return counter == match;
	case IPSET_COUNTER_NE:
		return counter != match;
	case IPSET_COUNTER_LT:
		return counter < match;
	case IPSET_COUNTER_GT:
		return counter > match;
	}
	return false;
}

static inline void
ip_set_update_counter(struct ip_set_counter *counter,
		      const struct ip_set_ext *ext, u32 flags)
{
	if (ext->packets != ULLONG_MAX &&
	    !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
		ip_set_add_bytes(ext->bytes, counter);
		ip_set_add_packets(ext->packets, counter);
	}
}

static inline bool
ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
{
	return nla_put_net64(skb, IPSET_ATTR_BYTES,
			     cpu_to_be64(ip_set_get_bytes(counter)),
			     IPSET_ATTR_PAD) ||
	       nla_put_net64(skb, IPSET_ATTR_PACKETS,
			     cpu_to_be64(ip_set_get_packets(counter)),
			     IPSET_ATTR_PAD);
}

static inline void
ip_set_init_counter(struct ip_set_counter *counter,
		    const struct ip_set_ext *ext)
{
	if (ext->bytes != ULLONG_MAX)
		atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
	if (ext->packets != ULLONG_MAX)
		atomic64_set(&(counter)->packets, (long long)(ext->packets));
}

static inline void
ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
		   const struct ip_set_ext *ext,
		   struct ip_set_ext *mext, u32 flags)
{
	mext->skbinfo = *skbinfo;
}

static inline bool
ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
{
	/* Send nonzero parameters only */
	return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
		nla_put_net64(skb, IPSET_ATTR_SKBMARK,
			      cpu_to_be64((u64)skbinfo->skbmark << 32 |
					  skbinfo->skbmarkmask),
			      IPSET_ATTR_PAD)) ||
	       (skbinfo->skbprio &&
		nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
			      cpu_to_be32(skbinfo->skbprio))) ||
	       (skbinfo->skbqueue &&
		nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
			      cpu_to_be16(skbinfo->skbqueue)));
}

static inline void
ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
		    const struct ip_set_ext *ext)
{
	*skbinfo = ext->skbinfo;
}

#define IP_SET_INIT_KEXT(skb, opt, set)			\
	{ .bytes = (skb)->len, .packets = 1,		\
	  .timeout = ip_set_adt_opt_timeout(opt, set) }

#define IP_SET_INIT_UEXT(set)				\
	{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX,	\
	  .timeout = (set)->timeout }
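
/* Editor's example of how these initializers are meant to be used
 * (hypothetical context, for illustration only):
 *
 *	// kernel-side add/test path, accounting the current packet:
 *	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 *
 *	// userspace-driven add path, no explicit counter values given:
 *	struct ip_set_ext uext = IP_SET_INIT_UEXT(set);
 */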

#define IPSET_CONCAT(a, b)	a##b
#define IPSET_TOKEN(a, b)	IPSET_CONCAT(a, b)

#endif /*_IP_SET_H */