// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 Nicira, Inc.
 */

#include <linux/module.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/static_key.h>
#include <net/ip.h>
#include <net/genetlink.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/ipv6_frag.h>

#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#endif

#include <net/netfilter/nf_conntrack_act_ct.h>

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"

struct ovs_ct_len_tbl {
	int maxlen;
	int minlen;
};

/* Metadata mark for masked write to conntrack mark */
struct md_mark {
	u32 value;
	u32 mask;
};

/* Metadata label for masked write to conntrack label. */
struct md_labels {
	struct ovs_key_ct_labels value;
	struct ovs_key_ct_labels mask;
};

enum ovs_ct_nat {
	OVS_CT_NAT = 1 << 0,		/* NAT for committed connections only. */
	OVS_CT_SRC_NAT = 1 << 1,	/* Source NAT for NEW connections. */
	OVS_CT_DST_NAT = 1 << 2,	/* Destination NAT for NEW connections. */
};
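
/* These values are combined: for an explicit NAT action, parse_nat() below
 * sets OVS_CT_NAT together with OVS_CT_SRC_NAT or OVS_CT_DST_NAT, so a
 * source-NAT action yields (OVS_CT_NAT | OVS_CT_SRC_NAT), while OVS_CT_NAT
 * alone means "translate only connections that already have NAT state".
 */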

/* Conntrack action context for execution. */
struct ovs_conntrack_info {
	struct nf_conntrack_helper *helper;
	struct nf_conntrack_zone zone;
	struct nf_conn *ct;
	u8 commit : 1;
	u8 nat : 3;			/* enum ovs_ct_nat */
	u8 force : 1;
	u8 have_eventmask : 1;
	u16 family;
	u32 eventmask;			/* Mask of 1 << IPCT_*. */
	struct md_mark mark;
	struct md_labels labels;
	char timeout[CTNL_TIMEOUT_NAME_MAX];
	struct nf_ct_timeout *nf_ct_timeout;
#if IS_ENABLED(CONFIG_NF_NAT)
	struct nf_nat_range2 range;	/* Only present for SRC NAT and DST NAT. */
#endif
};

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
#define OVS_CT_LIMIT_UNLIMITED	0
#define OVS_CT_LIMIT_DEFAULT	OVS_CT_LIMIT_UNLIMITED
#define CT_LIMIT_HASH_BUCKETS	512
static DEFINE_STATIC_KEY_FALSE(ovs_ct_limit_enabled);

struct ovs_ct_limit {
	/* Elements in ovs_ct_limit_info->limits hash table */
	struct hlist_node hlist_node;
	struct rcu_head rcu;
	u16 zone;
	u32 limit;
};

struct ovs_ct_limit_info {
	u32 default_limit;
	struct hlist_head *limits;
	struct nf_conncount_data *data;
};

static const struct nla_policy ct_limit_policy[OVS_CT_LIMIT_ATTR_MAX + 1] = {
	[OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NLA_NESTED, },
};
#endif

static bool labels_nonzero(const struct ovs_key_ct_labels *labels);

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);

static u16 key_to_nfproto(const struct sw_flow_key *key)
{
	switch (ntohs(key->eth.type)) {
	case ETH_P_IP:
		return NFPROTO_IPV4;
	case ETH_P_IPV6:
		return NFPROTO_IPV6;
	default:
		return NFPROTO_UNSPEC;
	}
}

/* Map SKB connection state into the values used by flow definition. */
static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
{
	u8 ct_state = OVS_CS_F_TRACKED;

	switch (ctinfo) {
	case IP_CT_ESTABLISHED_REPLY:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_REPLY_DIR;
		break;
	default:
		break;
	}

	switch (ctinfo) {
	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		ct_state |= OVS_CS_F_ESTABLISHED;
		break;
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_RELATED;
		break;
	case IP_CT_NEW:
		ct_state |= OVS_CS_F_NEW;
		break;
	default:
		break;
	}

	return ct_state;
}
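
/* For example, a reply packet of an established connection
 * (IP_CT_ESTABLISHED_REPLY) maps to
 * OVS_CS_F_TRACKED | OVS_CS_F_REPLY_DIR | OVS_CS_F_ESTABLISHED,
 * i.e. the "+trk+rpl+est" ct_state notation used by OVS userspace.
 */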

static u32 ovs_ct_get_mark(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	return ct ? READ_ONCE(ct->mark) : 0;
#else
	return 0;
#endif
}

/* Guard against conntrack labels max size shrinking below 128 bits. */
#if NF_CT_LABELS_MAX_SIZE < 16
#error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes
#endif

static void ovs_ct_get_labels(const struct nf_conn *ct,
			      struct ovs_key_ct_labels *labels)
{
	struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;

	if (cl)
		memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
	else
		memset(labels, 0, OVS_CT_LABELS_LEN);
}

static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key,
					const struct nf_conntrack_tuple *orig,
					u8 icmp_proto)
{
	key->ct_orig_proto = orig->dst.protonum;
	if (orig->dst.protonum == icmp_proto) {
		key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);
		key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);
	} else {
		key->ct.orig_tp.src = orig->src.u.all;
		key->ct.orig_tp.dst = orig->dst.u.all;
	}
}

static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
				const struct nf_conntrack_zone *zone,
				const struct nf_conn *ct)
{
	key->ct_state = state;
	key->ct_zone = zone->id;
	key->ct.mark = ovs_ct_get_mark(ct);
	ovs_ct_get_labels(ct, &key->ct.labels);

	if (ct) {
		const struct nf_conntrack_tuple *orig;

		/* Use the master if we have one. */
		if (ct->master)
			ct = ct->master;
		orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;

		/* IP version must match with the master connection. */
		if (key->eth.type == htons(ETH_P_IP) &&
		    nf_ct_l3num(ct) == NFPROTO_IPV4) {
			key->ipv4.ct_orig.src = orig->src.u3.ip;
			key->ipv4.ct_orig.dst = orig->dst.u3.ip;
			__ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP);
			return;
		} else if (key->eth.type == htons(ETH_P_IPV6) &&
			   !sw_flow_key_is_nd(key) &&
			   nf_ct_l3num(ct) == NFPROTO_IPV6) {
			key->ipv6.ct_orig.src = orig->src.u3.in6;
			key->ipv6.ct_orig.dst = orig->dst.u3.in6;
			__ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP);
			return;
		}
	}
	/* Clear 'ct_orig_proto' to mark the non-existence of conntrack
	 * original direction key fields.
	 */
	key->ct_orig_proto = 0;
}
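
/* Note that for ICMP the conntrack tuple stores type/code rather than
 * ports, so __ovs_ct_update_key_orig_tp() above reuses the orig_tp src/dst
 * slots for them; e.g. an ICMP echo request would show up as
 * orig_tp.src == htons(8) (type) and orig_tp.dst == htons(0) (code).
 */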

/* Update 'key' based on skb->_nfct. If 'post_ct' is true, then OVS has
 * previously sent the packet to conntrack via the ct action. If
 * 'keep_nat_flags' is true, the existing NAT flags are retained; otherwise
 * they are initialized from the connection status.
 */
static void ovs_ct_update_key(const struct sk_buff *skb,
			      const struct ovs_conntrack_info *info,
			      struct sw_flow_key *key, bool post_ct,
			      bool keep_nat_flags)
{
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	u8 state = 0;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		state = ovs_ct_get_state(ctinfo);
		/* All unconfirmed entries are NEW connections. */
		if (!nf_ct_is_confirmed(ct))
			state |= OVS_CS_F_NEW;
		/* OVS persists the related flag for the duration of the
		 * connection.
		 */
		if (ct->master)
			state |= OVS_CS_F_RELATED;
		if (keep_nat_flags) {
			state |= key->ct_state & OVS_CS_F_NAT_MASK;
		} else {
			if (ct->status & IPS_SRC_NAT)
				state |= OVS_CS_F_SRC_NAT;
			if (ct->status & IPS_DST_NAT)
				state |= OVS_CS_F_DST_NAT;
		}
		zone = nf_ct_zone(ct);
	} else if (post_ct) {
		state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
		if (info)
			zone = &info->zone;
	}
	__ovs_ct_update_key(key, state, zone, ct);
}

/* This is called to initialize CT key fields possibly coming in from the local
 * stack.
 */
void ovs_ct_fill_key(const struct sk_buff *skb,
		     struct sw_flow_key *key,
		     bool post_ct)
{
	ovs_ct_update_key(skb, NULL, key, post_ct, false);
}

int ovs_ct_put_key(const struct sw_flow_key *swkey,
		   const struct sw_flow_key *output, struct sk_buff *skb)
{
	if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
		    &output->ct.labels))
		return -EMSGSIZE;

	if (swkey->ct_orig_proto) {
		if (swkey->eth.type == htons(ETH_P_IP)) {
			struct ovs_key_ct_tuple_ipv4 orig;

			memset(&orig, 0, sizeof(orig));
			orig.ipv4_src = output->ipv4.ct_orig.src;
			orig.ipv4_dst = output->ipv4.ct_orig.dst;
			orig.src_port = output->ct.orig_tp.src;
			orig.dst_port = output->ct.orig_tp.dst;
			orig.ipv4_proto = output->ct_orig_proto;

			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
			struct ovs_key_ct_tuple_ipv6 orig;

			memset(&orig, 0, sizeof(orig));
			memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
			       sizeof(orig.ipv6_src));
			memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
			       sizeof(orig.ipv6_dst));
			orig.src_port = output->ct.orig_tp.src;
			orig.dst_port = output->ct.orig_tp.dst;
			orig.ipv6_proto = output->ct_orig_proto;

			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		}
	}

	return 0;
}

static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
			   u32 ct_mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	new_mark = ct_mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
		key->ct.mark = new_mark;
	}

	return 0;
#else
	return -ENOTSUPP;
#endif
}

static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
{
	struct nf_conn_labels *cl;

	cl = nf_ct_labels_find(ct);
	if (!cl) {
		nf_ct_labels_ext_add(ct);
		cl = nf_ct_labels_find(ct);
	}

	return cl;
}

/* Initialize labels for a new, yet to be committed conntrack entry. Note
 * that since the new connection is not yet confirmed, and thus no one else
 * has access to its labels, we can simply overwrite them.
 */
static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key,
			      const struct ovs_key_ct_labels *labels,
			      const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl, *master_cl;
	bool have_mask = labels_nonzero(mask);

	/* Inherit master's labels to the related connection? */
	master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL;

	if (!master_cl && !have_mask)
		return 0;   /* Nothing to do. */

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	/* Inherit the master's labels, if any. */
	if (master_cl)
		*cl = *master_cl;

	if (have_mask) {
		u32 *dst = (u32 *)cl->bits;
		int i;

		for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
			dst[i] = (dst[i] & ~mask->ct_labels_32[i]) |
				 (labels->ct_labels_32[i]
				  & mask->ct_labels_32[i]);
	}

	/* Labels are included in the IPCTNL_MSG_CT_NEW event only if the
	 * IPCT_LABEL bit is set in the event cache.
	 */
	nf_conntrack_event_cache(IPCT_LABEL, ct);

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}

static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
			     const struct ovs_key_ct_labels *labels,
			     const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl;
	int err;

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	err = nf_connlabels_replace(ct, labels->ct_labels_32,
				    mask->ct_labels_32,
				    OVS_CT_LABELS_LEN_32);
	if (err)
		return err;

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}
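
/* The two label setters differ on purpose: ovs_ct_init_labels() may write
 * the bits directly because an unconfirmed entry is not yet visible to
 * anyone else, while ovs_ct_set_labels() must go through
 * nf_connlabels_replace(), which safely updates a live (confirmed) entry
 * and raises the label change event itself.
 */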

/* 'skb' should already be pulled to nh_ofs. */
static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
{
	const struct nf_conntrack_helper *helper;
	const struct nf_conn_help *help;
	enum ip_conntrack_info ctinfo;
	unsigned int protoff;
	struct nf_conn *ct;
	int err;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct || ctinfo == IP_CT_RELATED_REPLY)
		return NF_ACCEPT;

	help = nfct_help(ct);
	if (!help)
		return NF_ACCEPT;

	helper = rcu_dereference(help->helper);
	if (!helper)
		return NF_ACCEPT;

	switch (proto) {
	case NFPROTO_IPV4:
		protoff = ip_hdrlen(skb);
		break;
	case NFPROTO_IPV6: {
		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
		__be16 frag_off;
		int ofs;

		ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
				       &frag_off);
		if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
			pr_debug("proto header not found\n");
			return NF_ACCEPT;
		}
		protoff = ofs;
		break;
	}
	default:
		WARN_ONCE(1, "helper invoked on non-IP family!");
		return NF_DROP;
	}

	err = helper->help(skb, protoff, ct, ctinfo);
	if (err != NF_ACCEPT)
		return err;

	/* Adjust seqs after helper. This is needed due to some helpers (e.g.,
	 * FTP with NAT) adjusting the TCP payload size when mangling IP
	 * addresses and/or port numbers in the text-based control connection.
	 */
	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
	    !nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
		return NF_DROP;
	return NF_ACCEPT;
}
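
/* A concrete case of the seqadj comment above: when the FTP helper plus NAT
 * rewrite the address in a "PORT a,b,c,d,e,f" command, the command string
 * may change length, shifting all subsequent TCP sequence numbers; without
 * nf_ct_seq_adjust() the peer's ACKs would no longer line up.
 */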

/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
			    u16 zone, struct sk_buff *skb)
{
	struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
	int err;

	if (key->eth.type == htons(ETH_P_IP)) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		err = ip_defrag(net, skb, user);
		if (err)
			return err;

		ovs_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err) {
			if (err != -EINPROGRESS)
				kfree_skb(skb);
			return err;
		}

		key->ip.proto = ipv6_hdr(skb)->nexthdr;
		ovs_cb.mru = IP6CB(skb)->frag_max_size;
#endif
	} else {
		kfree_skb(skb);
		return -EPFNOSUPPORT;
	}

	/* The key extracted from the fragment that completed this datagram
	 * likely didn't have an L4 header, so regenerate it.
	 */
	ovs_flow_key_update_l3l4(skb, key);

	key->ip.frag = OVS_FRAG_TYPE_NONE;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	*OVS_CB(skb) = ovs_cb;

	return 0;
}

static struct nf_conntrack_expect *
ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
		   u16 proto, const struct sk_buff *skb)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
		return NULL;

	exp = __nf_ct_expect_find(net, zone, &tuple);
	if (exp) {
		struct nf_conntrack_tuple_hash *h;

		/* Delete existing conntrack entry, if it clashes with the
		 * expectation. This can happen since conntrack ALGs do not
		 * check for clashes between (new) expectations and existing
		 * conntrack entries. nf_conntrack_in() will check the
		 * expectations only if a conntrack entry can not be found,
		 * which can lead to OVS finding the expectation (here) in the
		 * init direction, but which will not be removed by the
		 * nf_conntrack_in() call, if a matching conntrack entry is
		 * found instead. In this case all init direction packets
		 * would be reported as new related packets, while reply
		 * direction packets would be reported as un-related
		 * established packets.
		 */
		h = nf_conntrack_find_get(net, zone, &tuple);
		if (h) {
			struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

			nf_ct_delete(ct, 0, 0);
			nf_ct_put(ct);
		}
	}

	return exp;
}

/* This replicates logic from nf_conntrack_core.c that is not exported. */
static enum ip_conntrack_info
ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h)
{
	const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
		return IP_CT_ESTABLISHED_REPLY;
	/* Once we've had two way comms, always ESTABLISHED. */
	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		return IP_CT_ESTABLISHED;
	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
		return IP_CT_RELATED;
	return IP_CT_NEW;
}

/* Find an existing connection which this packet belongs to without
 * re-attributing statistics or modifying the connection state. This allows an
 * skb->_nfct lost due to an upcall to be recovered during actions execution.
 *
 * Must be called with rcu_read_lock.
 *
 * On success, populates skb->_nfct and returns the connection. Returns NULL
 * if there is no existing entry.
 */
static struct nf_conn *
ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
		     u8 l3num, struct sk_buff *skb, bool natted)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
			       net, &tuple)) {
		pr_debug("ovs_ct_find_existing: Can't get tuple\n");
		return NULL;
	}

	/* Must invert the tuple if skb has been transformed by NAT. */
	if (natted) {
		struct nf_conntrack_tuple inverse;

		if (!nf_ct_invert_tuple(&inverse, &tuple)) {
			pr_debug("ovs_ct_find_existing: Inversion failed!\n");
			return NULL;
		}
		tuple = inverse;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return NULL;   /* Not found. */

	ct = nf_ct_tuplehash_to_ctrack(h);

	/* Inverted packet tuple matches the reverse direction conntrack tuple,
	 * select the other tuplehash to get the right 'ctinfo' bits for this
	 * packet.
	 */
	if (natted)
		h = &ct->tuplehash[!h->tuple.dst.dir];

	nf_ct_set(skb, ct, ovs_ct_get_info(h));
	return ct;
}
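
/* Illustration of the 'natted' case, with made-up addresses: after SNAT of
 * 10.0.0.1 -> 192.0.2.1, a tuple re-extracted from the mangled packet reads
 * (192.0.2.1 -> B), which is stored in the table only as the
 * reply-direction tuple (B -> 192.0.2.1). Inverting the extracted tuple
 * therefore finds the entry, and flipping to the other tuplehash restores
 * the packet's true direction for ctinfo.
 */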

static
struct nf_conn *ovs_ct_executed(struct net *net,
				const struct sw_flow_key *key,
				const struct ovs_conntrack_info *info,
				struct sk_buff *skb,
				bool *ct_executed)
{
	struct nf_conn *ct = NULL;

	/* If no ct, check if we have evidence that an existing conntrack entry
	 * might be found for this skb. This happens when we lose a skb->_nfct
	 * due to an upcall, or if the direction is being forced. If the
	 * connection was not confirmed, it is not cached and needs to be run
	 * through conntrack again.
	 */
	*ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
		       !(key->ct_state & OVS_CS_F_INVALID) &&
		       (key->ct_zone == info->zone.id);

	if (*ct_executed || (!key->ct_state && info->force)) {
		ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
					  !!(key->ct_state &
					     OVS_CS_F_NAT_MASK));
	}

	return ct;
}

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool skb_nfct_cached(struct net *net,
			    const struct sw_flow_key *key,
			    const struct ovs_conntrack_info *info,
			    struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	bool ct_executed = true;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		ct = ovs_ct_executed(net, key, info, skb, &ct_executed);

	if (ct)
		nf_ct_get(skb, &ctinfo);
	else
		return false;

	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
		return false;
	if (info->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != info->helper)
			return false;
	}
	if (info->nf_ct_timeout) {
		struct nf_conn_timeout *timeout_ext;

		timeout_ext = nf_ct_timeout_find(ct);
		if (!timeout_ext || info->nf_ct_timeout !=
		    rcu_dereference(timeout_ext->timeout))
			return false;
	}
	/* Force conntrack entry direction to the current packet? */
	if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		/* Delete the conntrack entry if confirmed, else just release
		 * the reference.
		 */
		if (nf_ct_is_confirmed(ct))
			nf_ct_delete(ct, 0, 0);

		nf_ct_put(ct);
		nf_ct_set(skb, NULL, 0);
		return false;
	}

	return ct_executed;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static void ovs_nat_update_key(struct sw_flow_key *key,
			       const struct sk_buff *skb,
			       enum nf_nat_manip_type maniptype)
{
	if (maniptype == NF_NAT_MANIP_SRC) {
		__be16 src;

		key->ct_state |= OVS_CS_F_SRC_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.src = ip_hdr(skb)->saddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
			       sizeof(key->ipv6.addr.src));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			src = udp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_TCP)
			src = tcp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_SCTP)
			src = sctp_hdr(skb)->source;
		else
			return;

		key->tp.src = src;
	} else {
		__be16 dst;

		key->ct_state |= OVS_CS_F_DST_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.dst = ip_hdr(skb)->daddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(key->ipv6.addr.dst));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			dst = udp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_TCP)
			dst = tcp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_SCTP)
			dst = sctp_hdr(skb)->dest;
		else
			return;

		key->tp.dst = dst;
	}
}
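
/* Resyncing the flow key with the mangled headers matters: later actions
 * and flow lookups match on the key, so leaving pre-NAT addresses or ports
 * in place would make them operate on stale values.
 */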

/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			      enum ip_conntrack_info ctinfo,
			      const struct nf_nat_range2 *range,
			      enum nf_nat_manip_type maniptype, struct sw_flow_key *key)
{
	int hooknum, nh_off, err = NF_ACCEPT;

	nh_off = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_off);

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (IS_ENABLED(CONFIG_NF_NAT) &&
		    skb->protocol == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto push;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto push;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto push;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto push;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
push:
	skb_push_rcsum(skb, nh_off);

	/* Update the flow key if NAT successful. */
	if (err == NF_ACCEPT)
		ovs_nat_update_key(key, skb, maniptype);

	return err;
}

/* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	enum nf_nat_manip_type maniptype;
	int err;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_ACCEPT;   /* Can't NAT. */

	/* Determine NAT type.
	 * Check if the NAT type can be deduced from the tracked connection.
	 * Make sure new expected connections (IP_CT_RELATED) are NATted only
	 * when committing.
	 */
	if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW &&
	    ct->status & IPS_NAT_MASK &&
	    (ctinfo != IP_CT_RELATED || info->commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction. Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (info->nat & OVS_CT_SRC_NAT) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (info->nat & OVS_CT_DST_NAT) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT; /* Connection is not NATed. */
	}
	err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype, key);

	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
						 maniptype, key);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL,
						 NF_NAT_MANIP_SRC, key);
		}
	}

	return err;
}
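
/* The second ovs_ct_nat_execute() call above covers double NAT: when both
 * IPS_SRC_NAT and IPS_DST_NAT are set, each packet needs both manips. The
 * DST-NAT-only branch passes a NULL range in the original direction so a
 * null source binding gets allocated, keeping reply-direction rewriting
 * consistent.
 */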
#else /* !CONFIG_NF_NAT */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	return NF_ACCEPT;
}
#endif

/* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
 * not done already. Update key with new CT state after passing the packet
 * through conntrack.
 * Note that if the packet is deemed invalid by conntrack, skb->_nfct will be
 * set to NULL and 0 will be returned.
 */
static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			   const struct ovs_conntrack_info *info,
			   struct sk_buff *skb)
{
	/* If we are recirculating packets to match on conntrack fields and
	 * committing with a separate conntrack action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	bool cached = skb_nfct_cached(net, key, info, skb);
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	if (!cached) {
		struct nf_hook_state state = {
			.hook = NF_INET_PRE_ROUTING,
			.pf = info->family,
			.net = net,
		};
		struct nf_conn *tmpl = info->ct;
		int err;

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			nf_ct_put(ct);
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			return -ENOENT;

		/* Clear CT state NAT flags to mark that we have not yet done
		 * NAT after the nf_conntrack_in() call. We can actually clear
		 * the whole state, as it will be re-initialized below.
		 */
		key->ct_state = 0;

		/* Update the key, but keep the NAT flags. */
		ovs_ct_update_key(skb, info, key, true, true);
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		bool add_helper = false;

		/* Packets starting a new connection must be NATted before the
		 * helper, so that the helper knows about the NAT. We enforce
		 * this by delaying both NAT and helper calls for unconfirmed
		 * connections until the committing CT action. For later
		 * packets NAT and Helper may be called in either order.
		 *
		 * NAT will be done only if the CT action has NAT, and only
		 * once per packet (per zone), as guarded by the NAT bits in
		 * the key->ct_state.
		 */
		if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
		    (nf_ct_is_confirmed(ct) || info->commit) &&
		    ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
			return -EINVAL;
		}

		/* Userspace may decide to perform a ct lookup without a helper
		 * specified followed by a (recirculate and) commit with one,
		 * or attach a helper in a later commit. Therefore, for
		 * connections which we will commit, we may need to attach
		 * the helper here.
		 */
		if (!nf_ct_is_confirmed(ct) && info->commit &&
		    info->helper && !nfct_help(ct)) {
			int err = __nf_ct_try_assign_helper(ct, info->ct,
							    GFP_ATOMIC);
			if (err)
				return err;
			add_helper = true;

			/* helper installed, add seqadj if NAT is required */
			if (info->nat && !nfct_seqadj(ct)) {
				if (!nfct_seqadj_ext_add(ct))
					return -EINVAL;
			}
		}

		/* Call the helper only if:
		 * - nf_conntrack_in() was executed above ("!cached") or a
		 *   helper was just attached ("add_helper") for a confirmed
		 *   connection, or
		 * - When committing an unconfirmed connection.
		 */
		if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
					      info->commit) &&
		    ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
			return -EINVAL;
		}

		if (nf_ct_protonum(ct) == IPPROTO_TCP &&
		    nf_ct_is_confirmed(ct) && nf_conntrack_tcp_established(ct)) {
			/* Be liberal for tcp packets so that out-of-window
			 * packets are not marked invalid.
			 */
			nf_ct_set_tcp_be_liberal(ct);
		}

		nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
	}

	return 0;
}

/* Lookup connection and read fields into key. */
static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	struct nf_conntrack_expect *exp;

	/* If we pass an expected packet through nf_conntrack_in() the
	 * expectation is typically removed, but the packet could still be
	 * lost in upcall processing. To prevent this from happening we
	 * perform an explicit expectation lookup. Expected connections are
	 * always new, and will be passed through conntrack only when they are
	 * committed, as it is OK to remove the expectation at that time.
	 */
	exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
	if (exp) {
		u8 state;

		/* NOTE: New connections are NATted and Helped only when
		 * committed, so we are not calling into NAT here.
		 */
		state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
		__ovs_ct_update_key(key, state, &info->zone, exp->master);
	} else {
		struct nf_conn *ct;
		int err;

		err = __ovs_ct_lookup(net, key, info, skb);
		if (err)
			return err;

		ct = (struct nf_conn *)skb_nfct(skb);
		if (ct)
			nf_ct_deliver_cached_events(ct);
	}

	return 0;
}

static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
{
	size_t i;

	for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
		if (labels->ct_labels_32[i])
			return true;

	return false;
}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static struct hlist_head *ct_limit_hash_bucket(
	const struct ovs_ct_limit_info *info, u16 zone)
{
	return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
}
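
/* CT_LIMIT_HASH_BUCKETS is a power of two, so the mask above is equivalent
 * to 'zone % CT_LIMIT_HASH_BUCKETS' without a division.
 */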

/* Call with ovs_mutex */
static void ct_limit_set(const struct ovs_ct_limit_info *info,
			 struct ovs_ct_limit *new_ct_limit)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, new_ct_limit->zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == new_ct_limit->zone) {
			hlist_replace_rcu(&ct_limit->hlist_node,
					  &new_ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}

	hlist_add_head_rcu(&new_ct_limit->hlist_node, head);
}

/* Call with ovs_mutex */
static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	struct hlist_node *n;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) {
		if (ct_limit->zone == zone) {
			hlist_del_rcu(&ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}
}

/* Call with RCU read lock */
static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == zone)
			return ct_limit->limit;
	}

	return info->default_limit;
}

static int ovs_ct_check_limit(struct net *net,
			      const struct ovs_conntrack_info *info,
			      const struct nf_conntrack_tuple *tuple)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	const struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	u32 per_zone_limit, connections;
	u32 conncount_key;

	conncount_key = info->zone.id;

	per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
	if (per_zone_limit == OVS_CT_LIMIT_UNLIMITED)
		return 0;

	connections = nf_conncount_count(net, ct_limit_info->data,
					 &conncount_key, tuple, &info->zone);
	if (connections > per_zone_limit)
		return -ENOMEM;

	return 0;
}
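
/* The conncount key is just the zone id, so the count (and thus the limit)
 * is tracked per conntrack zone; zones without an explicit ovs_ct_limit
 * entry fall back to default_limit via ct_limit_get() above.
 */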
#endif

/* Lookup connection and confirm if unconfirmed. */
static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err;

	err = __ovs_ct_lookup(net, key, info, skb);
	if (err)
		return err;

	/* The connection could be invalid, in which case this is a no-op. */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return 0;

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	if (static_branch_unlikely(&ovs_ct_limit_enabled)) {
		if (!nf_ct_is_confirmed(ct)) {
			err = ovs_ct_check_limit(net, info,
				&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
			if (err) {
				net_warn_ratelimited("openvswitch: zone: %u "
					"exceeds conntrack limit\n",
					info->zone.id);
				return err;
			}
		}
	}
#endif

	/* Set the conntrack event mask if given. NEW and DELETE events have
	 * their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener
	 * typically would receive many kinds of updates. Setting the event
	 * mask allows those events to be filtered. The set event mask will
	 * remain in effect for the lifetime of the connection unless changed
	 * by a further CT action with both the commit flag and the eventmask
	 * option.
	 */
	if (info->have_eventmask) {
		struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct);

		if (cache)
			cache->ctmask = info->eventmask;
	}

	/* Apply changes before confirming the connection so that the initial
	 * conntrack NEW netlink event carries the values given in the CT
	 * action.
	 */
	if (info->mark.mask) {
		err = ovs_ct_set_mark(ct, key, info->mark.value,
				      info->mark.mask);
		if (err)
			return err;
	}
	if (!nf_ct_is_confirmed(ct)) {
		err = ovs_ct_init_labels(ct, key, &info->labels.value,
					 &info->labels.mask);
		if (err)
			return err;

		nf_conn_act_ct_ext_add(ct);
	} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
		   labels_nonzero(&info->labels.mask)) {
		err = ovs_ct_set_labels(ct, key, &info->labels.value,
					&info->labels.mask);
		if (err)
			return err;
	}
	/* This will take care of sending queued events even if the connection
	 * is already confirmed.
	 */
	if (nf_conntrack_confirm(skb) != NF_ACCEPT)
		return -EINVAL;

	return 0;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int ovs_skb_network_trim(struct sk_buff *skb)
{
	unsigned int len;
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case htons(ETH_P_IPV6):
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);
	if (err)
		kfree_skb(skb);

	return err;
}
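
/* Example of the padding this removes: Ethernet requires a 46-byte minimum
 * payload, so a 40-byte TCP/IPv4 packet arrives with 6 trailing zero bytes
 * while tot_len still reads 40; trimming to tot_len drops the padding
 * before conntrack checksums the packet.
 */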

/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
int ovs_ct_execute(struct net *net, struct sk_buff *skb,
		   struct sw_flow_key *key,
		   const struct ovs_conntrack_info *info)
{
	int nh_ofs;
	int err;

	/* The conntrack module expects to be working at L3. */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);

	err = ovs_skb_network_trim(skb);
	if (err)
		return err;

	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
		err = handle_fragments(net, key, info->zone.id, skb);
		if (err)
			return err;
	}

	if (info->commit)
		err = ovs_ct_commit(net, key, info, skb);
	else
		err = ovs_ct_lookup(net, key, info, skb);

	skb_push_rcsum(skb, nh_ofs);
	if (err)
		kfree_skb(skb);
	return err;
}

int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);

	nf_ct_put(ct);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

	if (key)
		ovs_ct_fill_key(skb, key, false);

	return 0;
}

static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
			     const struct sw_flow_key *key, bool log)
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help;
	int ret = 0;

	helper = nf_conntrack_helper_try_module_get(name, info->family,
						    key->ip.proto);
	if (!helper) {
		OVS_NLERR(log, "Unknown helper \"%s\"", name);
		return -EINVAL;
	}

	help = nf_ct_helper_ext_add(info->ct, GFP_KERNEL);
	if (!help) {
		nf_conntrack_helper_put(helper);
		return -ENOMEM;
	}

#if IS_ENABLED(CONFIG_NF_NAT)
	if (info->nat) {
		ret = nf_nat_helper_try_module_get(name, info->family,
						   key->ip.proto);
		if (ret) {
			nf_conntrack_helper_put(helper);
			OVS_NLERR(log, "Failed to load \"%s\" NAT helper, error: %d",
				  name, ret);
			return ret;
		}
	}
#endif
	rcu_assign_pointer(help->helper, helper);
	info->helper = helper;
	return ret;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static int parse_nat(const struct nlattr *attr,
		     struct ovs_conntrack_info *info, bool log)
{
	struct nlattr *a;
	int rem;
	bool have_ip_max = false;
	bool have_proto_max = false;
	bool ip_vers = (info->family == NFPROTO_IPV6);

	nla_for_each_nested(a, attr, rem) {
		static const int ovs_nat_attr_lens[OVS_NAT_ATTR_MAX + 1][2] = {
			[OVS_NAT_ATTR_SRC] = {0, 0},
			[OVS_NAT_ATTR_DST] = {0, 0},
			[OVS_NAT_ATTR_IP_MIN] = {sizeof(struct in_addr),
						 sizeof(struct in6_addr)},
			[OVS_NAT_ATTR_IP_MAX] = {sizeof(struct in_addr),
						 sizeof(struct in6_addr)},
			[OVS_NAT_ATTR_PROTO_MIN] = {sizeof(u16), sizeof(u16)},
			[OVS_NAT_ATTR_PROTO_MAX] = {sizeof(u16), sizeof(u16)},
			[OVS_NAT_ATTR_PERSISTENT] = {0, 0},
			[OVS_NAT_ATTR_PROTO_HASH] = {0, 0},
			[OVS_NAT_ATTR_PROTO_RANDOM] = {0, 0},
		};
		int type = nla_type(a);

		if (type > OVS_NAT_ATTR_MAX) {
			OVS_NLERR(log, "Unknown NAT attribute (type=%d, max=%d)",
				  type, OVS_NAT_ATTR_MAX);
			return -EINVAL;
		}

		if (nla_len(a) != ovs_nat_attr_lens[type][ip_vers]) {
			OVS_NLERR(log, "NAT attribute type %d has unexpected length (%d != %d)",
				  type, nla_len(a),
				  ovs_nat_attr_lens[type][ip_vers]);
			return -EINVAL;
		}

		switch (type) {
		case OVS_NAT_ATTR_SRC:
		case OVS_NAT_ATTR_DST:
			if (info->nat) {
				OVS_NLERR(log, "Only one type of NAT may be specified");
				return -ERANGE;
			}
			info->nat |= OVS_CT_NAT;
			info->nat |= ((type == OVS_NAT_ATTR_SRC)
					? OVS_CT_SRC_NAT : OVS_CT_DST_NAT);
			break;

		case OVS_NAT_ATTR_IP_MIN:
			nla_memcpy(&info->range.min_addr, a,
				   sizeof(info->range.min_addr));
			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
			break;

		case OVS_NAT_ATTR_IP_MAX:
			have_ip_max = true;
			nla_memcpy(&info->range.max_addr, a,
				   sizeof(info->range.max_addr));
			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
			break;

		case OVS_NAT_ATTR_PROTO_MIN:
			info->range.min_proto.all = htons(nla_get_u16(a));
			info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
			break;

		case OVS_NAT_ATTR_PROTO_MAX:
			have_proto_max = true;
			info->range.max_proto.all = htons(nla_get_u16(a));
			info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
			break;

		case OVS_NAT_ATTR_PERSISTENT:
			info->range.flags |= NF_NAT_RANGE_PERSISTENT;
			break;

		case OVS_NAT_ATTR_PROTO_HASH:
			info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM;
			break;

		case OVS_NAT_ATTR_PROTO_RANDOM:
			info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
			break;

		default:
			OVS_NLERR(log, "Unknown nat attribute (%d)", type);
			return -EINVAL;
		}
	}

	if (rem > 0) {
		OVS_NLERR(log, "NAT attribute has %d unknown bytes", rem);
		return -EINVAL;
	}
	if (!info->nat) {
		/* Do not allow flags if no type is given. */
		if (info->range.flags) {
			OVS_NLERR(log,
				  "NAT flags may be given only when NAT range (SRC or DST) is also specified."
				  );
			return -EINVAL;
		}
		info->nat = OVS_CT_NAT;   /* NAT existing connections. */
	} else if (!info->commit) {
		OVS_NLERR(log,
			  "NAT attributes may be specified only when CT COMMIT flag is also specified."
			  );
		return -EINVAL;
	}
	/* Allow missing IP_MAX. */
	if (info->range.flags & NF_NAT_RANGE_MAP_IPS && !have_ip_max) {
		memcpy(&info->range.max_addr, &info->range.min_addr,
		       sizeof(info->range.max_addr));
	}
	/* Allow missing PROTO_MAX. */
	if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
	    !have_proto_max) {
		info->range.max_proto.all = info->range.min_proto.all;
	}
	return 0;
}
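
/* For illustration only (hypothetical userspace flow): an action such as
 * ct(commit,nat(src=10.0.0.240-10.0.0.254:32768-65535)) would arrive here
 * as nested OVS_NAT_ATTR_SRC, OVS_NAT_ATTR_IP_MIN/MAX and
 * OVS_NAT_ATTR_PROTO_MIN/MAX attributes that fill in info->range.
 */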
#endif

static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
	[OVS_CT_ATTR_COMMIT]		= { .minlen = 0, .maxlen = 0 },
	[OVS_CT_ATTR_FORCE_COMMIT]	= { .minlen = 0, .maxlen = 0 },
	[OVS_CT_ATTR_ZONE]		= { .minlen = sizeof(u16),
					    .maxlen = sizeof(u16) },
	[OVS_CT_ATTR_MARK]		= { .minlen = sizeof(struct md_mark),
					    .maxlen = sizeof(struct md_mark) },
	[OVS_CT_ATTR_LABELS]		= { .minlen = sizeof(struct md_labels),
					    .maxlen = sizeof(struct md_labels) },
	[OVS_CT_ATTR_HELPER]		= { .minlen = 1,
					    .maxlen = NF_CT_HELPER_NAME_LEN },
#if IS_ENABLED(CONFIG_NF_NAT)
	/* NAT length is checked when parsing the nested attributes. */
	[OVS_CT_ATTR_NAT]		= { .minlen = 0, .maxlen = INT_MAX },
#endif
	[OVS_CT_ATTR_EVENTMASK]		= { .minlen = sizeof(u32),
					    .maxlen = sizeof(u32) },
	[OVS_CT_ATTR_TIMEOUT]		= { .minlen = 1,
					    .maxlen = CTNL_TIMEOUT_NAME_MAX },
};

static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
		    const char **helper, bool log)
{
	struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int maxlen;
		int minlen;

		if (type > OVS_CT_ATTR_MAX) {
			OVS_NLERR(log,
				  "Unknown conntrack attr (type=%d, max=%d)",
				  type, OVS_CT_ATTR_MAX);
			return -EINVAL;
		}

		maxlen = ovs_ct_attr_lens[type].maxlen;
		minlen = ovs_ct_attr_lens[type].minlen;
		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
			OVS_NLERR(log,
				  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
				  type, nla_len(a), maxlen);
			return -EINVAL;
		}

		switch (type) {
		case OVS_CT_ATTR_FORCE_COMMIT:
			info->force = true;
			fallthrough;
		case OVS_CT_ATTR_COMMIT:
			info->commit = true;
			break;
#ifdef CONFIG_NF_CONNTRACK_ZONES
		case OVS_CT_ATTR_ZONE:
			info->zone.id = nla_get_u16(a);
			break;
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
		case OVS_CT_ATTR_MARK: {
			struct md_mark *mark = nla_data(a);

			if (!mark->mask) {
				OVS_NLERR(log, "ct_mark mask cannot be 0");
				return -EINVAL;
			}
			info->mark = *mark;
			break;
		}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
		case OVS_CT_ATTR_LABELS: {
			struct md_labels *labels = nla_data(a);

			if (!labels_nonzero(&labels->mask)) {
				OVS_NLERR(log, "ct_labels mask cannot be 0");
				return -EINVAL;
			}
			info->labels = *labels;
			break;
		}
#endif
		case OVS_CT_ATTR_HELPER:
			*helper = nla_data(a);
			if (!memchr(*helper, '\0', nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack helper");
				return -EINVAL;
			}
			break;
#if IS_ENABLED(CONFIG_NF_NAT)
		case OVS_CT_ATTR_NAT: {
			int err = parse_nat(a, info, log);

			if (err)
				return err;
			break;
		}
#endif
		case OVS_CT_ATTR_EVENTMASK:
			info->have_eventmask = true;
			info->eventmask = nla_get_u32(a);
			break;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
		case OVS_CT_ATTR_TIMEOUT:
			memcpy(info->timeout, nla_data(a), nla_len(a));
			if (!memchr(info->timeout, '\0', nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack timeout");
				return -EINVAL;
			}
			break;
#endif

		default:
			OVS_NLERR(log, "Unknown conntrack attr (%d)",
				  type);
			return -EINVAL;
		}
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (!info->commit && info->mark.mask) {
		OVS_NLERR(log,
			  "Setting conntrack mark requires 'commit' flag.");
		return -EINVAL;
	}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
	if (!info->commit && labels_nonzero(&info->labels.mask)) {
		OVS_NLERR(log,
			  "Setting conntrack labels requires 'commit' flag.");
		return -EINVAL;
	}
#endif
	if (rem > 0) {
		OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
		return -EINVAL;
	}

	return 0;
}

bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
{
	if (attr == OVS_KEY_ATTR_CT_STATE)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    attr == OVS_KEY_ATTR_CT_ZONE)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    attr == OVS_KEY_ATTR_CT_MARK)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    attr == OVS_KEY_ATTR_CT_LABELS) {
		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

		return ovs_net->xt_label;
	}

	return false;
}

int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
		       const struct sw_flow_key *key,
		       struct sw_flow_actions **sfa, bool log)
{
	struct ovs_conntrack_info ct_info;
	const char *helper = NULL;
	u16 family;
	int err;

	family = key_to_nfproto(key);
	if (family == NFPROTO_UNSPEC) {
		OVS_NLERR(log, "ct family unspecified");
		return -EINVAL;
	}

	memset(&ct_info, 0, sizeof(ct_info));
	ct_info.family = family;

	nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
			NF_CT_DEFAULT_ZONE_DIR, 0);

	err = parse_ct(attr, &ct_info, &helper, log);
	if (err)
		return err;

	/* Set up template for tracking connections in specific zones. */
	ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
	if (!ct_info.ct) {
		OVS_NLERR(log, "Failed to allocate conntrack template");
		return -ENOMEM;
	}

	if (ct_info.timeout[0]) {
		if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
				      ct_info.timeout))
			pr_info_ratelimited("Failed to associate timeout "
					    "policy `%s'\n", ct_info.timeout);
1716 else
1717 ct_info.nf_ct_timeout = rcu_dereference(
1718 nf_ct_timeout_find(ct_info.ct)->timeout);
1719
1720 }
1721
1722 if (helper) {
1723 err = ovs_ct_add_helper(&ct_info, helper, key, log);
1724 if (err)
1725 goto err_free_ct;
1726 }
1727
1728 err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
1729 sizeof(ct_info), log);
1730 if (err)
1731 goto err_free_ct;
1732
1733 __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
1734 return 0;
1735 err_free_ct:
1736 __ovs_ct_free_action(&ct_info);
1737 return err;
1738 }
1739
1740 #if IS_ENABLED(CONFIG_NF_NAT)
static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
			       struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
	if (!start)
		return false;

	if (info->nat & OVS_CT_SRC_NAT) {
		if (nla_put_flag(skb, OVS_NAT_ATTR_SRC))
			return false;
	} else if (info->nat & OVS_CT_DST_NAT) {
		if (nla_put_flag(skb, OVS_NAT_ATTR_DST))
			return false;
	} else {
		goto out;
	}

	if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
		if (IS_ENABLED(CONFIG_NF_NAT) &&
		    info->family == NFPROTO_IPV4) {
			if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
					    info->range.min_addr.ip) ||
			    (info->range.max_addr.ip
			     != info->range.min_addr.ip &&
			     (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
					      info->range.max_addr.ip))))
				return false;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   info->family == NFPROTO_IPV6) {
			if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
					     &info->range.min_addr.in6) ||
			    (memcmp(&info->range.max_addr.in6,
				    &info->range.min_addr.in6,
				    sizeof(info->range.max_addr.in6)) &&
			     (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
					       &info->range.max_addr.in6))))
				return false;
		} else {
			return false;
		}
	}
	if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
	    (nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MIN,
			 ntohs(info->range.min_proto.all)) ||
	     (info->range.max_proto.all != info->range.min_proto.all &&
	      nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MAX,
			  ntohs(info->range.max_proto.all)))))
		return false;

	if (info->range.flags & NF_NAT_RANGE_PERSISTENT &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PERSISTENT))
		return false;
	if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_HASH))
		return false;
	if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_RANDOM))
		return false;
out:
	nla_nest_end(skb, start);

	return true;
}
#endif

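/* Serialize a conntrack action (struct ovs_conntrack_info) back into its
 * OVS_ACTION_ATTR_CT netlink representation, e.g. when flows are dumped to
 * userspace.  Attributes whose kernel config options are disabled are
 * skipped.
 */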
int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
			  struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT);
	if (!start)
		return -EMSGSIZE;

	if (ct_info->commit && nla_put_flag(skb, ct_info->force
					    ? OVS_CT_ATTR_FORCE_COMMIT
					    : OVS_CT_ATTR_COMMIT))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
	    nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
		    &ct_info->mark))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    labels_nonzero(&ct_info->labels.mask) &&
	    nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
		    &ct_info->labels))
		return -EMSGSIZE;
	if (ct_info->helper) {
		if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
				   ct_info->helper->name))
			return -EMSGSIZE;
	}
	if (ct_info->have_eventmask &&
	    nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask))
		return -EMSGSIZE;
	if (ct_info->timeout[0]) {
		if (nla_put_string(skb, OVS_CT_ATTR_TIMEOUT, ct_info->timeout))
			return -EMSGSIZE;
	}

#if IS_ENABLED(CONFIG_NF_NAT)
	if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
		return -EMSGSIZE;
#endif
	nla_nest_end(skb, start);

	return 0;
}

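/* Release the resources referenced by a serialized conntrack action: the
 * helper module references and the conntrack template.  Called when the
 * flow actions containing the OVS_ACTION_ATTR_CT action are destroyed.
 */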
void ovs_ct_free_action(const struct nlattr *a)
{
	struct ovs_conntrack_info *ct_info = nla_data(a);

	__ovs_ct_free_action(ct_info);
}

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
{
	if (ct_info->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
		if (ct_info->nat)
			nf_nat_helper_put(ct_info->helper);
#endif
		nf_conntrack_helper_put(ct_info->helper);
	}
	if (ct_info->ct) {
		if (ct_info->timeout[0])
			nf_ct_destroy_timeout(ct_info->ct);
		nf_ct_tmpl_free(ct_info->ct);
	}
}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
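/* Per-netns setup for the ct limit feature: a hash table mapping a zone id
 * to its connection limit, plus an nf_conncount context (keyed by the
 * 32-bit zone id) that does the actual per-zone connection counting.
 */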
static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
{
	int i, err;

	ovs_net->ct_limit_info = kmalloc(sizeof(*ovs_net->ct_limit_info),
					 GFP_KERNEL);
	if (!ovs_net->ct_limit_info)
		return -ENOMEM;

	ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
	ovs_net->ct_limit_info->limits =
		kmalloc_array(CT_LIMIT_HASH_BUCKETS, sizeof(struct hlist_head),
			      GFP_KERNEL);
	if (!ovs_net->ct_limit_info->limits) {
		kfree(ovs_net->ct_limit_info);
		return -ENOMEM;
	}

	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);

	ovs_net->ct_limit_info->data =
		nf_conncount_init(net, NFPROTO_INET, sizeof(u32));

	if (IS_ERR(ovs_net->ct_limit_info->data)) {
		err = PTR_ERR(ovs_net->ct_limit_info->data);
		kfree(ovs_net->ct_limit_info->limits);
		kfree(ovs_net->ct_limit_info);
		pr_err("openvswitch: failed to init nf_conncount %d\n", err);
		return err;
	}
	return 0;
}

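/* Per-netns teardown.  This runs with ovs_mutex held (hence the
 * lockdep_ovsl_is_held() annotation below), so walking the hash chains
 * while queueing entries for kfree_rcu() is safe against concurrent
 * RCU readers.
 */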
static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
{
	const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
	int i;

	nf_conncount_destroy(net, NFPROTO_INET, info->data);
	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
		struct hlist_head *head = &info->limits[i];
		struct ovs_ct_limit *ct_limit;

		hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
					 lockdep_ovsl_is_held())
			kfree_rcu(ct_limit, rcu);
	}
	kfree(info->limits);
	kfree(info);
}

static struct sk_buff *
ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
			     struct ovs_header **ovs_reply_header)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *skb;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
					info->snd_seq,
					&dp_ct_limit_genl_family, 0, cmd);

	if (!*ovs_reply_header) {
		nlmsg_free(skb);
		return ERR_PTR(-EMSGSIZE);
	}
	(*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;

	return skb;
}

static bool check_zone_id(int zone_id, u16 *pzone)
{
	if (zone_id >= 0 && zone_id <= 65535) {
		*pzone = (u16)zone_id;
		return true;
	}
	return false;
}

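/* OVS_CT_LIMIT_ATTR_ZONE_LIMIT carries a flat array of struct ovs_zone_limit
 * entries rather than nested attributes, so the handlers below walk the
 * payload manually in NLA_ALIGN()ed steps.  The sentinel zone id
 * OVS_ZONE_LIMIT_DEFAULT_ZONE addresses the per-netns default limit.
 */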
static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = zone_limit->limit;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			struct ovs_ct_limit *ct_limit;

			ct_limit = kmalloc(sizeof(*ct_limit),
					   GFP_KERNEL_ACCOUNT);
			if (!ct_limit)
				return -ENOMEM;

			ct_limit->zone = zone;
			ct_limit->limit = zone_limit->limit;

			ovs_lock();
			ct_limit_set(info, ct_limit);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);

	return 0;
}

static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = OVS_CT_LIMIT_DEFAULT;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			ovs_lock();
			ct_limit_del(info, zone);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "del zone limit has %d unknown bytes", rem);

	return 0;
}

static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
					  struct sk_buff *reply)
{
	struct ovs_zone_limit zone_limit = {
		.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
		.limit = info->default_limit,
	};

	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}

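/* Report the configured limit together with the current connection count
 * for one zone.  Passing a NULL tuple to nf_conncount_count() only counts
 * the existing entries for the key without adding a new one.
 */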
static int __ovs_ct_limit_get_zone_limit(struct net *net,
					 struct nf_conncount_data *data,
					 u16 zone_id, u32 limit,
					 struct sk_buff *reply)
{
	struct nf_conntrack_zone ct_zone;
	struct ovs_zone_limit zone_limit;
	u32 conncount_key = zone_id;

	zone_limit.zone_id = zone_id;
	zone_limit.limit = limit;
	nf_ct_zone_init(&ct_zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);

	zone_limit.count = nf_conncount_count(net, data, &conncount_key, NULL,
					      &ct_zone);
	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}

static int ovs_ct_limit_get_zone_limit(struct net *net,
				       struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info,
				       struct sk_buff *reply)
{
	struct ovs_zone_limit *zone_limit;
	int rem, err;
	u32 limit;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			err = ovs_ct_limit_get_default_limit(info, reply);
			if (err)
				return err;
		} else if (unlikely(!check_zone_id(zone_limit->zone_id,
						   &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			rcu_read_lock();
			limit = ct_limit_get(info, zone);
			rcu_read_unlock();

			err = __ovs_ct_limit_get_zone_limit(
				net, info->data, zone, limit, reply);
			if (err)
				return err;
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "get zone limit has %d unknown bytes", rem);

	return 0;
}

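/* Dump the default limit followed by every per-zone limit.  The RCU read
 * lock is sufficient on this path because hash entries are only freed via
 * kfree_rcu().
 */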
static int ovs_ct_limit_get_all_zone_limit(struct net *net,
					   struct ovs_ct_limit_info *info,
					   struct sk_buff *reply)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	int i, err = 0;

	err = ovs_ct_limit_get_default_limit(info, reply);
	if (err)
		return err;

	rcu_read_lock();
	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
		head = &info->limits[i];
		hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
			err = __ovs_ct_limit_get_zone_limit(net, info->data,
				ct_limit->zone, ct_limit->limit, reply);
			if (err)
				goto exit_err;
		}
	}

exit_err:
	rcu_read_unlock();
	return err;
}

static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_SET,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = -EINVAL;
		goto exit_err;
	}

	err = ovs_ct_limit_set_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
					  ct_limit_info);
	if (err)
		goto exit_err;

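	/* Flip the static key that gates the per-packet limit check on the
	 * commit path.  Nothing in this file ever disables it again: once
	 * any limit has been configured, the check stays enabled.
	 */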
	static_branch_enable(&ovs_ct_limit_enabled);

	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}

static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_DEL,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = -EINVAL;
		goto exit_err;
	}

	err = ovs_ct_limit_del_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
					  ct_limit_info);
	if (err)
		goto exit_err;

	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}

static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct nlattr *nla_reply;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct net *net = sock_net(skb->sk);
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_GET,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	nla_reply = nla_nest_start_noflag(reply, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
	if (!nla_reply) {
		err = -EMSGSIZE;
		goto exit_err;
	}

	if (a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = ovs_ct_limit_get_zone_limit(
			net, a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT], ct_limit_info,
			reply);
		if (err)
			goto exit_err;
	} else {
		err = ovs_ct_limit_get_all_zone_limit(net, ct_limit_info,
						      reply);
		if (err)
			goto exit_err;
	}

	nla_nest_end(reply, nla_reply);
	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}

static const struct genl_small_ops ct_limit_genl_ops[] = {
	{ .cmd = OVS_CT_LIMIT_CMD_SET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN
					       * privilege.
					       */
		.doit = ovs_ct_limit_cmd_set,
	},
	{ .cmd = OVS_CT_LIMIT_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN
					       * privilege.
					       */
		.doit = ovs_ct_limit_cmd_del,
	},
	{ .cmd = OVS_CT_LIMIT_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,	      /* OK for unprivileged users. */
		.doit = ovs_ct_limit_cmd_get,
	},
};

static const struct genl_multicast_group ovs_ct_limit_multicast_group = {
	.name = OVS_CT_LIMIT_MCGROUP,
};

struct genl_family dp_ct_limit_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_CT_LIMIT_FAMILY,
	.version = OVS_CT_LIMIT_VERSION,
	.maxattr = OVS_CT_LIMIT_ATTR_MAX,
	.policy = ct_limit_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = ct_limit_genl_ops,
	.n_small_ops = ARRAY_SIZE(ct_limit_genl_ops),
	.resv_start_op = OVS_CT_LIMIT_CMD_GET + 1,
	.mcgrps = &ovs_ct_limit_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};
#endif

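/* Per-netns init: request connlabel space for the full 128-bit label area
 * (nf_connlabels_get() takes the highest bit index, hence n_bits - 1), then
 * set up the ct limit state when conncount support is built in.
 */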
int ovs_ct_init(struct net *net)
{
	unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		ovs_net->xt_label = false;
		OVS_NLERR(true, "Failed to set connlabel length");
	} else {
		ovs_net->xt_label = true;
	}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	return ovs_ct_limit_init(net, ovs_net);
#else
	return 0;
#endif
}

void ovs_ct_exit(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	ovs_ct_limit_exit(net, ovs_net);
#endif

	if (ovs_net->xt_label)
		nf_connlabels_put(net);
}