// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

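/* Rebuild the outer VLAN header from skb metadata: when the tag has been
 * stripped by hardware offload, the TCI and the outer protocol live in the
 * skb itself rather than in the packet data.
 */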
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* add the vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

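/* Locate the start of the inner (encapsulated) header. Only UDP (fixed-size
 * header) and TCP (variable data offset) transports are supported; any other
 * transport, or a fragmented packet, cannot be dissected.
 */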
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
	unsigned int thoff = nft_thoff(pkt);

	if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_UDP:
		pkt->inneroff = thoff + sizeof(struct udphdr);
		break;
	case IPPROTO_TCP: {
		struct tcphdr *th, _tcph;

		th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
		if (!th)
			return -1;

		pkt->inneroff = thoff + __tcp_hdrlen(th);
	}
		break;
	default:
		return -1;
	}

	pkt->flags |= NFT_PKTINFO_INNER;

	return 0;
}

static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
	if (!(pkt->flags & NFT_PKTINFO_INNER) &&
	    __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
		return -1;

	return pkt->inneroff;
}

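/* Copy priv->len bytes at priv->offset from the selected header base into
 * the destination register. A trailing partial register word is zeroed
 * first so that unused bytes do not leak stale register contents.
 */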
void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_LEN]		= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static bool nft_payload_reduce(struct nft_regs_track *track,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct nft_payload *payload;

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	payload = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->base != payload->base ||
	    priv->offset != payload->offset ||
	    priv->len != payload->len) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return nft_expr_reduce_bitwise(track, expr);
}

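/* Build the flow dissector mask for a match of priv_len bytes within a
 * field_len-sized field: exact-length matches get an all-ones mask, shorter
 * matches keep only the leading priv_len bytes, and a length exceeding the
 * field cannot be offloaded.
 */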
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);
	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}

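/* Map link-layer header matches (ethernet addresses, protocol, VLAN and
 * C-VLAN tags) onto their flow dissector keys.
 */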
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
	     sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

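/* Incremental checksum update in the style of inet_proto_csum_replace*():
 * fold the old bytes (fsum) out and the new bytes (tsum) in. A result of
 * zero is stored as CSUM_MANGLED_0, since an on-wire zero means "no
 * checksum computed" for UDP.
 */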
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

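/* A zero UDP checksum field means the sender did not compute one (RFC 768),
 * so there is nothing to update.
 */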
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	if (pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += nft_thoff(pkt);
	return 0;
}

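/* SCTP uses a CRC32c over the whole packet instead of the ones' complement
 * internet checksum, so it must be recomputed from scratch after mangling.
 */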
static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

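/* Write the source register into the packet and fix up checksums: the
 * internet checksum is updated incrementally, the pseudo-header flag
 * additionally patches the layer 4 checksum field, and SCTP gets a full
 * CRC32c recomputation after the store.
 */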
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
	      priv->base != NFT_PAYLOAD_INNER_HEADER) ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
	    pkt->tprot == IPPROTO_SCTP &&
	    skb->ip_summed != CHECKSUM_PARTIAL) {
		if (pkt->fragoff == 0 &&
		    nft_payload_csum_sctp(skb, nft_thoff(pkt)))
			goto err;
	}

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);
	u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
	int err;

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
		err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
					  &csum_offset);
		if (err < 0)
			return err;

		priv->csum_offset = csum_offset;
	}
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}
	priv->csum_type = csum_type;

	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
				       priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static bool nft_payload_set_reduce(struct nft_regs_track *track,
				   const struct nft_expr *expr)
{
	int i;

	for (i = 0; i < NFT_REG32_NUM; i++) {
		if (!track->regs[i].selector)
			continue;

		if (track->regs[i].selector->ops != &nft_payload_ops &&
		    track->regs[i].selector->ops != &nft_payload_fast_ops)
			continue;

		__nft_reg_track_cancel(track, i);
	}

	return false;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
	.reduce		= nft_payload_set_reduce,
};

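/* Pick the expression flavour: a load of at most four bytes, power-of-two
 * sized and naturally aligned, from the network or transport header can use
 * the fast ops wired into the core evaluation loop; everything else takes
 * the generic ops, and a source register selects the payload-set variant.
 */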
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;
	int err;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
	case NFT_PAYLOAD_INNER_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
	if (err < 0)
		return ERR_PTR(err);

	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
	if (err < 0)
		return ERR_PTR(err);

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};