// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 * 	YOSHIFUJI Hideaki @USAGI
 * 		Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>

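/* Per-CPU context used to re-inject packets into the stack from a
 * tasklet: xfrm_trans_queue() appends skbs to @queue and schedules
 * @tasklet, which drains the queue in xfrm_trans_reinject().
 */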
struct xfrm_trans_tasklet {
	struct tasklet_struct tasklet;
	struct sk_buff_head queue;
};

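/* Control-buffer layout for skbs sitting on the reinjection queue,
 * accessed via XFRM_TRANS_SKB_CB() below.  The header union keeps the
 * space the IPv4/IPv6 code expects in skb->cb intact; @finish is the
 * callback run when the skb is dequeued.
 */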
struct xfrm_trans_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static struct kmem_cache *secpath_cachep __ro_after_init;

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

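/* Register the address-family specific input hooks.  Returns
 * -EAFNOSUPPORT for an out-of-range family and -EEXIST if the slot
 * for this family is already taken.
 */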
int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

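/* Remove a previously registered afinfo.  The synchronize_rcu() call
 * ensures no reader still holds a reference before the caller may
 * free @afinfo.
 */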
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

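/* Look up the afinfo for @family.  Note the asymmetric locking: on
 * success this returns with rcu_read_lock() held and the caller must
 * drop it; on failure the lock has already been released here.
 */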
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo)))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

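/* Dispatch to the per-family receive callback, then drop the RCU read
 * lock that xfrm_input_get_afinfo() left held.
 */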
static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	int ret;
	const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);

	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	rcu_read_unlock();

	return ret;
}

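/* Final teardown of a secpath once its refcount has dropped to zero:
 * release the reference held on every xfrm_state in the vector, then
 * return the structure to the slab cache.
 */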
void __secpath_destroy(struct sec_path *sp)
{
	int i;
	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
	kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);

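/* Allocate a fresh secpath, copying @src if given.  The copy takes a
 * reference on each state in the source vector; the memcpy() clobbers
 * the refcount, so it is re-initialized to 1 afterwards.
 */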
struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
	if (!sp)
		return NULL;

	sp->len = 0;
	sp->olen = 0;

	memset(sp->ovec, 0, sizeof(sp->ovec[XFRM_MAX_OFFLOAD_DEPTH]));

	if (src) {
		int i;

		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
	refcount_set(&sp->refcnt, 1);
	return sp;
}
EXPORT_SYMBOL(secpath_dup);

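/* Ensure the skb carries a secpath that is safe to modify: if none is
 * attached, or the attached one is shared (refcount != 1), install a
 * private copy (copy-on-write).
 */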
int secpath_set(struct sk_buff *skb)
{
	struct sec_path *sp;

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || refcount_read(&skb->sp->refcnt) != 1) {
		sp = secpath_dup(skb->sp);
		if (!sp)
			return -ENOMEM;

		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}
	return 0;
}
EXPORT_SYMBOL(secpath_set);

/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		/* IPComp carries a 16-bit CPI; widen it into SPI format. */
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);

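/* Prepare a packet for the inner processing step: let the outer mode's
 * address family extract the inner headers, pick the inner mode (which
 * depends on the inner protocol when the selector family is
 * AF_UNSPEC), and hand the skb to that mode's input2 handler.
 */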
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_mode *inner_mode = x->inner_mode;
	int err;

	err = x->outer_mode->afinfo->extract_input(x, skb);
	if (err)
		return err;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL)
			return -EAFNOSUPPORT;
	}

	skb->protocol = inner_mode->afinfo->eth_proto;
	return inner_mode->input2(x, skb);
}
EXPORT_SYMBOL(xfrm_prepare_input);

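/* Main IPsec receive path.  The encap_type argument encodes how we got
 * here: > 0 is a UDP encapsulation type, 0 is a plain IPsec packet,
 * -1 is resumption after asynchronous crypto, and < -1 is entry via
 * GRO.  The do/while loop below strips nested transforms one state at
 * a time, growing the secpath up to XFRM_MAX_DEPTH entries.
 */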
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (encap_type < 0) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop;
		}

		family = x->outer_mode->afinfo->family;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}

		/* encap_type < -1 indicates a GRO call. */
		encap_type = 0;
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	err = secpath_set(skb);
	if (err) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		skb->sp->xvec[skb->sp->len++] = x;

lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		skb_dst_force(skb);
		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
				goto drop;
			}
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset(skb);

	if (decaps) {
		if (skb->sp)
			skb->sp->olen = 0;
		skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
		if (xfrm_gro) {
			if (skb->sp)
				skb->sp->olen = 0;
			skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

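/* Entry point for resuming processing once asynchronous crypto has
 * completed; encap_type -1 routes xfrm_input() to the resume label.
 */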
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

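/* Tasklet handler: splice the per-CPU queue onto a local list and run
 * each skb's saved finish callback.
 */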
static void xfrm_trans_reinject(unsigned long data)
{
	struct xfrm_trans_tasklet *trans = (void *)data;
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&trans->queue, &queue);

	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
}

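/* Defer @finish to this CPU's reinjection tasklet.  The queue length
 * is capped at netdev_max_backlog; -ENOBUFS is returned (and the skb
 * left untouched) when the cap is hit.  Callers run in softirq
 * context, which the lockless __skb_queue_tail() relies on.
 */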
int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
		return -ENOBUFS;

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	__skb_queue_tail(&trans->queue, skb);
	tasklet_schedule(&trans->tasklet);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue);

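/* Boot-time setup: attach the GRO cells to a dummy netdev, create the
 * secpath slab cache (SLAB_PANIC, so failure there is fatal), and
 * initialize each CPU's reinjection tasklet.  A gro_cells_init()
 * failure is tolerated by clearing gro_cells.cells, which lets
 * gro_cells_receive() fall back to plain netif_rx().
 */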
void __init xfrm_input_init(void)
{
	int err;
	int i;

	init_dummy_netdev(&xfrm_napi_dev);
	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
	if (err)
		gro_cells.cells = NULL;

	secpath_cachep = kmem_cache_create("secpath_cache",
					   sizeof(struct sec_path),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		__skb_queue_head_init(&trans->queue);
		tasklet_init(&trans->tasklet, xfrm_trans_reinject,
			     (unsigned long)trans);
	}
}