/*
 * IPv6 fragment reassembly for connection tracking
 *
 * Copyright (C)2004 USAGI/WIDE Project
 *
 * Author:
 *	Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *
 * Based on: net/ipv6/reassembly.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv6-nf: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6_frag.h>

#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_ecn.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <linux/sysctl.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>

static const char nf_frags_cache_name[] = "nf-frags";

static struct inet_frags nf_frags;

#ifdef CONFIG_SYSCTL

static struct ctl_table nf_ct_frag6_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_frag6_timeout",
		.data		= &init_net.nf_frag.frags.timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_frag6_low_thresh",
		.data		= &init_net.nf_frag.frags.low_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra2		= &init_net.nf_frag.frags.high_thresh
	},
	{
		.procname	= "nf_conntrack_frag6_high_thresh",
		.data		= &init_net.nf_frag.frags.high_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &init_net.nf_frag.frags.low_thresh
	},
	{ }
};

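/* Register the frag6 sysctls for a netns.  The template table points at
 * init_net's counters; every other namespace gets a kmemdup'd copy rewired
 * to its own data, with the child's high_thresh additionally capped by
 * init_net's value via extra2.
 */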
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = nf_ct_frag6_sysctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->nf_frag.frags.timeout;
		table[1].data = &net->nf_frag.frags.low_thresh;
		table[1].extra2 = &net->nf_frag.frags.high_thresh;
		table[2].data = &net->nf_frag.frags.high_thresh;
		table[2].extra1 = &net->nf_frag.frags.low_thresh;
		table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
	}

	hdr = register_net_sysctl(net, "net/netfilter", table);
	if (hdr == NULL)
		goto err_reg;

	net->nf_frag_frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->nf_frag_frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->nf_frag_frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

#else
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	return 0;
}
static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
}
#endif

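/* Map the packet's ECN codepoint to a single bit.  Bits from all fragments
 * are OR-ed into fq->ecn, and ip_frag_ecn_table[] flags invalid
 * combinations (0xff) at reassembly time.
 */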
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

static void nf_ct_frag6_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;
	struct net *net;

	fq = container_of(frag, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, nf_frag.frags);

	ip6frag_expire_frag_queue(net, fq);
}

/* Creation primitives. */
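/* Look up (or create) the queue this fragment belongs to.  Fragments are
 * keyed on the (id, source address, destination address) tuple plus the
 * defrag 'user' id and the inbound interface index.
 */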
static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
				  const struct ipv6hdr *hdr, int iif)
{
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr->saddr,
		.daddr = hdr->daddr,
		.user = user,
		.iif = iif,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->nf_frag.frags, &key);
	if (!q)
		return NULL;

	return container_of(q, struct frag_queue, q);
}


static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
			     const struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	unsigned int payload_len;
	int offset, end;
	u8 ecn;

	if (fq->q.flags & INET_FRAG_COMPLETE) {
		pr_debug("Already completed\n");
		goto err;
	}

	payload_len = ntohs(ipv6_hdr(skb)->payload_len);

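	/* The fragment offset field counts 8-octet units, so masking off
	 * the low three flag bits of frag_off yields the byte offset
	 * directly.  'end' is one past this fragment's last payload byte:
	 * the IPv6 payload length minus the extension headers (including
	 * the fragment header itself) that precede the fragment data.
	 */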
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (payload_len -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		pr_debug("offset is too large.\n");
		return -EINVAL;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

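	/* With CHECKSUM_COMPLETE, skb->csum covers everything from the
	 * network header on; subtract the headers up to and including the
	 * fragment header so the checksum matches the payload that remains
	 * after the pskb_pull() below.
	 */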
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {
			pr_debug("already received last fragment\n");
			goto err;
		}
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			pr_debug("end of fragment not rounded to 8 bytes.\n");
			inet_frag_kill(&fq->q);
			return -EPROTO;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN) {
				pr_debug("last packet already reached.\n");
				goto err;
			}
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
		pr_debug("queue: message is too short.\n");
		goto err;
	}
	if (pskb_trim_rcsum(skb, end - offset)) {
		pr_debug("Can't trim\n");
		goto err;
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || prev->ip_defrag_offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (next->ip_defrag_offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC 5722, section 4: when reassembling an IPv6 datagram, if one
	 * or more of its constituent fragments is determined to be an
	 * overlapping fragment, the entire datagram (and any constituent
	 * fragments, including those not yet received) MUST be silently
	 * discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (prev->ip_defrag_offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && next->ip_defrag_offset < end)
		goto discard_fq;

	/* Note: skb->ip_defrag_offset and skb->dev share the same location. */
	if (skb->dev)
		fq->iif = skb->dev->ifindex;
	/* Make sure the compiler won't do silly aliasing games. */
	barrier();
	skb->ip_defrag_offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	if (payload_len > fq->q.max_size)
		fq->q.max_size = payload_len;
	add_frag_mem_limit(fq->q.net, skb->truesize);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

	return 0;

discard_fq:
	inet_frag_kill(&fq->q);
err:
	return -EINVAL;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	returns true if *prev skb has been transformed into the reassembled
 *	skb, false otherwise.
 */
static bool
nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	u8 ecn;

	inet_frag_kill(&fq->q);

	WARN_ON(head == NULL);
	WARN_ON(head->ip_defrag_offset != 0);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		return false;

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN) {
		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
				    payload_len);
		return false;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		return false;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (clone == NULL)
			return false;

		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;

		add_frag_mem_limit(fq->q.net, clone->truesize);
	}

	/* morph head into last received skb: prev.
	 *
	 * This allows callers of ipv6 conntrack defrag to continue
	 * to use the last skb(frag) passed into the reasm engine.
	 * The last skb frag 'silently' turns into the full reassembled skb.
	 *
	 * Since prev is also part of q->fragments we have to clone it first.
	 */
	if (head != prev) {
		struct sk_buff *iter;

		fp = skb_clone(prev, GFP_ATOMIC);
		if (!fp)
			return false;

		fp->next = prev->next;

		iter = head;
		while (iter) {
			if (iter->next == prev) {
				iter->next = fp;
				break;
			}
			iter = iter->next;
		}

		skb_morph(prev, head);
		prev->next = head->next;
		consume_skb(head);
		head = prev;
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

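	/* Fold every remaining fragment's length, checksum and truesize
	 * into the head skb; the fragments themselves now hang off its
	 * frag_list.
	 */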
	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		fp->sk = NULL;
	}
	sub_frag_mem_limit(fq->q.net, head->truesize);

	head->ignore_df = 1;
	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
	IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	fq->q.fragments = NULL;
	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;

	return true;
}

/*
 * Find the header just before the Fragment Header.
 *
 * On success, returns 0 and sets:
 * (*prevhdrp): the value of the "Next Header" field in the header
 *		just before the Fragment Header.
 * (*prevhoff): the offset of the "Next Header" field in the header
 *		just before the Fragment Header.
 * (*fhoff)   : the offset of the Fragment Header.
 *
 * Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
 */
static int
find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	const int netoff = skb_network_offset(skb);
	u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
	int start = netoff + sizeof(struct ipv6hdr);
	int len = skb->len - start;
	u8 prevhdr = NEXTHDR_IPV6;

	while (nexthdr != NEXTHDR_FRAGMENT) {
		struct ipv6_opt_hdr hdr;
		int hdrlen;

		if (!ipv6_ext_hdr(nexthdr))
			return -1;
		if (nexthdr == NEXTHDR_NONE) {
			pr_debug("next header is none\n");
			return -1;
		}
		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
			pr_debug("too short\n");
			return -1;
		}
		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
			BUG();
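		/* AH (RFC 4302) encodes its length in 32-bit words minus
		 * two, unlike the other extension headers, which use
		 * 8-octet units.
		 */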
		if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hdr.hdrlen + 2) << 2;
		else
			hdrlen = ipv6_optlen(&hdr);

		prevhdr = nexthdr;
		prev_nhoff = start;

		nexthdr = hdr.nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	if (len < 0)
		return -1;

	*prevhdrp = prevhdr;
	*prevhoff = prev_nhoff;
	*fhoff = start;

	return 0;
}

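/* Entry point for conntrack defragmentation: feed one skb into the
 * reassembly machinery.  Returns 0 when the skb needs no reassembly or
 * now carries the complete datagram, -EINPROGRESS once the queue has
 * taken ownership of the skb, or a negative error.
 */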
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
	u16 savethdr = skb->transport_header;
	struct net_device *dev = skb->dev;
	int fhoff, nhoff, ret;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr;
	u8 prevhdr;

	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
		pr_debug("payload len = 0\n");
		return 0;
	}

	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return 0;

	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
		return -ENOMEM;

	skb_set_transport_header(skb, fhoff);
	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

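	/* A non-final fragment smaller than the IPv6 minimum MTU (1280
	 * octets) is technically legal but is a classic reassembly DoS
	 * vector, so drop it outright.
	 */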
	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
	    fhdr->frag_off & htons(IP6_MF))
		return -EINVAL;

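	/* The skb may sit in the frag queue long after this call returns;
	 * drop any socket ownership so the owner is not charged for queued
	 * fragments.
	 */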
	skb_orphan(skb);
	fq = fq_find(net, fhdr->identification, user, hdr,
		     skb->dev ? skb->dev->ifindex : 0);
	if (fq == NULL) {
		pr_debug("Can't find and can't create new queue\n");
		return -ENOMEM;
	}

	spin_lock_bh(&fq->q.lock);

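	/* Queue the fragment.  -EPROTO marks a fragment that violates the
	 * 8-octet alignment rule; restore the transport header and report
	 * success so the stack still sees the packet and can generate an
	 * ICMPv6 parameter problem (see the RFC2460 note above).
	 */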
	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
	if (ret < 0) {
		if (ret == -EPROTO) {
			skb->transport_header = savethdr;
			ret = 0;
		}
		goto out_unlock;
	}

	/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
	 * must be returned.
	 */
	ret = -EINPROGRESS;
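	/* Reassembly is complete once both the first and last fragments
	 * have arrived and the accumulated payload (meat) spans the whole
	 * datagram length.
	 */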
	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len &&
	    nf_ct_frag6_reasm(fq, skb, dev))
		ret = 0;
	else
		skb_dst_drop(skb);

out_unlock:
	spin_unlock_bh(&fq->q.lock);
	inet_frag_put(&fq->q);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);

static int nf_ct_net_init(struct net *net)
{
	int res;

	net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
	net->nf_frag.frags.f = &nf_frags;

	res = inet_frags_init_net(&net->nf_frag.frags);
	if (res < 0)
		return res;
	res = nf_ct_frag6_sysctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&net->nf_frag.frags);
	return res;
}

static void nf_ct_net_exit(struct net *net)
{
	nf_ct_frags6_sysctl_unregister(net);
	inet_frags_exit_net(&net->nf_frag.frags);
}

static struct pernet_operations nf_ct_net_ops = {
	.init = nf_ct_net_init,
	.exit = nf_ct_net_exit,
};

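/* Fragment queues live in a per-netns rhashtable keyed by
 * frag_v6_compare_key (see fq_find()); the hash and compare helpers are
 * shared with the core IPv6 reassembly code via <net/ipv6_frag.h>.
 */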
static const struct rhashtable_params nfct_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};

int nf_ct_frag6_init(void)
{
	int ret = 0;

	nf_frags.constructor = ip6frag_init;
	nf_frags.destructor = NULL;
	nf_frags.qsize = sizeof(struct frag_queue);
	nf_frags.frag_expire = nf_ct_frag6_expire;
	nf_frags.frags_cache_name = nf_frags_cache_name;
	nf_frags.rhash_params = nfct_rhash_params;
	ret = inet_frags_init(&nf_frags);
	if (ret)
		goto out;
	ret = register_pernet_subsys(&nf_ct_net_ops);
	if (ret)
		inet_frags_fini(&nf_frags);

out:
	return ret;
}

void nf_ct_frag6_cleanup(void)
{
	unregister_pernet_subsys(&nf_ct_net_ops);
	inet_frags_fini(&nf_frags);
}