// SPDX-License-Identifier: GPL-2.0
#include <linux/jhash.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/vmalloc.h>
#include <net/genetlink.h>
#include <net/ila.h>
#include <net/netns/generic.h>
#include <uapi/linux/genetlink.h>
#include "ila.h"

struct ila_xlat_params {
	struct ila_params ip;
	int ifindex;
};

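/* A single translation mapping. Entries that share the same locator_match
 * key are chained through the RCU-protected next pointer, kept in
 * decreasing order of specificity (see ila_order()).
 */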
struct ila_map {
	struct ila_xlat_params xp;
	struct rhash_head node;
	struct ila_map __rcu *next;
	struct rcu_head rcu;
};

#define MAX_LOCKS 1024
#define LOCKS_PER_CPU 10

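/* Allocate the per-namespace array of bucket spinlocks that serialize
 * modifications to the mapping table and its per-locator sublists.
 */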
static int alloc_ila_locks(struct ila_net *ilan)
{
	return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask,
				      MAX_LOCKS, LOCKS_PER_CPU,
				      GFP_KERNEL);
}

static u32 hashrnd __read_mostly;
static __always_inline void __ila_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static inline u32 ila_locator_hash(struct ila_locator loc)
{
	u32 *v = (u32 *)loc.v32;

	__ila_hash_secret_init();
	return jhash_2words(v[0], v[1], hashrnd);
}

static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
				       struct ila_locator loc)
{
	return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask];
}

static inline int ila_cmp_wildcards(struct ila_map *ila,
				    struct ila_addr *iaddr, int ifindex)
{
	return (ila->xp.ifindex && ila->xp.ifindex != ifindex);
}

static inline int ila_cmp_params(struct ila_map *ila,
				 struct ila_xlat_params *xp)
{
	return (ila->xp.ifindex != xp->ifindex);
}

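/* rhashtable compare callback: entries are keyed by locator_match. */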
static int ila_cmpfn(struct rhashtable_compare_arg *arg,
		     const void *obj)
{
	const struct ila_map *ila = obj;

	return (ila->xp.ip.locator_match.v64 != *(__be64 *)arg->key);
}

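/* Specificity score of a mapping: entries bound to an interface rank
 * above wildcard entries for the same locator.
 */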
static inline int ila_order(struct ila_map *ila)
{
	int score = 0;

	if (ila->xp.ifindex)
		score += 1 << 1;

	return score;
}

static const struct rhashtable_params rht_params = {
	.nelem_hint = 1024,
	.head_offset = offsetof(struct ila_map, node),
	.key_offset = offsetof(struct ila_map, xp.ip.locator_match),
	.key_len = sizeof(u64), /* identifier */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
	.obj_cmpfn = ila_cmpfn,
};

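/* Extract translation parameters from the generic netlink attributes,
 * falling back to defaults for the optional checksum mode and identifier
 * type.
 */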
static int parse_nl_config(struct genl_info *info,
			   struct ila_xlat_params *xp)
{
	memset(xp, 0, sizeof(*xp));

	if (info->attrs[ILA_ATTR_LOCATOR])
		xp->ip.locator.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR]);

	if (info->attrs[ILA_ATTR_LOCATOR_MATCH])
		xp->ip.locator_match.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR_MATCH]);

	if (info->attrs[ILA_ATTR_CSUM_MODE])
		xp->ip.csum_mode = nla_get_u8(info->attrs[ILA_ATTR_CSUM_MODE]);
	else
		xp->ip.csum_mode = ILA_CSUM_NO_ACTION;

	if (info->attrs[ILA_ATTR_IDENT_TYPE])
		xp->ip.ident_type = nla_get_u8(
			info->attrs[ILA_ATTR_IDENT_TYPE]);
	else
		xp->ip.ident_type = ILA_ATYPE_USE_FORMAT;

	if (info->attrs[ILA_ATTR_IFINDEX])
		xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);

	return 0;
}

/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
						   int ifindex,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc,
				     rht_params);
	while (ila) {
		if (!ila_cmp_wildcards(ila, iaddr, ifindex))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				     &xp->ip.locator_match,
				     rht_params);
	while (ila) {
		if (!ila_cmp_params(ila, xp))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

static inline void ila_release(struct ila_map *ila)
{
	kfree_rcu(ila, rcu);
}

static void ila_free_node(struct ila_map *ila)
{
	struct ila_map *next;

	/* Assume rcu_readlock held */
	while (ila) {
		next = rcu_access_pointer(ila->next);
		ila_release(ila);
		ila = next;
	}
}

static void ila_free_cb(void *ptr, void *arg)
{
	ila_free_node((struct ila_map *)ptr);
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila);

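/* Netfilter hook: translate the destination address of incoming IPv6
 * packets at PRE_ROUTING (sir2ila == false).
 */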
static unsigned int
ila_nf_input(void *priv,
	     struct sk_buff *skb,
	     const struct nf_hook_state *state)
{
	ila_xlat_addr(skb, false);
	return NF_ACCEPT;
}

static const struct nf_hook_ops ila_nf_hook_ops[] = {
	{
		.hook = ila_nf_input,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = -1,
	},
};

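/* Add a mapping. The netfilter hooks for the namespace are registered
 * lazily when the first mapping is installed; entries sharing a
 * locator_match are linked into a sublist ordered by ila_order().
 */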
static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = 0, order;

	if (!ilan->xlat.hooks_registered) {
		/* We defer registering net hooks in the namespace until the
		 * first mapping is added.
		 */
		err = nf_register_net_hooks(net, ila_nf_hook_ops,
					    ARRAY_SIZE(ila_nf_hook_ops));
		if (err)
			return err;

		ilan->xlat.hooks_registered = true;
	}

	ila = kzalloc(sizeof(*ila), GFP_KERNEL);
	if (!ila)
		return -ENOMEM;

	ila_init_saved_csum(&xp->ip);

	ila->xp = *xp;

	order = ila_order(ila);

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match,
				      rht_params);
	if (!head) {
		/* New entry for the rhash_table */
		err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table,
						    &ila->node, rht_params);
	} else {
		struct ila_map *tila = head, *prev = NULL;

		do {
			if (!ila_cmp_params(tila, xp)) {
				err = -EEXIST;
				goto out;
			}

			if (order > ila_order(tila))
				break;

			prev = tila;
			tila = rcu_dereference_protected(tila->next,
							 lockdep_is_held(lock));
		} while (tila);

		if (prev) {
			/* Insert in sub list of head */
			RCU_INIT_POINTER(ila->next, tila);
			rcu_assign_pointer(prev->next, ila);
		} else {
			/* Make this ila new head */
			RCU_INIT_POINTER(ila->next, head);
			err = rhashtable_replace_fast(&ilan->xlat.rhash_table,
						      &head->node,
						      &ila->node, rht_params);
			if (err)
				goto out;
		}
	}

out:
	spin_unlock(lock);

	if (err)
		kfree(ila);

	return err;
}

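/* Remove the mapping matching xp. If the entry was the head of a sublist,
 * the next entry is promoted into the rhashtable in its place.
 */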
static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head, *prev;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = -ENOENT;

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match, rht_params);
	ila = head;

	prev = NULL;

	while (ila) {
		if (ila_cmp_params(ila, xp)) {
			prev = ila;
			ila = rcu_dereference_protected(ila->next,
							lockdep_is_held(lock));
			continue;
		}

		err = 0;

		if (prev) {
			/* Not head, just delete from list */
			rcu_assign_pointer(prev->next, ila->next);
		} else {
			/* It is the head. If there is something in the
			 * sublist we need to make a new head.
			 */
			head = rcu_dereference_protected(ila->next,
							 lockdep_is_held(lock));
			if (head) {
				/* Put first entry in the sublist into the
				 * table
				 */
				err = rhashtable_replace_fast(
					&ilan->xlat.rhash_table, &ila->node,
					&head->node, rht_params);
				if (err)
					goto out;
			} else {
				/* Entry no longer used */
				err = rhashtable_remove_fast(
						&ilan->xlat.rhash_table,
						&ila->node, rht_params);
			}
		}

		ila_release(ila);

		break;
	}

out:
	spin_unlock(lock);

	return err;
}

int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params p;
	int err;

	err = parse_nl_config(info, &p);
	if (err)
		return err;

	return ila_add_mapping(net, &p);
}

int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params xp;
	int err;

	err = parse_nl_config(info, &xp);
	if (err)
		return err;

	ila_del_mapping(net, &xp);

	return 0;
}

static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan,
					    struct ila_map *ila)
{
	return ila_get_lock(ilan, ila->xp.ip.locator_match);
}

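/* Flush all mappings: walk the table and remove each chain under its
 * bucket lock, freeing the nodes afterwards via RCU.
 */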
int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct rhashtable_iter iter;
	struct ila_map *ila;
	spinlock_t *lock;
	int ret;

	ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter, GFP_KERNEL);
	if (ret)
		goto done;

	rhashtable_walk_start(&iter);

	for (;;) {
		ila = rhashtable_walk_next(&iter);

		if (IS_ERR(ila)) {
			if (PTR_ERR(ila) == -EAGAIN)
				continue;
			ret = PTR_ERR(ila);
			goto done;
		} else if (!ila) {
			break;
		}

		lock = lock_from_ila_map(ilan, ila);

		spin_lock(lock);

		ret = rhashtable_remove_fast(&ilan->xlat.rhash_table,
					     &ila->node, rht_params);
		if (!ret)
			ila_free_node(ila);

		spin_unlock(lock);

		if (ret)
			break;
	}

done:
	rhashtable_walk_stop(&iter);
	return ret;
}

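/* Fill a netlink message with the attributes describing one mapping. */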
static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
{
	if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
			      (__force u64)ila->xp.ip.locator.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
			      (__force u64)ila->xp.ip.locator_match.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) ||
	    nla_put_u8(msg, ILA_ATTR_CSUM_MODE, ila->xp.ip.csum_mode) ||
	    nla_put_u8(msg, ILA_ATTR_IDENT_TYPE, ila->xp.ip.ident_type))
		return -1;

	return 0;
}

static int ila_dump_info(struct ila_map *ila,
			 u32 portid, u32 seq, u32 flags,
			 struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ila_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (ila_fill_info(ila, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct sk_buff *msg;
	struct ila_xlat_params xp;
	struct ila_map *ila;
	int ret;

	ret = parse_nl_config(info, &xp);
	if (ret)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rcu_read_lock();

	ila = ila_lookup_by_params(&xp, ilan);
	if (ila) {
		ret = ila_dump_info(ila,
				    info->snd_portid,
				    info->snd_seq, 0, msg,
				    info->genlhdr->cmd);
	}

	rcu_read_unlock();

	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

struct ila_dump_iter {
	struct rhashtable_iter rhiter;
	int skip;
};

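/* Begin a netlink dump: allocate the iterator state, initialize the
 * rhashtable walk and stash the state in cb->args[0].
 */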
int ila_xlat_nl_dump_start(struct netlink_callback *cb)
{
	struct net *net = sock_net(cb->skb->sk);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_dump_iter *iter;
	int ret;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter->rhiter,
				   GFP_KERNEL);
	if (ret) {
		kfree(iter);
		return ret;
	}

	iter->skip = 0;
	cb->args[0] = (long)iter;

	return ret;
}

int ila_xlat_nl_dump_done(struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];

	rhashtable_walk_exit(&iter->rhiter);

	kfree(iter);

	return 0;
}

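/* Dump callback: resume the rhashtable walk, skip entries already emitted
 * on a previous pass and emit one ILA_CMD_GET message per mapping.
 */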
int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
	struct rhashtable_iter *rhiter = &iter->rhiter;
	int skip = iter->skip;
	struct ila_map *ila;
	int ret;

	rhashtable_walk_start(rhiter);

	/* Get first entry */
	ila = rhashtable_walk_peek(rhiter);

	if (ila && !IS_ERR(ila) && skip) {
		/* Skip over visited entries */

		while (ila && skip) {
			/* Skip over any ila entries in this list that we
			 * have already dumped.
			 */
			ila = rcu_access_pointer(ila->next);
			skip--;
		}
	}

	skip = 0;

	for (;;) {
		if (IS_ERR(ila)) {
			ret = PTR_ERR(ila);
			if (ret == -EAGAIN) {
				/* Table has changed and iter has reset. Return
				 * -EAGAIN to the application even if we have
				 * written data to the skb. The application
				 * needs to deal with this.
				 */

				goto out_ret;
			} else {
				break;
			}
		} else if (!ila) {
			ret = 0;
			break;
		}

		while (ila) {
			ret = ila_dump_info(ila, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq, NLM_F_MULTI,
					    skb, ILA_CMD_GET);
			if (ret)
				goto out;

			skip++;
			ila = rcu_access_pointer(ila->next);
		}

		skip = 0;
		ila = rhashtable_walk_next(rhiter);
	}

out:
	iter->skip = skip;
	ret = (skb->len ? : ret);

out_ret:
	rhashtable_walk_stop(rhiter);
	return ret;
}

#define ILA_HASH_TABLE_SIZE 1024

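/* Per-namespace initialization: allocate the bucket locks and the
 * translation rhashtable.
 */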
int ila_xlat_init_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	int err;

	err = alloc_ila_locks(ilan);
	if (err)
		return err;

	err = rhashtable_init(&ilan->xlat.rhash_table, &rht_params);
	if (err) {
		free_bucket_spinlocks(ilan->xlat.locks);
		return err;
	}

	return 0;
}

void ila_xlat_exit_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);

	rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);

	free_bucket_spinlocks(ilan->xlat.locks);

	if (ilan->xlat.hooks_registered)
		nf_unregister_net_hooks(net, ila_nf_hook_ops,
					ARRAY_SIZE(ila_nf_hook_ops));
}

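/* Look up the destination address of skb and, if a mapping matches,
 * rewrite its locator for the requested direction (sir2ila).
 */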
static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
{
	struct ila_map *ila;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);

	/* Assumes skb contains a valid IPv6 header that is pulled */

	/* No check here that the ILA type in the mapping matches what is in
	 * the address. We assume that whatever the sender gave us can be
	 * translated. The checksum mode, however, is relevant.
	 */

	rcu_read_lock();

	ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
	if (ila)
		ila_update_ipv6_locator(skb, &ila->xp.ip, sir2ila);

	rcu_read_unlock();

	return 0;
}