// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using a per-cpu flush list. The bpf_dtab_netdev
 * object will not be destroyed until this list is empty, indicating that all
 * outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structures for entries that
 * contain a reference to the net device and remove them. This is a two step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check whether the ifindex is the same as that of the net_device being
 * removed. When removing the dev, a cmpxchg() is used to ensure the correct
 * dev is removed; in the case of a concurrent update or delete operation it
 * is possible that the initially referenced dev is no longer in the map. As
 * the notifier hook walks the map, we know that new dev references can not
 * be added by the user because core infrastructure ensures that
 * dev_get_by_index() calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the
 * two types of devmap; only the lookup and insertion are different.
 */
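
/* A minimal sketch of the BPF-side usage this file supports (the map and
 * program names below are illustrative, not defined anywhere in the kernel):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_devmap_val));
 *		__uint(max_entries, 8);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_ports, 0, XDP_DROP);
 *	}
 *
 * Here XDP_DROP in the lower bits of the flags argument selects the action
 * returned when the lookup of key 0 fails.
 */
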
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

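/* Per-netdev, per-cpu bulk queue: frames redirected to the same target device
 * within one NAPI poll are batched here and sent with a single
 * ndo_xdp_xmit() call from bq_xmit_all().
 */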
struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	struct bpf_prog *xdp_prog;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

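/* Per-cpu list of bulk queues with pending frames; drained by __dev_flush()
 * before the NAPI poll that filled them returns.
 */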
static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

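/* n_buckets is always a power of two, so masking with (n_buckets - 1)
 * selects a bucket without a modulo operation.
 */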
static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;

		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}
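
/* A hedged sketch of populating a devmap from user space with libbpf,
 * including an optional second-level program to run on redirected frames;
 * "devmap_fd" and "prog_fd" are placeholders for descriptors the caller
 * already owns:
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = 3,
 *		.bpf_prog.fd = prog_fd,	// program loaded with
 *					// expected_attach_type BPF_XDP_DEVMAP
 *	};
 *	__u32 key = 0;
 *
 *	bpf_map_update_elem(devmap_fd, &key, &val, BPF_ANY);
 */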

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		bpf_map_area_free(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. The following synchronize_rcu()
	 * guarantees that both rcu read critical sections complete and waits
	 * for preempt-disable regions (NAPI being the relevant context here),
	 * so we are certain there will be no further reads against the
	 * netdev_map and all flush operations are complete. Flush operations
	 * can only be done from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = rcu_dereference_raw(dtab->netdev_map[i]);
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	bpf_map_area_free(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
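
/* User space would typically walk the map with bpf_map_get_next_key();
 * passing NULL as the current key returns the first one. A hedged libbpf
 * sketch ("devmap_fd" is a placeholder):
 *
 *	__u32 key, next;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(devmap_fd, NULL, &next); !err;
 *	     err = bpf_map_get_next_key(devmap_fd, &key, &next)) {
 *		key = next;
 *		// look up or delete "key" here
 *	}
 */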

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
				struct xdp_frame **frames, int n,
				struct net_device *dev)
{
	struct xdp_txq_info txq = { .dev = dev };
	struct xdp_buff xdp;
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		xdp_convert_frame_to_buff(xdpf, &xdp);
		xdp.txq = &txq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (unlikely(err < 0))
				xdp_return_frame_rx_napi(xdpf);
			else
				frames[nframes++] = xdpf;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame_rx_napi(xdpf);
			break;
		}
	}
	return nframes; /* sent frames count */
}

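/* Flush one bulk queue: run the map's second-level XDP program over the
 * batch if one is attached, hand the surviving frames to the target device's
 * ndo_xdp_xmit(), and free anything that was not accepted.
 */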
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	unsigned int cnt = bq->count;
	int sent = 0, err = 0;
	int to_send = cnt;
	int i;

	if (unlikely(!cnt))
		return;

	for (i = 0; i < cnt; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	if (bq->xdp_prog) {
		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
		if (!to_send)
			goto out;
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been xmit'ed.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them
	 */
	for (i = sent; unlikely(i < to_send); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

out:
	bq->count = 0;
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 * driver before returning from its napi->poll() routine. See the comment above
 * xdp_do_flush() in filter.c.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
		bq->dev_rx = NULL;
		bq->xdp_prog = NULL;
		__list_del_clearprev(&bq->flush_node);
	}
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = rcu_dereference_check(dtab->netdev_map[key],
				    rcu_read_lock_bh_held());
	return obj;
}

/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
 * variable access, and map elements stick around. See comment above
 * xdp_do_flush() in filter.c.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frames in the bulk
	 * queue, because the bq is stored per-CPU and must be flushed from
	 * the net_device driver's NAPI handler before it returns.
	 *
	 * Do the same with xdp_prog and flush_list since these fields
	 * are only ever modified together.
	 */
	if (!bq->dev_rx) {
		bq->dev_rx = dev_rx;
		bq->xdp_prog = xdp_prog;
		list_add(&bq->flush_node, flush_list);
	}

	bq->q[bq->count++] = xdpf;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
				struct net_device *dev_rx,
				struct bpf_prog *xdp_prog)
{
	int err;

	if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
		return -EOPNOTSUPP;

	if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
		     xdp_frame_has_frags(xdpf)))
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
	if (unlikely(err))
		return err;

	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
	return 0;
}

static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
{
	struct xdp_txq_info txq = { .dev = dst->dev };
	struct xdp_buff xdp;
	u32 act;

	if (!dst->xdp_prog)
		return XDP_PASS;

	__skb_pull(skb, skb->mac_len);
	xdp.txq = &txq;

	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
	switch (act) {
	case XDP_PASS:
		__skb_push(skb, skb->mac_len);
		break;
	default:
		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		kfree_skb(skb);
		break;
	}

	return act;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
}

static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
	if (!obj)
		return false;

	if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
		return false;

	if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
		     xdp_frame_has_frags(xdpf)))
		return false;

	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
		return false;

	return true;
}

static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
				 struct net_device *dev_rx,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *nxdpf;

	nxdpf = xdpf_clone(xdpf);
	if (!nxdpf)
		return -ENOMEM;

	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);

	return 0;
}

static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
{
	while (num_excluded--) {
		if (ifindex == excluded[num_excluded])
			return true;
	}
	return false;
}

/* Get ifindex of each upper device. 'indexes' must be able to hold at
 * least MAX_NEST_DEV elements.
 * Returns the number of ifindexes added.
 */
static int get_upper_ifindexes(struct net_device *dev, int *indexes)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		indexes[n++] = upper->ifindex;
	}
	return n;
}

int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
		excluded_devices[num_excluded++] = dev_rx->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!is_valid_dst(dst, xdpf))
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_rcu(dst, head, index_hlist,
						 lockdep_is_held(&dtab->index_lock)) {
				if (!is_valid_dst(dst, xdpf))
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the last copy of the frame */
	if (last_dst)
		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
	else
		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */

	return 0;
}
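
/* dev_map_enqueue_multi() backs the BPF_F_BROADCAST mode of
 * bpf_redirect_map(). A hedged BPF-side sketch ("tx_ports" is an
 * illustrative map name):
 *
 *	return bpf_redirect_map(&tx_ports, 0,
 *				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 *
 * With BPF_F_BROADCAST the key is ignored and the frame is cloned to every
 * device in the map; BPF_F_EXCLUDE_INGRESS additionally skips the ingress
 * device and its upper devices, as collected above.
 */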

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;

	/* Redirect has already succeeded semantically at this point, so we just
	 * return 0 even if packet is dropped. Helper below takes care of
	 * freeing skb.
	 */
	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
		return 0;

	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
				  struct sk_buff *skb,
				  struct bpf_prog *xdp_prog)
{
	struct sk_buff *nskb;
	int err;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
	if (unlikely(err)) {
		consume_skb(nskb);
		return err;
	}

	return 0;
}

int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	struct hlist_node *next;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev, excluded_devices);
		excluded_devices[num_excluded++] = dev->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!dst)
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
				if (!dst)
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the first skb and return */
	if (last_dst)
		return dev_map_generic_redirect(last_dst, skb, xdp_prog);

	/* dtab is empty */
	consume_skb(skb);
	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static long dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
	if (old_dev) {
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		atomic_dec((atomic_t *)&dtab->items);
	}
	return 0;
}

static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_NOWAIT | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
		    !bpf_prog_map_compatible(&dtab->map, prog))
			goto err_put_prog;
	}

	dev->idx = idx;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
				  void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	else
		atomic_inc((atomic_t *)&dtab->items);

	return 0;
}

static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				       void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				     u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_lookup_elem);
}

static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_hash_lookup_elem);
}

static u64 dev_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u64 usage = sizeof(struct bpf_dtab);

	if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
		usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
	else
		usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
	usage += atomic_read((atomic_t *)&dtab->items) *
			 (u64)sizeof(struct bpf_dtab_netdev);
	return usage;
}

BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = dev_map_mem_usage,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_map_redirect,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = dev_map_mem_usage,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_hash_map_redirect,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = rcu_dereference(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
				if (dev == odev) {
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
					atomic_dec((atomic_t *)&dtab->items);
				}
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);