// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Generic address resolution entity
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
 *
 *      Fixes:
 *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
 *      Harald Welte            Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)              \
do {                                            \
        if (level <= NEIGH_DEBUG)               \
                pr_debug(fmt, ##__VA_ARGS__);   \
} while (0)

#define PNEIGH_HASHMASK         0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
                           u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
                                    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif
/*
   Neighbour hash table buckets are protected with the rwlock tbl->lock.

   - All scans of and updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will deadlock if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - by their reference count.
   - by the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state.  However, the same lock also protects other entry
   fields:
   - the timer
   - the resolution queue

   Again, nothing clever should be done under neigh->lock; the most
   complicated operation we allow is dev->hard_header.  dev->hard_header
   is assumed to be simple and not to call back into neighbour tables.
 */
91
neigh_blackhole(struct neighbour * neigh,struct sk_buff * skb)92 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
93 {
94 kfree_skb(skb);
95 return -ENETDOWN;
96 }
97
neigh_cleanup_and_release(struct neighbour * neigh)98 static void neigh_cleanup_and_release(struct neighbour *neigh)
99 {
100 trace_neigh_cleanup_and_release(neigh, 0);
101 __neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
102 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
103 neigh_release(neigh);
104 }
105
/*
 * Returns a random value in the interval (1/2)*base ... (3/2)*base.
 * This corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
        return base ? get_random_u32_below(base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);

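/* Unlink the entry from the gc and managed lists and flag it as dead.
 * Called with both tbl->lock and n->lock held.
 */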
static void neigh_mark_dead(struct neighbour *n)
{
        n->dead = 1;
        if (!list_empty(&n->gc_list)) {
                list_del_init(&n->gc_list);
                atomic_dec(&n->tbl->gc_entries);
        }
        if (!list_empty(&n->managed_list))
                list_del_init(&n->managed_list);
}

static void neigh_update_gc_list(struct neighbour *n)
{
        bool on_gc_list, exempt_from_gc;

        write_lock_bh(&n->tbl->lock);
        write_lock(&n->lock);
        if (n->dead)
                goto out;

        /* remove from the gc list if new state is permanent or if neighbor
         * is externally learned; otherwise entry should be on the gc list
         */
        exempt_from_gc = n->nud_state & NUD_PERMANENT ||
                         n->flags & NTF_EXT_LEARNED;
        on_gc_list = !list_empty(&n->gc_list);

        if (exempt_from_gc && on_gc_list) {
                list_del_init(&n->gc_list);
                atomic_dec(&n->tbl->gc_entries);
        } else if (!exempt_from_gc && !on_gc_list) {
                /* add entries to the tail; cleaning removes from the front */
                list_add_tail(&n->gc_list, &n->tbl->gc_list);
                atomic_inc(&n->tbl->gc_entries);
        }
out:
        write_unlock(&n->lock);
        write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_managed_list(struct neighbour *n)
{
        bool on_managed_list, add_to_managed;

        write_lock_bh(&n->tbl->lock);
        write_lock(&n->lock);
        if (n->dead)
                goto out;

        add_to_managed = n->flags & NTF_MANAGED;
        on_managed_list = !list_empty(&n->managed_list);

        if (!add_to_managed && on_managed_list)
                list_del_init(&n->managed_list);
        else if (add_to_managed && !on_managed_list)
                list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
        write_unlock(&n->lock);
        write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
                               bool *gc_update, bool *managed_update)
{
        u32 ndm_flags, old_flags = neigh->flags;

        if (!(flags & NEIGH_UPDATE_F_ADMIN))
                return;

        ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
        ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;

        if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
                if (ndm_flags & NTF_EXT_LEARNED)
                        neigh->flags |= NTF_EXT_LEARNED;
                else
                        neigh->flags &= ~NTF_EXT_LEARNED;
                *notify = 1;
                *gc_update = true;
        }
        if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
                if (ndm_flags & NTF_MANAGED)
                        neigh->flags |= NTF_MANAGED;
                else
                        neigh->flags &= ~NTF_MANAGED;
                *notify = 1;
                *managed_update = true;
        }
}

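/* Unlink @n from its hash chain through @np and mark it dead, but only if
 * nobody else holds a reference.  Returns true if the entry was removed.
 * Called with tbl->lock held.
 */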
static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
                      struct neigh_table *tbl)
{
        bool retval = false;

        write_lock(&n->lock);
        if (refcount_read(&n->refcnt) == 1) {
                struct neighbour *neigh;

                neigh = rcu_dereference_protected(n->next,
                                                  lockdep_is_held(&tbl->lock));
                rcu_assign_pointer(*np, neigh);
                neigh_mark_dead(n);
                retval = true;
        }
        write_unlock(&n->lock);
        if (retval)
                neigh_cleanup_and_release(n);
        return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
        struct neigh_hash_table *nht;
        void *pkey = ndel->primary_key;
        u32 hash_val;
        struct neighbour *n;
        struct neighbour __rcu **np;

        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));
        hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
        hash_val = hash_val >> (32 - nht->hash_shift);

        np = &nht->hash_buckets[hash_val];
        while ((n = rcu_dereference_protected(*np,
                                              lockdep_is_held(&tbl->lock)))) {
                if (n == ndel)
                        return neigh_del(n, np, tbl);
                np = &n->next;
        }
        return false;
}

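/* Forced garbage collection: walk the gc list from the front and drop
 * unreferenced entries that have failed, need no ARP, or have not been
 * updated recently, until the table is back under gc_thresh2.  Returns
 * the number of entries removed.
 */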
static int neigh_forced_gc(struct neigh_table *tbl)
{
        int max_clean = atomic_read(&tbl->gc_entries) -
                        READ_ONCE(tbl->gc_thresh2);
        unsigned long tref = jiffies - 5 * HZ;
        struct neighbour *n, *tmp;
        int shrunk = 0;

        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

        write_lock_bh(&tbl->lock);

        list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
                if (refcount_read(&n->refcnt) == 1) {
                        bool remove = false;

                        write_lock(&n->lock);
                        if ((n->nud_state == NUD_FAILED) ||
                            (n->nud_state == NUD_NOARP) ||
                            (tbl->is_multicast &&
                             tbl->is_multicast(n->primary_key)) ||
                            !time_in_range(n->updated, tref, jiffies))
                                remove = true;
                        write_unlock(&n->lock);

                        if (remove && neigh_remove_one(n, tbl))
                                shrunk++;
                        if (shrunk >= max_clean)
                                break;
                }
        }

        WRITE_ONCE(tbl->last_flush, jiffies);

        write_unlock_bh(&tbl->lock);

        return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
        /* Use a safe distance from the jiffies - LONG_MAX point while the
         * timer is running in DELAY/PROBE state, but still show user space
         * large times in the past.
         */
        unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);

        neigh_hold(n);
        if (!time_in_range(n->confirmed, mint, jiffies))
                n->confirmed = mint;
        if (time_before(n->used, n->confirmed))
                n->used = n->confirmed;
        if (unlikely(mod_timer(&n->timer, when))) {
                printk("NEIGH: BUG, double timer add, state is %x\n",
                       n->nud_state);
                dump_stack();
        }
}

static int neigh_del_timer(struct neighbour *n)
{
        if ((n->nud_state & NUD_IN_TIMER) &&
            del_timer(&n->timer)) {
                neigh_release(n);
                return 1;
        }
        return 0;
}

static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
                                                   int family)
{
        switch (family) {
        case AF_INET:
                return __in_dev_arp_parms_get_rcu(dev);
        case AF_INET6:
                return __in6_dev_nd_parms_get_rcu(dev);
        }
        return NULL;
}

static void neigh_parms_qlen_dec(struct net_device *dev, int family)
{
        struct neigh_parms *p;

        rcu_read_lock();
        p = neigh_get_dev_parms_rcu(dev, family);
        if (p)
                p->qlen--;
        rcu_read_unlock();
}

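/* Remove from @list all queued skbs that belong to @net (or all of them
 * when @net is NULL), dropping the per-device qlen counts and the device
 * references the skbs hold.
 */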
static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
                               int family)
{
        struct sk_buff_head tmp;
        unsigned long flags;
        struct sk_buff *skb;

        skb_queue_head_init(&tmp);
        spin_lock_irqsave(&list->lock, flags);
        skb = skb_peek(list);
        while (skb != NULL) {
                struct sk_buff *skb_next = skb_peek_next(skb, list);
                struct net_device *dev = skb->dev;

                if (net == NULL || net_eq(dev_net(dev), net)) {
                        neigh_parms_qlen_dec(dev, family);
                        __skb_unlink(skb, list);
                        __skb_queue_tail(&tmp, skb);
                }
                skb = skb_next;
        }
        spin_unlock_irqrestore(&list->lock, flags);

        while ((skb = __skb_dequeue(&tmp))) {
                dev_put(skb->dev);
                kfree_skb(skb);
        }
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
                            bool skip_perm)
{
        int i;
        struct neigh_hash_table *nht;

        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));

        for (i = 0; i < (1 << nht->hash_shift); i++) {
                struct neighbour *n;
                struct neighbour __rcu **np = &nht->hash_buckets[i];

                while ((n = rcu_dereference_protected(*np,
                                lockdep_is_held(&tbl->lock))) != NULL) {
                        if (dev && n->dev != dev) {
                                np = &n->next;
                                continue;
                        }
                        if (skip_perm && n->nud_state & NUD_PERMANENT) {
                                np = &n->next;
                                continue;
                        }
                        rcu_assign_pointer(*np,
                                rcu_dereference_protected(n->next,
                                        lockdep_is_held(&tbl->lock)));
                        write_lock(&n->lock);
                        neigh_del_timer(n);
                        neigh_mark_dead(n);
                        if (refcount_read(&n->refcnt) != 1) {
                                /* The most unpleasant situation:
                                   we must destroy the neighbour entry,
                                   but someone still uses it.

                                   The destroy will be delayed until
                                   the last user releases us, but
                                   we must kill timers etc. and move
                                   it to a safe state.
                                 */
                                __skb_queue_purge(&n->arp_queue);
                                n->arp_queue_len_bytes = 0;
                                WRITE_ONCE(n->output, neigh_blackhole);
                                if (n->nud_state & NUD_VALID)
                                        n->nud_state = NUD_NOARP;
                                else
                                        n->nud_state = NUD_NONE;
                                neigh_dbg(2, "neigh %p is stray\n", n);
                        }
                        write_unlock(&n->lock);
                        neigh_cleanup_and_release(n);
                }
        }
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev, false);
        write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
                          bool skip_perm)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev, skip_perm);
        pneigh_ifdown_and_unlock(tbl, dev);
        pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
                           tbl->family);
        if (skb_queue_empty_lockless(&tbl->proxy_queue))
                del_timer_sync(&tbl->proxy_timer);
        return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
        __neigh_ifdown(tbl, dev, true);
        return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        __neigh_ifdown(tbl, dev, false);
        return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

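/* Allocate a new cache entry.  Unless @exempt_from_gc, the allocation is
 * charged against the gc thresholds and may trigger forced garbage
 * collection; returns NULL when the table is full or memory is tight.
 */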
static struct neighbour *neigh_alloc(struct neigh_table *tbl,
                                     struct net_device *dev,
                                     u32 flags, bool exempt_from_gc)
{
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
        int entries, gc_thresh3;

        if (exempt_from_gc)
                goto do_alloc;

        entries = atomic_inc_return(&tbl->gc_entries) - 1;
        gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
        if (entries >= gc_thresh3 ||
            (entries >= READ_ONCE(tbl->gc_thresh2) &&
             time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
                if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
                        net_info_ratelimited("%s: neighbor table overflow!\n",
                                             tbl->id);
                        NEIGH_CACHE_STAT_INC(tbl, table_fulls);
                        goto out_entries;
                }
        }

do_alloc:
        n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
        if (!n)
                goto out_entries;

        __skb_queue_head_init(&n->arp_queue);
        rwlock_init(&n->lock);
        seqlock_init(&n->ha_lock);
        n->updated = n->used = now;
        n->nud_state = NUD_NONE;
        n->output = neigh_blackhole;
        n->flags = flags;
        seqlock_init(&n->hh.hh_lock);
        n->parms = neigh_parms_clone(&tbl->parms);
        timer_setup(&n->timer, neigh_timer_handler, 0);

        NEIGH_CACHE_STAT_INC(tbl, allocs);
        n->tbl = tbl;
        refcount_set(&n->refcnt, 1);
        n->dead = 1;
        INIT_LIST_HEAD(&n->gc_list);
        INIT_LIST_HEAD(&n->managed_list);

        atomic_inc(&tbl->entries);
out:
        return n;

out_entries:
        if (!exempt_from_gc)
                atomic_dec(&tbl->gc_entries);
        goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
        *x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
        size_t size = (1 << shift) * sizeof(struct neighbour *);
        struct neigh_hash_table *ret;
        struct neighbour __rcu **buckets;
        int i;

        ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
        if (!ret)
                return NULL;
        if (size <= PAGE_SIZE) {
                buckets = kzalloc(size, GFP_ATOMIC);
        } else {
                buckets = (struct neighbour __rcu **)
                          __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
                                           get_order(size));
                kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
        }
        if (!buckets) {
                kfree(ret);
                return NULL;
        }
        ret->hash_buckets = buckets;
        ret->hash_shift = shift;
        for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
                neigh_get_hash_rnd(&ret->hash_rnd[i]);
        return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
        struct neigh_hash_table *nht = container_of(head,
                                                    struct neigh_hash_table,
                                                    rcu);
        size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
        struct neighbour __rcu **buckets = nht->hash_buckets;

        if (size <= PAGE_SIZE) {
                kfree(buckets);
        } else {
                kmemleak_free(buckets);
                free_pages((unsigned long)buckets, get_order(size));
        }
        kfree(nht);
}

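/* Rehash the table into 1 << new_shift buckets.  Called with tbl->lock
 * held; readers keep using the old table under RCU until it is freed.
 */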
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
                                                unsigned long new_shift)
{
        unsigned int i, hash;
        struct neigh_hash_table *new_nht, *old_nht;

        NEIGH_CACHE_STAT_INC(tbl, hash_grows);

        old_nht = rcu_dereference_protected(tbl->nht,
                                            lockdep_is_held(&tbl->lock));
        new_nht = neigh_hash_alloc(new_shift);
        if (!new_nht)
                return old_nht;

        for (i = 0; i < (1 << old_nht->hash_shift); i++) {
                struct neighbour *n, *next;

                for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
                                                   lockdep_is_held(&tbl->lock));
                     n != NULL;
                     n = next) {
                        hash = tbl->hash(n->primary_key, n->dev,
                                         new_nht->hash_rnd);

                        hash >>= (32 - new_nht->hash_shift);
                        next = rcu_dereference_protected(n->next,
                                                lockdep_is_held(&tbl->lock));

                        rcu_assign_pointer(n->next,
                                           rcu_dereference_protected(
                                                new_nht->hash_buckets[hash],
                                                lockdep_is_held(&tbl->lock)));
                        rcu_assign_pointer(new_nht->hash_buckets[hash], n);
                }
        }

        rcu_assign_pointer(tbl->nht, new_nht);
        call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
        return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        struct neighbour *n;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        rcu_read_lock();
        n = __neigh_lookup_noref(tbl, pkey, dev);
        if (n) {
                if (!refcount_inc_not_zero(&n->refcnt))
                        n = NULL;
                NEIGH_CACHE_STAT_INC(tbl, hits);
        }

        rcu_read_unlock();
        return n;
}
EXPORT_SYMBOL(neigh_lookup);

static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
                struct net_device *dev, u32 flags,
                bool exempt_from_gc, bool want_ref)
{
        u32 hash_val, key_len = tbl->key_len;
        struct neighbour *n1, *rc, *n;
        struct neigh_hash_table *nht;
        int error;

        n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
        trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
        if (!n) {
                rc = ERR_PTR(-ENOBUFS);
                goto out;
        }

        memcpy(n->primary_key, pkey, key_len);
        n->dev = dev;
        netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);

        /* Protocol specific setup. */
        if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        if (dev->netdev_ops->ndo_neigh_construct) {
                error = dev->netdev_ops->ndo_neigh_construct(dev, n);
                if (error < 0) {
                        rc = ERR_PTR(error);
                        goto out_neigh_release;
                }
        }

        /* Device specific setup. */
        if (n->parms->neigh_setup &&
            (error = n->parms->neigh_setup(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

        write_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));

        if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
                nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

        hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

        if (n->parms->dead) {
                rc = ERR_PTR(-EINVAL);
                goto out_tbl_unlock;
        }

        for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
                                            lockdep_is_held(&tbl->lock));
             n1 != NULL;
             n1 = rcu_dereference_protected(n1->next,
                                            lockdep_is_held(&tbl->lock))) {
                if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
                        if (want_ref)
                                neigh_hold(n1);
                        rc = n1;
                        goto out_tbl_unlock;
                }
        }

        n->dead = 0;
        if (!exempt_from_gc)
                list_add_tail(&n->gc_list, &n->tbl->gc_list);
        if (n->flags & NTF_MANAGED)
                list_add_tail(&n->managed_list, &n->tbl->managed_list);
        if (want_ref)
                neigh_hold(n);
        rcu_assign_pointer(n->next,
                           rcu_dereference_protected(nht->hash_buckets[hash_val],
                                                     lockdep_is_held(&tbl->lock)));
        rcu_assign_pointer(nht->hash_buckets[hash_val], n);
        write_unlock_bh(&tbl->lock);
        neigh_dbg(2, "neigh %p is created\n", n);
        rc = n;
out:
        return rc;
out_tbl_unlock:
        write_unlock_bh(&tbl->lock);
out_neigh_release:
        if (!exempt_from_gc)
                atomic_dec(&tbl->gc_entries);
        neigh_release(n);
        goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
                                 struct net_device *dev, bool want_ref)
{
        return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

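/* Hash the last four bytes of the key into one of PNEIGH_HASHMASK + 1
 * proxy buckets.
 */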
static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
        u32 hash_val = *(u32 *)(pkey + key_len - 4);
        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;
        return hash_val;
}

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
                                              struct net *net,
                                              const void *pkey,
                                              unsigned int key_len,
                                              struct net_device *dev)
{
        while (n) {
                if (!memcmp(n->key, pkey, key_len) &&
                    net_eq(pneigh_net(n), net) &&
                    (n->dev == dev || !n->dev))
                        return n;
                n = n->next;
        }
        return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
                                     struct net *net, const void *pkey,
                                     struct net_device *dev)
{
        unsigned int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
                                 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
                                   struct net *net, const void *pkey,
                                   struct net_device *dev, int creat)
{
        struct pneigh_entry *n;
        unsigned int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        read_lock_bh(&tbl->lock);
        n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
                              net, pkey, key_len, dev);
        read_unlock_bh(&tbl->lock);

        if (n || !creat)
                goto out;

        ASSERT_RTNL();

        n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;

        write_pnet(&n->net, net);
        memcpy(n->key, pkey, key_len);
        n->dev = dev;
        netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);

        if (tbl->pconstructor && tbl->pconstructor(n)) {
                netdev_put(dev, &n->dev_tracker);
                kfree(n);
                n = NULL;
                goto out;
        }

        write_lock_bh(&tbl->lock);
        n->next = tbl->phash_buckets[hash_val];
        tbl->phash_buckets[hash_val] = n;
        write_unlock_bh(&tbl->lock);
out:
        return n;
}
EXPORT_SYMBOL(pneigh_lookup);

int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
                  struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        unsigned int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        write_lock_bh(&tbl->lock);
        for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
             np = &n->next) {
                if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
                    net_eq(pneigh_net(n), net)) {
                        *np = n->next;
                        write_unlock_bh(&tbl->lock);
                        if (tbl->pdestructor)
                                tbl->pdestructor(n);
                        netdev_put(n->dev, &n->dev_tracker);
                        kfree(n);
                        return 0;
                }
        }
        write_unlock_bh(&tbl->lock);
        return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
                                    struct net_device *dev)
{
        struct pneigh_entry *n, **np, *freelist = NULL;
        u32 h;

        for (h = 0; h <= PNEIGH_HASHMASK; h++) {
                np = &tbl->phash_buckets[h];
                while ((n = *np) != NULL) {
                        if (!dev || n->dev == dev) {
                                *np = n->next;
                                n->next = freelist;
                                freelist = n;
                                continue;
                        }
                        np = &n->next;
                }
        }
        write_unlock_bh(&tbl->lock);
        while ((n = freelist)) {
                freelist = n->next;
                n->next = NULL;
                if (tbl->pdestructor)
                        tbl->pdestructor(n);
                netdev_put(n->dev, &n->dev_tracker);
                kfree(n);
        }
        return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
        if (refcount_dec_and_test(&parms->refcnt))
                neigh_parms_destroy(parms);
}

/*
 * The neighbour must already be removed from the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
        struct net_device *dev = neigh->dev;

        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

        if (!neigh->dead) {
                pr_warn("Destroying alive neighbour %p\n", neigh);
                dump_stack();
                return;
        }

        if (neigh_del_timer(neigh))
                pr_warn("Impossible event\n");

        write_lock_bh(&neigh->lock);
        __skb_queue_purge(&neigh->arp_queue);
        write_unlock_bh(&neigh->lock);
        neigh->arp_queue_len_bytes = 0;

        if (dev->netdev_ops->ndo_neigh_destroy)
                dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

        netdev_put(dev, &neigh->dev_tracker);
        neigh_parms_put(neigh->parms);

        neigh_dbg(2, "neigh %p is destroyed\n", neigh);

        atomic_dec(&neigh->tbl->entries);
        kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with neigh write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
        neigh_dbg(2, "neigh %p is suspected\n", neigh);

        WRITE_ONCE(neigh->output, neigh->ops->output);
}

/* Neighbour state is OK;
   enable fast path.

   Called with neigh write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
        neigh_dbg(2, "neigh %p is connected\n", neigh);

        WRITE_ONCE(neigh->output, neigh->ops->connected_output);
}

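/* Periodic garbage collection work: recompute reachable_time from the
 * random function every five minutes and drop unreferenced stale entries,
 * one hash chain at a time.
 */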
static void neigh_periodic_work(struct work_struct *work)
{
        struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
        struct neighbour *n;
        struct neighbour __rcu **np;
        unsigned int i;
        struct neigh_hash_table *nht;

        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

        write_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));

        /*
         *      periodically recompute ReachableTime from random function
         */

        if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
                struct neigh_parms *p;

                WRITE_ONCE(tbl->last_rand, jiffies);
                list_for_each_entry(p, &tbl->parms_list, list)
                        p->reachable_time =
                                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
        }

        if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
                goto out;

        for (i = 0; i < (1 << nht->hash_shift); i++) {
                np = &nht->hash_buckets[i];

                while ((n = rcu_dereference_protected(*np,
                                lockdep_is_held(&tbl->lock))) != NULL) {
                        unsigned int state;

                        write_lock(&n->lock);

                        state = n->nud_state;
                        if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
                            (n->flags & NTF_EXT_LEARNED)) {
                                write_unlock(&n->lock);
                                goto next_elt;
                        }

                        if (time_before(n->used, n->confirmed) &&
                            time_is_before_eq_jiffies(n->confirmed))
                                n->used = n->confirmed;

                        if (refcount_read(&n->refcnt) == 1 &&
                            (state == NUD_FAILED ||
                             !time_in_range_open(jiffies, n->used,
                                                 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
                                rcu_assign_pointer(*np,
                                        rcu_dereference_protected(n->next,
                                                lockdep_is_held(&tbl->lock)));
                                neigh_mark_dead(n);
                                write_unlock(&n->lock);
                                neigh_cleanup_and_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);

next_elt:
                        np = &n->next;
                }
                /*
                 * It's fine to release lock here, even if hash table
                 * grows while we are preempted.
                 */
                write_unlock_bh(&tbl->lock);
                cond_resched();
                write_lock_bh(&tbl->lock);
                nht = rcu_dereference_protected(tbl->nht,
                                                lockdep_is_held(&tbl->lock));
        }
out:
        /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
         * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
         * BASE_REACHABLE_TIME.
         */
        queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
                           NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
        write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
        struct neigh_parms *p = n->parms;
        return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
               (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
                NEIGH_VAR(p, MCAST_PROBES));
}

static void neigh_invalidate(struct neighbour *neigh)
        __releases(neigh->lock)
        __acquires(neigh->lock)
{
        struct sk_buff *skb;

        NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
        neigh_dbg(2, "neigh %p is failed\n", neigh);
        neigh->updated = jiffies;

        /* This is a very delicate place.  report_unreachable() is a very
           complicated routine; in particular, it can hit the same neighbour
           entry!

           So we try to be careful and avoid an endless loop. --ANK
         */
        while (neigh->nud_state == NUD_FAILED &&
               (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                write_unlock(&neigh->lock);
                neigh->ops->error_report(neigh, skb);
                write_lock(&neigh->lock);
        }
        __skb_queue_purge(&neigh->arp_queue);
        neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
        __releases(neigh->lock)
{
        struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
        /* keep skb alive even if arp_queue overflows */
        if (skb)
                skb = skb_clone(skb, GFP_ATOMIC);
        write_unlock(&neigh->lock);
        if (neigh->ops->solicit)
                neigh->ops->solicit(neigh, skb);
        atomic_inc(&neigh->probes);
        consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
        unsigned long now, next;
        struct neighbour *neigh = from_timer(neigh, t, timer);
        unsigned int state;
        int notify = 0;

        write_lock(&neigh->lock);

        state = neigh->nud_state;
        now = jiffies;
        next = now + HZ;

        if (!(state & NUD_IN_TIMER))
                goto out;

        if (state & NUD_REACHABLE) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->reachable_time)) {
                        neigh_dbg(2, "neigh %p is still alive\n", neigh);
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else if (time_before_eq(now,
                                          neigh->used +
                                          NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
                        neigh_dbg(2, "neigh %p is delayed\n", neigh);
                        WRITE_ONCE(neigh->nud_state, NUD_DELAY);
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
                } else {
                        neigh_dbg(2, "neigh %p is suspected\n", neigh);
                        WRITE_ONCE(neigh->nud_state, NUD_STALE);
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        notify = 1;
                }
        } else if (state & NUD_DELAY) {
                if (time_before_eq(now,
                                   neigh->confirmed +
                                   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
                        neigh_dbg(2, "neigh %p is now reachable\n", neigh);
                        WRITE_ONCE(neigh->nud_state, NUD_REACHABLE);
                        neigh->updated = jiffies;
                        neigh_connect(neigh);
                        notify = 1;
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else {
                        neigh_dbg(2, "neigh %p is probed\n", neigh);
                        WRITE_ONCE(neigh->nud_state, NUD_PROBE);
                        neigh->updated = jiffies;
                        atomic_set(&neigh->probes, 0);
                        notify = 1;
                        next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
                                         HZ/100);
                }
        } else {
                /* NUD_PROBE|NUD_INCOMPLETE */
                next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
        }

        if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
            atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
                WRITE_ONCE(neigh->nud_state, NUD_FAILED);
                notify = 1;
                neigh_invalidate(neigh);
                goto out;
        }

        if (neigh->nud_state & NUD_IN_TIMER) {
                if (time_before(next, jiffies + HZ/100))
                        next = jiffies + HZ/100;
                if (!mod_timer(&neigh->timer, next))
                        neigh_hold(neigh);
        }
        if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
                neigh_probe(neigh);
        } else {
out:
                write_unlock(&neigh->lock);
        }

        if (notify)
                neigh_update_notify(neigh, 0);

        trace_neigh_timer_handler(neigh, 0);

        neigh_release(neigh);
}

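/* Kick off address resolution for @neigh if needed.  Returns 0 when the
 * entry is usable for output; returns 1 when resolution is pending (the
 * skb, if any, has been queued) or has failed (the skb has been dropped).
 */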
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
                       const bool immediate_ok)
{
        int rc;
        bool immediate_probe = false;

        write_lock_bh(&neigh->lock);

        rc = 0;
        if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
                goto out_unlock_bh;
        if (neigh->dead)
                goto out_dead;

        if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
                if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
                    NEIGH_VAR(neigh->parms, APP_PROBES)) {
                        unsigned long next, now = jiffies;

                        atomic_set(&neigh->probes,
                                   NEIGH_VAR(neigh->parms, UCAST_PROBES));
                        neigh_del_timer(neigh);
                        WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
                        neigh->updated = now;
                        if (!immediate_ok) {
                                next = now + 1;
                        } else {
                                immediate_probe = true;
                                next = now + max(NEIGH_VAR(neigh->parms,
                                                           RETRANS_TIME),
                                                 HZ / 100);
                        }
                        neigh_add_timer(neigh, next);
                } else {
                        WRITE_ONCE(neigh->nud_state, NUD_FAILED);
                        neigh->updated = jiffies;
                        write_unlock_bh(&neigh->lock);

                        kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
                        return 1;
                }
        } else if (neigh->nud_state & NUD_STALE) {
                neigh_dbg(2, "neigh %p is delayed\n", neigh);
                neigh_del_timer(neigh);
                WRITE_ONCE(neigh->nud_state, NUD_DELAY);
                neigh->updated = jiffies;
                neigh_add_timer(neigh, jiffies +
                                NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
        }

        if (neigh->nud_state == NUD_INCOMPLETE) {
                if (skb) {
                        while (neigh->arp_queue_len_bytes + skb->truesize >
                               NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
                                struct sk_buff *buff;

                                buff = __skb_dequeue(&neigh->arp_queue);
                                if (!buff)
                                        break;
                                neigh->arp_queue_len_bytes -= buff->truesize;
                                kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
                                NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
                        }
                        skb_dst_force(skb);
                        __skb_queue_tail(&neigh->arp_queue, skb);
                        neigh->arp_queue_len_bytes += skb->truesize;
                }
                rc = 1;
        }
out_unlock_bh:
        if (immediate_probe)
                neigh_probe(neigh);
        else
                write_unlock(&neigh->lock);
        local_bh_enable();
        trace_neigh_event_send_done(neigh, rc);
        return rc;

out_dead:
        if (neigh->nud_state & NUD_STALE)
                goto out_unlock_bh;
        write_unlock_bh(&neigh->lock);
        kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
        trace_neigh_event_send_dead(neigh, 1);
        return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

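/* Push a new link-layer address into the device's cached hardware header,
 * if the device maintains one.
 */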
static void neigh_update_hhs(struct neighbour *neigh)
{
        struct hh_cache *hh;
        void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
                = NULL;

        if (neigh->dev->header_ops)
                update = neigh->dev->header_ops->cache_update;

        if (update) {
                hh = &neigh->hh;
                if (READ_ONCE(hh->hh_len)) {
                        write_seqlock_bh(&hh->hh_lock);
                        update(hh, neigh->dev, neigh->ha);
                        write_sequnlock_bh(&hh->hh_lock);
                }
        }
}

/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if one is not supplied.
   -- new    is the new state.
   -- flags
        NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
                                if it is different.
        NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
                                lladdr instead of overriding it
                                if it is different.
        NEIGH_UPDATE_F_ADMIN    means that the change is administrative.
        NEIGH_UPDATE_F_USE      means that the entry is user triggered.
        NEIGH_UPDATE_F_MANAGED  means that the entry will be auto-refreshed.
        NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
                                NTF_ROUTER flag.
        NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known as
                                a router.

   Caller MUST hold a reference count on the entry.
 */
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
                          u8 new, u32 flags, u32 nlmsg_pid,
                          struct netlink_ext_ack *extack)
{
        bool gc_update = false, managed_update = false;
        int update_isrouter = 0;
        struct net_device *dev;
        int err, notify = 0;
        u8 old;

        trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

        write_lock_bh(&neigh->lock);

        dev = neigh->dev;
        old = neigh->nud_state;
        err = -EPERM;

        if (neigh->dead) {
                NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
                new = old;
                goto out;
        }
        if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
            (old & (NUD_NOARP | NUD_PERMANENT)))
                goto out;

        neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
        if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
                new = old & ~NUD_PERMANENT;
                WRITE_ONCE(neigh->nud_state, new);
                err = 0;
                goto out;
        }

        if (!(new & NUD_VALID)) {
                neigh_del_timer(neigh);
                if (old & NUD_CONNECTED)
                        neigh_suspect(neigh);
                WRITE_ONCE(neigh->nud_state, new);
                err = 0;
                notify = old & NUD_VALID;
                if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
                    (new & NUD_FAILED)) {
                        neigh_invalidate(neigh);
                        notify = 1;
                }
                goto out;
        }

        /* Compare the new lladdr with the cached one. */
        if (!dev->addr_len) {
                /* First case: device needs no address. */
                lladdr = neigh->ha;
        } else if (lladdr) {
                /* The second case: if something is already cached
                   and a new address is proposed:
                   - compare new & old
                   - if they are different, check the override flag
                 */
                if ((old & NUD_VALID) &&
                    !memcmp(lladdr, neigh->ha, dev->addr_len))
                        lladdr = neigh->ha;
        } else {
                /* No address is supplied; if we know something,
                   use it, otherwise discard the request.
                 */
                err = -EINVAL;
                if (!(old & NUD_VALID)) {
                        NL_SET_ERR_MSG(extack, "No link layer address given");
                        goto out;
                }
                lladdr = neigh->ha;
        }

        /* Update the confirmed timestamp for the neighbour entry after we
         * receive an ARP packet, even if it doesn't change the IP-to-MAC
         * binding.
         */
        if (new & NUD_CONNECTED)
                neigh->confirmed = jiffies;

        /* If the entry was valid and the address has not changed,
           do not change the entry state if the new one is STALE.
         */
        err = 0;
        update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
        if (old & NUD_VALID) {
                if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
                        update_isrouter = 0;
                        if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
                            (old & NUD_CONNECTED)) {
                                lladdr = neigh->ha;
                                new = NUD_STALE;
                        } else
                                goto out;
                } else {
                        if (lladdr == neigh->ha && new == NUD_STALE &&
                            !(flags & NEIGH_UPDATE_F_ADMIN))
                                new = old;
                }
        }

        /* Update the timestamp only once we know we will make a change to
         * the neighbour entry.  Otherwise we risk moving the locktime window
         * with noop updates and ignoring relevant ARP updates.
         */
        if (new != old || lladdr != neigh->ha)
                neigh->updated = jiffies;

        if (new != old) {
                neigh_del_timer(neigh);
                if (new & NUD_PROBE)
                        atomic_set(&neigh->probes, 0);
                if (new & NUD_IN_TIMER)
                        neigh_add_timer(neigh, (jiffies +
                                                ((new & NUD_REACHABLE) ?
                                                 neigh->parms->reachable_time :
                                                 0)));
                WRITE_ONCE(neigh->nud_state, new);
                notify = 1;
        }

        if (lladdr != neigh->ha) {
                write_seqlock(&neigh->ha_lock);
                memcpy(&neigh->ha, lladdr, dev->addr_len);
                write_sequnlock(&neigh->ha_lock);
                neigh_update_hhs(neigh);
                if (!(new & NUD_CONNECTED))
                        neigh->confirmed = jiffies -
                                (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
                notify = 1;
        }
        if (new == old)
                goto out;
        if (new & NUD_CONNECTED)
                neigh_connect(neigh);
        else
                neigh_suspect(neigh);
        if (!(old & NUD_VALID)) {
                struct sk_buff *skb;

                /* Again: avoid a dead loop if something went wrong */

                while (neigh->nud_state & NUD_VALID &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        struct dst_entry *dst = skb_dst(skb);
                        struct neighbour *n2, *n1 = neigh;
                        write_unlock_bh(&neigh->lock);

                        rcu_read_lock();

                        /* Why not just use 'neigh' as-is?  The problem is
                         * that things such as shaper, eql, and sch_teql can
                         * end up using alternative, different, neigh objects
                         * to output the packet in the output path.  So what
                         * we need to do here is re-lookup the top-level neigh
                         * in the path so we can reinject the packet there.
                         */
                        n2 = NULL;
                        if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
                                n2 = dst_neigh_lookup_skb(dst, skb);
                                if (n2)
                                        n1 = n2;
                        }
                        READ_ONCE(n1->output)(n1, skb);
                        if (n2)
                                neigh_release(n2);
                        rcu_read_unlock();

                        write_lock_bh(&neigh->lock);
                }
                __skb_queue_purge(&neigh->arp_queue);
                neigh->arp_queue_len_bytes = 0;
        }
out:
        if (update_isrouter)
                neigh_update_is_router(neigh, flags, &notify);
        write_unlock_bh(&neigh->lock);
        if (((new ^ old) & NUD_PERMANENT) || gc_update)
                neigh_update_gc_list(neigh);
        if (managed_update)
                neigh_update_managed_list(neigh);
        if (notify)
                neigh_update_notify(neigh, nlmsg_pid);
        trace_neigh_update_done(neigh, err);
        return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                 u32 flags, u32 nlmsg_pid)
{
        return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
        if (neigh->dead)
                return;
        neigh->updated = jiffies;
        if (!(neigh->nud_state & NUD_FAILED))
                return;
        WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
        atomic_set(&neigh->probes, neigh_max_probes(neigh));
        neigh_add_timer(neigh,
                        jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
                                      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                 u8 *lladdr, void *saddr,
                                 struct net_device *dev)
{
        struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
                                                 lladdr || !dev->addr_len);
        if (neigh)
                neigh_update(neigh, lladdr, NUD_STALE,
                             NEIGH_UPDATE_F_OVERRIDE, 0);
        return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
        struct net_device *dev = n->dev;
        __be16 prot = n->tbl->protocol;
        struct hh_cache *hh = &n->hh;

        write_lock_bh(&n->lock);

        /* Only one thread can come in here and initialize the
         * hh_cache entry.
         */
        if (!hh->hh_len)
                dev->header_ops->cache(n, hh, prot);

        write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
        int rc = 0;

        if (!neigh_event_send(neigh, skb)) {
                int err;
                struct net_device *dev = neigh->dev;
                unsigned int seq;

                if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
                        neigh_hh_init(neigh);

                do {
                        __skb_pull(skb, skb_network_offset(skb));
                        seq = read_seqbegin(&neigh->ha_lock);
                        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                              neigh->ha, NULL, skb->len);
                } while (read_seqretry(&neigh->ha_lock, seq));

                if (err >= 0)
                        rc = dev_queue_xmit(skb);
                else
                        goto out_kfree_skb;
        }
out:
        return rc;
out_kfree_skb:
        rc = -EINVAL;
        kfree_skb(skb);
        goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
        struct net_device *dev = neigh->dev;
        unsigned int seq;
        int err;

        do {
                __skb_pull(skb, skb_network_offset(skb));
                seq = read_seqbegin(&neigh->ha_lock);
                err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                      neigh->ha, NULL, skb->len);
        } while (read_seqretry(&neigh->ha_lock, seq));

        if (err >= 0)
                err = dev_queue_xmit(skb);
        else {
                err = -EINVAL;
                kfree_skb(skb);
        }
        return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
        return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

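/* Periodic work for NTF_MANAGED entries: re-probe every managed neighbour
 * so its entry is kept auto-refreshed.
 */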
static void neigh_managed_work(struct work_struct *work)
{
        struct neigh_table *tbl = container_of(work, struct neigh_table,
                                               managed_work.work);
        struct neighbour *neigh;

        write_lock_bh(&tbl->lock);
        list_for_each_entry(neigh, &tbl->managed_list, managed_list)
                neigh_event_send_probe(neigh, NULL, false);
        queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
                           NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
        write_unlock_bh(&tbl->lock);
}

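/* Proxy queue timer: hand the delayed proxy requests that are now due back
 * to the protocol's proxy_redo handler and re-arm the timer for the next
 * pending one.
 */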
neigh_proxy_process(struct timer_list * t)1614 static void neigh_proxy_process(struct timer_list *t)
1615 {
1616 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1617 long sched_next = 0;
1618 unsigned long now = jiffies;
1619 struct sk_buff *skb, *n;
1620
1621 spin_lock(&tbl->proxy_queue.lock);
1622
1623 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1624 long tdif = NEIGH_CB(skb)->sched_next - now;
1625
1626 if (tdif <= 0) {
1627 struct net_device *dev = skb->dev;
1628
1629 neigh_parms_qlen_dec(dev, tbl->family);
1630 __skb_unlink(skb, &tbl->proxy_queue);
1631
1632 if (tbl->proxy_redo && netif_running(dev)) {
1633 rcu_read_lock();
1634 tbl->proxy_redo(skb);
1635 rcu_read_unlock();
1636 } else {
1637 kfree_skb(skb);
1638 }
1639
1640 dev_put(dev);
1641 } else if (!sched_next || tdif < sched_next)
1642 sched_next = tdif;
1643 }
1644 del_timer(&tbl->proxy_timer);
1645 if (sched_next)
1646 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1647 spin_unlock(&tbl->proxy_queue.lock);
1648 }
1649
neigh_proxy_delay(struct neigh_parms * p)1650 static unsigned long neigh_proxy_delay(struct neigh_parms *p)
1651 {
1652 /* If proxy_delay is zero, do not call get_random_u32_below()
1653 * as it is undefined behavior.
1654 */
1655 unsigned long proxy_delay = NEIGH_VAR(p, PROXY_DELAY);
1656
1657 return proxy_delay ?
1658 jiffies + get_random_u32_below(proxy_delay) : jiffies;
1659 }
1660
pneigh_enqueue(struct neigh_table * tbl,struct neigh_parms * p,struct sk_buff * skb)1661 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1662 struct sk_buff *skb)
1663 {
1664 unsigned long sched_next = neigh_proxy_delay(p);
1665
1666 if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1667 kfree_skb(skb);
1668 return;
1669 }
1670
1671 NEIGH_CB(skb)->sched_next = sched_next;
1672 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1673
1674 spin_lock(&tbl->proxy_queue.lock);
1675 if (del_timer(&tbl->proxy_timer)) {
1676 if (time_before(tbl->proxy_timer.expires, sched_next))
1677 sched_next = tbl->proxy_timer.expires;
1678 }
1679 skb_dst_drop(skb);
1680 dev_hold(skb->dev);
1681 __skb_queue_tail(&tbl->proxy_queue, skb);
1682 p->qlen++;
1683 mod_timer(&tbl->proxy_timer, sched_next);
1684 spin_unlock(&tbl->proxy_queue.lock);
1685 }
1686 EXPORT_SYMBOL(pneigh_enqueue);
1687
lookup_neigh_parms(struct neigh_table * tbl,struct net * net,int ifindex)1688 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1689 struct net *net, int ifindex)
1690 {
1691 struct neigh_parms *p;
1692
1693 list_for_each_entry(p, &tbl->parms_list, list) {
1694 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1695 (!p->dev && !ifindex && net_eq(net, &init_net)))
1696 return p;
1697 }
1698
1699 return NULL;
1700 }
1701
neigh_parms_alloc(struct net_device * dev,struct neigh_table * tbl)1702 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1703 struct neigh_table *tbl)
1704 {
1705 struct neigh_parms *p;
1706 struct net *net = dev_net(dev);
1707 const struct net_device_ops *ops = dev->netdev_ops;
1708
1709 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1710 if (p) {
1711 p->tbl = tbl;
1712 refcount_set(&p->refcnt, 1);
1713 p->reachable_time =
1714 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1715 p->qlen = 0;
1716 netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
1717 p->dev = dev;
1718 write_pnet(&p->net, net);
1719 p->sysctl_table = NULL;
1720
1721 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1722 netdev_put(dev, &p->dev_tracker);
1723 kfree(p);
1724 return NULL;
1725 }
1726
1727 write_lock_bh(&tbl->lock);
1728 list_add(&p->list, &tbl->parms.list);
1729 write_unlock_bh(&tbl->lock);
1730
1731 neigh_parms_data_state_cleanall(p);
1732 }
1733 return p;
1734 }
1735 EXPORT_SYMBOL(neigh_parms_alloc);
1736
neigh_rcu_free_parms(struct rcu_head * head)1737 static void neigh_rcu_free_parms(struct rcu_head *head)
1738 {
1739 struct neigh_parms *parms =
1740 container_of(head, struct neigh_parms, rcu_head);
1741
1742 neigh_parms_put(parms);
1743 }
1744
neigh_parms_release(struct neigh_table * tbl,struct neigh_parms * parms)1745 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1746 {
1747 if (!parms || parms == &tbl->parms)
1748 return;
1749 write_lock_bh(&tbl->lock);
1750 list_del(&parms->list);
1751 parms->dead = 1;
1752 write_unlock_bh(&tbl->lock);
1753 netdev_put(parms->dev, &parms->dev_tracker);
1754 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1755 }
1756 EXPORT_SYMBOL(neigh_parms_release);
1757
neigh_parms_destroy(struct neigh_parms * parms)1758 static void neigh_parms_destroy(struct neigh_parms *parms)
1759 {
1760 kfree(parms);
1761 }
1762
1763 static struct lock_class_key neigh_table_proxy_queue_class;
1764
1765 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1766
neigh_table_init(int index,struct neigh_table * tbl)1767 void neigh_table_init(int index, struct neigh_table *tbl)
1768 {
1769 unsigned long now = jiffies;
1770 unsigned long phsize;
1771
1772 INIT_LIST_HEAD(&tbl->parms_list);
1773 INIT_LIST_HEAD(&tbl->gc_list);
1774 INIT_LIST_HEAD(&tbl->managed_list);
1775
1776 list_add(&tbl->parms.list, &tbl->parms_list);
1777 write_pnet(&tbl->parms.net, &init_net);
1778 refcount_set(&tbl->parms.refcnt, 1);
1779 tbl->parms.reachable_time =
1780 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1781 tbl->parms.qlen = 0;
1782
1783 tbl->stats = alloc_percpu(struct neigh_statistics);
1784 if (!tbl->stats)
1785 panic("cannot create neighbour cache statistics");
1786
1787 #ifdef CONFIG_PROC_FS
1788 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1789 &neigh_stat_seq_ops, tbl))
1790 panic("cannot create neighbour proc dir entry");
1791 #endif
1792
1793 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1794
1795 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1796 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1797
1798 if (!tbl->nht || !tbl->phash_buckets)
1799 panic("cannot allocate neighbour cache hashes");
1800
1801 if (!tbl->entry_size)
1802 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1803 tbl->key_len, NEIGH_PRIV_ALIGN);
1804 else
1805 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1806
1807 rwlock_init(&tbl->lock);
1808
1809 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1810 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1811 tbl->parms.reachable_time);
1812 INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
1813 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);
1814
1815 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1816 skb_queue_head_init_class(&tbl->proxy_queue,
1817 &neigh_table_proxy_queue_class);
1818
1819 tbl->last_flush = now;
1820 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1821
1822 neigh_tables[index] = tbl;
1823 }
1824 EXPORT_SYMBOL(neigh_table_init);
1825
neigh_table_clear(int index,struct neigh_table * tbl)1826 int neigh_table_clear(int index, struct neigh_table *tbl)
1827 {
1828 neigh_tables[index] = NULL;
1829 /* It is not clean... Fix it to unload IPv6 module safely */
1830 cancel_delayed_work_sync(&tbl->managed_work);
1831 cancel_delayed_work_sync(&tbl->gc_work);
1832 del_timer_sync(&tbl->proxy_timer);
1833 pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
1834 neigh_ifdown(tbl, NULL);
1835 if (atomic_read(&tbl->entries))
1836 pr_crit("neighbour leakage\n");
1837
1838 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1839 neigh_hash_free_rcu);
1840 tbl->nht = NULL;
1841
1842 kfree(tbl->phash_buckets);
1843 tbl->phash_buckets = NULL;
1844
1845 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1846
1847 free_percpu(tbl->stats);
1848 tbl->stats = NULL;
1849
1850 return 0;
1851 }
1852 EXPORT_SYMBOL(neigh_table_clear);
1853
neigh_find_table(int family)1854 static struct neigh_table *neigh_find_table(int family)
1855 {
1856 struct neigh_table *tbl = NULL;
1857
1858 switch (family) {
1859 case AF_INET:
1860 tbl = neigh_tables[NEIGH_ARP_TABLE];
1861 break;
1862 case AF_INET6:
1863 tbl = neigh_tables[NEIGH_ND_TABLE];
1864 break;
1865 }
1866
1867 return tbl;
1868 }
1869
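/* Netlink attribute policy shared by the RTM_*NEIGH handlers below.
 * NDA_UNSPEC's strict_start_type makes attributes from NDA_NH_ID
 * onwards subject to strict validation; older attributes stay
 * permissive for legacy userspace.
 */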
1870 const struct nla_policy nda_policy[NDA_MAX+1] = {
1871 [NDA_UNSPEC] = { .strict_start_type = NDA_NH_ID },
1872 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1873 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1874 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
1875 [NDA_PROBES] = { .type = NLA_U32 },
1876 [NDA_VLAN] = { .type = NLA_U16 },
1877 [NDA_PORT] = { .type = NLA_U16 },
1878 [NDA_VNI] = { .type = NLA_U32 },
1879 [NDA_IFINDEX] = { .type = NLA_U32 },
1880 [NDA_MASTER] = { .type = NLA_U32 },
1881 [NDA_PROTOCOL] = { .type = NLA_U8 },
1882 [NDA_NH_ID] = { .type = NLA_U32 },
1883 [NDA_FLAGS_EXT] = NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
1884 [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED },
1885 };
1886
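/* RTM_DELNEIGH: delete one entry. Proxy entries (NTF_PROXY) are removed
 * from the pneigh hash; ordinary entries are forced to NUD_FAILED via
 * __neigh_update() and then unlinked under the table lock.
 */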
1887 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1888 struct netlink_ext_ack *extack)
1889 {
1890 struct net *net = sock_net(skb->sk);
1891 struct ndmsg *ndm;
1892 struct nlattr *dst_attr;
1893 struct neigh_table *tbl;
1894 struct neighbour *neigh;
1895 struct net_device *dev = NULL;
1896 int err = -EINVAL;
1897
1898 ASSERT_RTNL();
1899 if (nlmsg_len(nlh) < sizeof(*ndm))
1900 goto out;
1901
1902 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1903 if (!dst_attr) {
1904 NL_SET_ERR_MSG(extack, "Network address not specified");
1905 goto out;
1906 }
1907
1908 ndm = nlmsg_data(nlh);
1909 if (ndm->ndm_ifindex) {
1910 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1911 if (dev == NULL) {
1912 err = -ENODEV;
1913 goto out;
1914 }
1915 }
1916
1917 tbl = neigh_find_table(ndm->ndm_family);
1918 if (tbl == NULL)
1919 return -EAFNOSUPPORT;
1920
1921 if (nla_len(dst_attr) < (int)tbl->key_len) {
1922 NL_SET_ERR_MSG(extack, "Invalid network address");
1923 goto out;
1924 }
1925
1926 if (ndm->ndm_flags & NTF_PROXY) {
1927 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1928 goto out;
1929 }
1930
1931 if (dev == NULL)
1932 goto out;
1933
1934 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1935 if (neigh == NULL) {
1936 err = -ENOENT;
1937 goto out;
1938 }
1939
1940 err = __neigh_update(neigh, NULL, NUD_FAILED,
1941 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1942 NETLINK_CB(skb).portid, extack);
1943 write_lock_bh(&tbl->lock);
1944 neigh_release(neigh);
1945 neigh_remove_one(neigh, tbl);
1946 write_unlock_bh(&tbl->lock);
1947
1948 out:
1949 return err;
1950 }
1951
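/* RTM_NEWNEIGH: create or update an entry, honouring NLM_F_CREATE,
 * NLM_F_EXCL and NLM_F_REPLACE. Roughly what iproute2 issues for e.g.
 *
 *   ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0 nud permanent
 *
 * (addresses here are illustrative). NUD_PERMANENT and NTF_EXT_LEARNED
 * entries are created exempt from garbage collection.
 */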
1952 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1953 struct netlink_ext_ack *extack)
1954 {
1955 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1956 NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1957 struct net *net = sock_net(skb->sk);
1958 struct ndmsg *ndm;
1959 struct nlattr *tb[NDA_MAX+1];
1960 struct neigh_table *tbl;
1961 struct net_device *dev = NULL;
1962 struct neighbour *neigh;
1963 void *dst, *lladdr;
1964 u8 protocol = 0;
1965 u32 ndm_flags;
1966 int err;
1967
1968 ASSERT_RTNL();
1969 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1970 nda_policy, extack);
1971 if (err < 0)
1972 goto out;
1973
1974 err = -EINVAL;
1975 if (!tb[NDA_DST]) {
1976 NL_SET_ERR_MSG(extack, "Network address not specified");
1977 goto out;
1978 }
1979
1980 ndm = nlmsg_data(nlh);
1981 ndm_flags = ndm->ndm_flags;
1982 if (tb[NDA_FLAGS_EXT]) {
1983 u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);
1984
1985 BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
1986 (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
1987 hweight32(NTF_EXT_MASK)));
1988 ndm_flags |= (ext << NTF_EXT_SHIFT);
1989 }
1990 if (ndm->ndm_ifindex) {
1991 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1992 if (dev == NULL) {
1993 err = -ENODEV;
1994 goto out;
1995 }
1996
1997 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1998 NL_SET_ERR_MSG(extack, "Invalid link address");
1999 goto out;
2000 }
2001 }
2002
2003 tbl = neigh_find_table(ndm->ndm_family);
2004 if (tbl == NULL)
2005 return -EAFNOSUPPORT;
2006
2007 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
2008 NL_SET_ERR_MSG(extack, "Invalid network address");
2009 goto out;
2010 }
2011
2012 dst = nla_data(tb[NDA_DST]);
2013 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
2014
2015 if (tb[NDA_PROTOCOL])
2016 protocol = nla_get_u8(tb[NDA_PROTOCOL]);
2017 if (ndm_flags & NTF_PROXY) {
2018 struct pneigh_entry *pn;
2019
2020 if (ndm_flags & NTF_MANAGED) {
2021 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
2022 goto out;
2023 }
2024
2025 err = -ENOBUFS;
2026 pn = pneigh_lookup(tbl, net, dst, dev, 1);
2027 if (pn) {
2028 pn->flags = ndm_flags;
2029 if (protocol)
2030 pn->protocol = protocol;
2031 err = 0;
2032 }
2033 goto out;
2034 }
2035
2036 if (!dev) {
2037 NL_SET_ERR_MSG(extack, "Device not specified");
2038 goto out;
2039 }
2040
2041 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
2042 err = -EINVAL;
2043 goto out;
2044 }
2045
2046 neigh = neigh_lookup(tbl, dst, dev);
2047 if (neigh == NULL) {
2048 bool ndm_permanent = ndm->ndm_state & NUD_PERMANENT;
2049 bool exempt_from_gc = ndm_permanent ||
2050 ndm_flags & NTF_EXT_LEARNED;
2051
2052 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2053 err = -ENOENT;
2054 goto out;
2055 }
2056 if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
2057 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
2058 err = -EINVAL;
2059 goto out;
2060 }
2061
2062 neigh = ___neigh_create(tbl, dst, dev,
2063 ndm_flags &
2064 (NTF_EXT_LEARNED | NTF_MANAGED),
2065 exempt_from_gc, true);
2066 if (IS_ERR(neigh)) {
2067 err = PTR_ERR(neigh);
2068 goto out;
2069 }
2070 } else {
2071 if (nlh->nlmsg_flags & NLM_F_EXCL) {
2072 err = -EEXIST;
2073 neigh_release(neigh);
2074 goto out;
2075 }
2076
2077 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
2078 flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
2079 NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
2080 }
2081
2082 if (protocol)
2083 neigh->protocol = protocol;
2084 if (ndm_flags & NTF_EXT_LEARNED)
2085 flags |= NEIGH_UPDATE_F_EXT_LEARNED;
2086 if (ndm_flags & NTF_ROUTER)
2087 flags |= NEIGH_UPDATE_F_ISROUTER;
2088 if (ndm_flags & NTF_MANAGED)
2089 flags |= NEIGH_UPDATE_F_MANAGED;
2090 if (ndm_flags & NTF_USE)
2091 flags |= NEIGH_UPDATE_F_USE;
2092
2093 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2094 NETLINK_CB(skb).portid, extack);
2095 if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
2096 neigh_event_send(neigh, NULL);
2097 err = 0;
2098 }
2099 neigh_release(neigh);
2100 out:
2101 return err;
2102 }
2103
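/* Dump one neigh_parms as a nested NDTA_PARMS attribute. NDTPA_QUEUE_LEN
 * is derived from QUEUE_LEN_BYTES for backward compatibility; on
 * overflow the nest is cancelled and -EMSGSIZE returned.
 */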
2104 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2105 {
2106 struct nlattr *nest;
2107
2108 nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2109 if (nest == NULL)
2110 return -ENOBUFS;
2111
2112 if ((parms->dev &&
2113 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2114 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2115 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2116 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2117 /* approximate value for deprecated QUEUE_LEN (in packets) */
2118 nla_put_u32(skb, NDTPA_QUEUE_LEN,
2119 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2120 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2121 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2122 nla_put_u32(skb, NDTPA_UCAST_PROBES,
2123 NEIGH_VAR(parms, UCAST_PROBES)) ||
2124 nla_put_u32(skb, NDTPA_MCAST_PROBES,
2125 NEIGH_VAR(parms, MCAST_PROBES)) ||
2126 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2127 NEIGH_VAR(parms, MCAST_REPROBES)) ||
2128 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2129 NDTPA_PAD) ||
2130 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2131 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2132 nla_put_msecs(skb, NDTPA_GC_STALETIME,
2133 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2134 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2135 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2136 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2137 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2138 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2139 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2140 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2141 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2142 nla_put_msecs(skb, NDTPA_LOCKTIME,
2143 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
2144 nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
2145 NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
2146 goto nla_put_failure;
2147 return nla_nest_end(skb, nest);
2148
2149 nla_put_failure:
2150 nla_nest_cancel(skb, nest);
2151 return -EMSGSIZE;
2152 }
2153
2154 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2155 u32 pid, u32 seq, int type, int flags)
2156 {
2157 struct nlmsghdr *nlh;
2158 struct ndtmsg *ndtmsg;
2159
2160 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2161 if (nlh == NULL)
2162 return -EMSGSIZE;
2163
2164 ndtmsg = nlmsg_data(nlh);
2165
2166 read_lock_bh(&tbl->lock);
2167 ndtmsg->ndtm_family = tbl->family;
2168 ndtmsg->ndtm_pad1 = 0;
2169 ndtmsg->ndtm_pad2 = 0;
2170
2171 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2172 nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
2173 NDTA_PAD) ||
2174 nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
2175 nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
2176 nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
2177 goto nla_put_failure;
2178 {
2179 unsigned long now = jiffies;
2180 long flush_delta = now - READ_ONCE(tbl->last_flush);
2181 long rand_delta = now - READ_ONCE(tbl->last_rand);
2182 struct neigh_hash_table *nht;
2183 struct ndt_config ndc = {
2184 .ndtc_key_len = tbl->key_len,
2185 .ndtc_entry_size = tbl->entry_size,
2186 .ndtc_entries = atomic_read(&tbl->entries),
2187 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
2188 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
2189 .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
2190 };
2191
2192 rcu_read_lock();
2193 nht = rcu_dereference(tbl->nht);
2194 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2195 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2196 rcu_read_unlock();
2197
2198 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2199 goto nla_put_failure;
2200 }
2201
2202 {
2203 int cpu;
2204 struct ndt_stats ndst;
2205
2206 memset(&ndst, 0, sizeof(ndst));
2207
2208 for_each_possible_cpu(cpu) {
2209 struct neigh_statistics *st;
2210
2211 st = per_cpu_ptr(tbl->stats, cpu);
2212 ndst.ndts_allocs += READ_ONCE(st->allocs);
2213 ndst.ndts_destroys += READ_ONCE(st->destroys);
2214 ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);
2215 ndst.ndts_res_failed += READ_ONCE(st->res_failed);
2216 ndst.ndts_lookups += READ_ONCE(st->lookups);
2217 ndst.ndts_hits += READ_ONCE(st->hits);
2218 ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);
2219 ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);
2220 ndst.ndts_periodic_gc_runs += READ_ONCE(st->periodic_gc_runs);
2221 ndst.ndts_forced_gc_runs += READ_ONCE(st->forced_gc_runs);
2222 ndst.ndts_table_fulls += READ_ONCE(st->table_fulls);
2223 }
2224
2225 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2226 NDTA_PAD))
2227 goto nla_put_failure;
2228 }
2229
2230 BUG_ON(tbl->parms.dev);
2231 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2232 goto nla_put_failure;
2233
2234 read_unlock_bh(&tbl->lock);
2235 nlmsg_end(skb, nlh);
2236 return 0;
2237
2238 nla_put_failure:
2239 read_unlock_bh(&tbl->lock);
2240 nlmsg_cancel(skb, nlh);
2241 return -EMSGSIZE;
2242 }
2243
2244 static int neightbl_fill_param_info(struct sk_buff *skb,
2245 struct neigh_table *tbl,
2246 struct neigh_parms *parms,
2247 u32 pid, u32 seq, int type,
2248 unsigned int flags)
2249 {
2250 struct ndtmsg *ndtmsg;
2251 struct nlmsghdr *nlh;
2252
2253 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2254 if (nlh == NULL)
2255 return -EMSGSIZE;
2256
2257 ndtmsg = nlmsg_data(nlh);
2258
2259 read_lock_bh(&tbl->lock);
2260 ndtmsg->ndtm_family = tbl->family;
2261 ndtmsg->ndtm_pad1 = 0;
2262 ndtmsg->ndtm_pad2 = 0;
2263
2264 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2265 neightbl_fill_parms(skb, parms) < 0)
2266 goto errout;
2267
2268 read_unlock_bh(&tbl->lock);
2269 nlmsg_end(skb, nlh);
2270 return 0;
2271 errout:
2272 read_unlock_bh(&tbl->lock);
2273 nlmsg_cancel(skb, nlh);
2274 return -EMSGSIZE;
2275 }
2276
2277 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2278 [NDTA_NAME] = { .type = NLA_STRING },
2279 [NDTA_THRESH1] = { .type = NLA_U32 },
2280 [NDTA_THRESH2] = { .type = NLA_U32 },
2281 [NDTA_THRESH3] = { .type = NLA_U32 },
2282 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
2283 [NDTA_PARMS] = { .type = NLA_NESTED },
2284 };
2285
2286 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2287 [NDTPA_IFINDEX] = { .type = NLA_U32 },
2288 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
2289 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
2290 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
2291 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
2292 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
2293 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
2294 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
2295 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
2296 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
2297 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
2298 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
2299 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
2300 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
2301 [NDTPA_INTERVAL_PROBE_TIME_MS] = { .type = NLA_U64, .min = 1 },
2302 };
2303
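/* RTM_SETNEIGHTBL: update table-wide settings and/or one parms
 * instance. The table is matched by NDTA_NAME (and optionally family);
 * gc_thresh* and gc_interval may only be changed from the initial netns.
 */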
2304 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2305 struct netlink_ext_ack *extack)
2306 {
2307 struct net *net = sock_net(skb->sk);
2308 struct neigh_table *tbl;
2309 struct ndtmsg *ndtmsg;
2310 struct nlattr *tb[NDTA_MAX+1];
2311 bool found = false;
2312 int err, tidx;
2313
2314 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2315 nl_neightbl_policy, extack);
2316 if (err < 0)
2317 goto errout;
2318
2319 if (tb[NDTA_NAME] == NULL) {
2320 err = -EINVAL;
2321 goto errout;
2322 }
2323
2324 ndtmsg = nlmsg_data(nlh);
2325
2326 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2327 tbl = neigh_tables[tidx];
2328 if (!tbl)
2329 continue;
2330 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2331 continue;
2332 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2333 found = true;
2334 break;
2335 }
2336 }
2337
2338 if (!found)
2339 return -ENOENT;
2340
2341 /*
2342 * We acquire tbl->lock to be nice to the periodic timers and
2343 * make sure they always see a consistent set of values.
2344 */
2345 write_lock_bh(&tbl->lock);
2346
2347 if (tb[NDTA_PARMS]) {
2348 struct nlattr *tbp[NDTPA_MAX+1];
2349 struct neigh_parms *p;
2350 int i, ifindex = 0;
2351
2352 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2353 tb[NDTA_PARMS],
2354 nl_ntbl_parm_policy, extack);
2355 if (err < 0)
2356 goto errout_tbl_lock;
2357
2358 if (tbp[NDTPA_IFINDEX])
2359 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2360
2361 p = lookup_neigh_parms(tbl, net, ifindex);
2362 if (p == NULL) {
2363 err = -ENOENT;
2364 goto errout_tbl_lock;
2365 }
2366
2367 for (i = 1; i <= NDTPA_MAX; i++) {
2368 if (tbp[i] == NULL)
2369 continue;
2370
2371 switch (i) {
2372 case NDTPA_QUEUE_LEN:
2373 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2374 nla_get_u32(tbp[i]) *
2375 SKB_TRUESIZE(ETH_FRAME_LEN));
2376 break;
2377 case NDTPA_QUEUE_LENBYTES:
2378 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2379 nla_get_u32(tbp[i]));
2380 break;
2381 case NDTPA_PROXY_QLEN:
2382 NEIGH_VAR_SET(p, PROXY_QLEN,
2383 nla_get_u32(tbp[i]));
2384 break;
2385 case NDTPA_APP_PROBES:
2386 NEIGH_VAR_SET(p, APP_PROBES,
2387 nla_get_u32(tbp[i]));
2388 break;
2389 case NDTPA_UCAST_PROBES:
2390 NEIGH_VAR_SET(p, UCAST_PROBES,
2391 nla_get_u32(tbp[i]));
2392 break;
2393 case NDTPA_MCAST_PROBES:
2394 NEIGH_VAR_SET(p, MCAST_PROBES,
2395 nla_get_u32(tbp[i]));
2396 break;
2397 case NDTPA_MCAST_REPROBES:
2398 NEIGH_VAR_SET(p, MCAST_REPROBES,
2399 nla_get_u32(tbp[i]));
2400 break;
2401 case NDTPA_BASE_REACHABLE_TIME:
2402 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2403 nla_get_msecs(tbp[i]));
2404 /* update reachable_time as well, otherwise, the change will
2405 * only be effective after the next time neigh_periodic_work
2406 * decides to recompute it (can be multiple minutes)
2407 */
2408 p->reachable_time =
2409 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2410 break;
2411 case NDTPA_GC_STALETIME:
2412 NEIGH_VAR_SET(p, GC_STALETIME,
2413 nla_get_msecs(tbp[i]));
2414 break;
2415 case NDTPA_DELAY_PROBE_TIME:
2416 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2417 nla_get_msecs(tbp[i]));
2418 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2419 break;
2420 case NDTPA_INTERVAL_PROBE_TIME_MS:
2421 NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
2422 nla_get_msecs(tbp[i]));
2423 break;
2424 case NDTPA_RETRANS_TIME:
2425 NEIGH_VAR_SET(p, RETRANS_TIME,
2426 nla_get_msecs(tbp[i]));
2427 break;
2428 case NDTPA_ANYCAST_DELAY:
2429 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2430 nla_get_msecs(tbp[i]));
2431 break;
2432 case NDTPA_PROXY_DELAY:
2433 NEIGH_VAR_SET(p, PROXY_DELAY,
2434 nla_get_msecs(tbp[i]));
2435 break;
2436 case NDTPA_LOCKTIME:
2437 NEIGH_VAR_SET(p, LOCKTIME,
2438 nla_get_msecs(tbp[i]));
2439 break;
2440 }
2441 }
2442 }
2443
2444 err = -ENOENT;
2445 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2446 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2447 !net_eq(net, &init_net))
2448 goto errout_tbl_lock;
2449
2450 if (tb[NDTA_THRESH1])
2451 WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
2452
2453 if (tb[NDTA_THRESH2])
2454 WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
2455
2456 if (tb[NDTA_THRESH3])
2457 WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
2458
2459 if (tb[NDTA_GC_INTERVAL])
2460 WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
2461
2462 err = 0;
2463
2464 errout_tbl_lock:
2465 write_unlock_bh(&tbl->lock);
2466 errout:
2467 return err;
2468 }
2469
2470 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2471 struct netlink_ext_ack *extack)
2472 {
2473 struct ndtmsg *ndtm;
2474
2475 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2476 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2477 return -EINVAL;
2478 }
2479
2480 ndtm = nlmsg_data(nlh);
2481 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2482 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2483 return -EINVAL;
2484 }
2485
2486 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2487 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2488 return -EINVAL;
2489 }
2490
2491 return 0;
2492 }
2493
2494 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2495 {
2496 const struct nlmsghdr *nlh = cb->nlh;
2497 struct net *net = sock_net(skb->sk);
2498 int family, tidx, nidx = 0;
2499 int tbl_skip = cb->args[0];
2500 int neigh_skip = cb->args[1];
2501 struct neigh_table *tbl;
2502
2503 if (cb->strict_check) {
2504 int err = neightbl_valid_dump_info(nlh, cb->extack);
2505
2506 if (err < 0)
2507 return err;
2508 }
2509
2510 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2511
2512 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2513 struct neigh_parms *p;
2514
2515 tbl = neigh_tables[tidx];
2516 if (!tbl)
2517 continue;
2518
2519 if (tidx < tbl_skip || (family && tbl->family != family))
2520 continue;
2521
2522 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2523 nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2524 NLM_F_MULTI) < 0)
2525 break;
2526
2527 nidx = 0;
2528 p = list_next_entry(&tbl->parms, list);
2529 list_for_each_entry_from(p, &tbl->parms_list, list) {
2530 if (!net_eq(neigh_parms_net(p), net))
2531 continue;
2532
2533 if (nidx < neigh_skip)
2534 goto next;
2535
2536 if (neightbl_fill_param_info(skb, tbl, p,
2537 NETLINK_CB(cb->skb).portid,
2538 nlh->nlmsg_seq,
2539 RTM_NEWNEIGHTBL,
2540 NLM_F_MULTI) < 0)
2541 goto out;
2542 next:
2543 nidx++;
2544 }
2545
2546 neigh_skip = 0;
2547 }
2548 out:
2549 cb->args[0] = tidx;
2550 cb->args[1] = nidx;
2551
2552 return skb->len;
2553 }
2554
2555 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2556 u32 pid, u32 seq, int type, unsigned int flags)
2557 {
2558 u32 neigh_flags, neigh_flags_ext;
2559 unsigned long now = jiffies;
2560 struct nda_cacheinfo ci;
2561 struct nlmsghdr *nlh;
2562 struct ndmsg *ndm;
2563
2564 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2565 if (nlh == NULL)
2566 return -EMSGSIZE;
2567
2568 neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
2569 neigh_flags = neigh->flags & NTF_OLD_MASK;
2570
2571 ndm = nlmsg_data(nlh);
2572 ndm->ndm_family = neigh->ops->family;
2573 ndm->ndm_pad1 = 0;
2574 ndm->ndm_pad2 = 0;
2575 ndm->ndm_flags = neigh_flags;
2576 ndm->ndm_type = neigh->type;
2577 ndm->ndm_ifindex = neigh->dev->ifindex;
2578
2579 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2580 goto nla_put_failure;
2581
2582 read_lock_bh(&neigh->lock);
2583 ndm->ndm_state = neigh->nud_state;
2584 if (neigh->nud_state & NUD_VALID) {
2585 char haddr[MAX_ADDR_LEN];
2586
2587 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2588 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2589 read_unlock_bh(&neigh->lock);
2590 goto nla_put_failure;
2591 }
2592 }
2593
2594 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2595 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2596 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2597 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
2598 read_unlock_bh(&neigh->lock);
2599
2600 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2601 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2602 goto nla_put_failure;
2603
2604 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2605 goto nla_put_failure;
2606 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2607 goto nla_put_failure;
2608
2609 nlmsg_end(skb, nlh);
2610 return 0;
2611
2612 nla_put_failure:
2613 nlmsg_cancel(skb, nlh);
2614 return -EMSGSIZE;
2615 }
2616
2617 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2618 u32 pid, u32 seq, int type, unsigned int flags,
2619 struct neigh_table *tbl)
2620 {
2621 u32 neigh_flags, neigh_flags_ext;
2622 struct nlmsghdr *nlh;
2623 struct ndmsg *ndm;
2624
2625 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2626 if (nlh == NULL)
2627 return -EMSGSIZE;
2628
2629 neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
2630 neigh_flags = pn->flags & NTF_OLD_MASK;
2631
2632 ndm = nlmsg_data(nlh);
2633 ndm->ndm_family = tbl->family;
2634 ndm->ndm_pad1 = 0;
2635 ndm->ndm_pad2 = 0;
2636 ndm->ndm_flags = neigh_flags | NTF_PROXY;
2637 ndm->ndm_type = RTN_UNICAST;
2638 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2639 ndm->ndm_state = NUD_NONE;
2640
2641 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2642 goto nla_put_failure;
2643
2644 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2645 goto nla_put_failure;
2646 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2647 goto nla_put_failure;
2648
2649 nlmsg_end(skb, nlh);
2650 return 0;
2651
2652 nla_put_failure:
2653 nlmsg_cancel(skb, nlh);
2654 return -EMSGSIZE;
2655 }
2656
2657 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2658 {
2659 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2660 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2661 }
2662
2663 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2664 {
2665 struct net_device *master;
2666
2667 if (!master_idx)
2668 return false;
2669
2670 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2671
2672 /* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another
2673 * invalid value for ifindex to denote "no master".
2674 */
2675 if (master_idx == -1)
2676 return !!master;
2677
2678 if (!master || master->ifindex != master_idx)
2679 return true;
2680
2681 return false;
2682 }
2683
2684 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2685 {
2686 if (filter_idx && (!dev || dev->ifindex != filter_idx))
2687 return true;
2688
2689 return false;
2690 }
2691
2692 struct neigh_dump_filter {
2693 int master_idx;
2694 int dev_idx;
2695 };
2696
2697 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2698 struct netlink_callback *cb,
2699 struct neigh_dump_filter *filter)
2700 {
2701 struct net *net = sock_net(skb->sk);
2702 struct neighbour *n;
2703 int rc, h, s_h = cb->args[1];
2704 int idx, s_idx = idx = cb->args[2];
2705 struct neigh_hash_table *nht;
2706 unsigned int flags = NLM_F_MULTI;
2707
2708 if (filter->dev_idx || filter->master_idx)
2709 flags |= NLM_F_DUMP_FILTERED;
2710
2711 rcu_read_lock();
2712 nht = rcu_dereference(tbl->nht);
2713
2714 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2715 if (h > s_h)
2716 s_idx = 0;
2717 for (n = rcu_dereference(nht->hash_buckets[h]), idx = 0;
2718 n != NULL;
2719 n = rcu_dereference(n->next)) {
2720 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2721 goto next;
2722 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2723 neigh_master_filtered(n->dev, filter->master_idx))
2724 goto next;
2725 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2726 cb->nlh->nlmsg_seq,
2727 RTM_NEWNEIGH,
2728 flags) < 0) {
2729 rc = -1;
2730 goto out;
2731 }
2732 next:
2733 idx++;
2734 }
2735 }
2736 rc = skb->len;
2737 out:
2738 rcu_read_unlock();
2739 cb->args[1] = h;
2740 cb->args[2] = idx;
2741 return rc;
2742 }
2743
2744 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2745 struct netlink_callback *cb,
2746 struct neigh_dump_filter *filter)
2747 {
2748 struct pneigh_entry *n;
2749 struct net *net = sock_net(skb->sk);
2750 int rc, h, s_h = cb->args[3];
2751 int idx, s_idx = idx = cb->args[4];
2752 unsigned int flags = NLM_F_MULTI;
2753
2754 if (filter->dev_idx || filter->master_idx)
2755 flags |= NLM_F_DUMP_FILTERED;
2756
2757 read_lock_bh(&tbl->lock);
2758
2759 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2760 if (h > s_h)
2761 s_idx = 0;
2762 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2763 if (idx < s_idx || pneigh_net(n) != net)
2764 goto next;
2765 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2766 neigh_master_filtered(n->dev, filter->master_idx))
2767 goto next;
2768 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2769 cb->nlh->nlmsg_seq,
2770 RTM_NEWNEIGH, flags, tbl) < 0) {
2771 read_unlock_bh(&tbl->lock);
2772 rc = -1;
2773 goto out;
2774 }
2775 next:
2776 idx++;
2777 }
2778 }
2779
2780 read_unlock_bh(&tbl->lock);
2781 rc = skb->len;
2782 out:
2783 cb->args[3] = h;
2784 cb->args[4] = idx;
2785 return rc;
2787 }
2788
2789 static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2790 bool strict_check,
2791 struct neigh_dump_filter *filter,
2792 struct netlink_ext_ack *extack)
2793 {
2794 struct nlattr *tb[NDA_MAX + 1];
2795 int err, i;
2796
2797 if (strict_check) {
2798 struct ndmsg *ndm;
2799
2800 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2801 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2802 return -EINVAL;
2803 }
2804
2805 ndm = nlmsg_data(nlh);
2806 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
2807 ndm->ndm_state || ndm->ndm_type) {
2808 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2809 return -EINVAL;
2810 }
2811
2812 if (ndm->ndm_flags & ~NTF_PROXY) {
2813 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2814 return -EINVAL;
2815 }
2816
2817 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2818 tb, NDA_MAX, nda_policy,
2819 extack);
2820 } else {
2821 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2822 NDA_MAX, nda_policy, extack);
2823 }
2824 if (err < 0)
2825 return err;
2826
2827 for (i = 0; i <= NDA_MAX; ++i) {
2828 if (!tb[i])
2829 continue;
2830
2831 /* all new attributes should require strict_check */
2832 switch (i) {
2833 case NDA_IFINDEX:
2834 filter->dev_idx = nla_get_u32(tb[i]);
2835 break;
2836 case NDA_MASTER:
2837 filter->master_idx = nla_get_u32(tb[i]);
2838 break;
2839 default:
2840 if (strict_check) {
2841 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2842 return -EINVAL;
2843 }
2844 }
2845 }
2846
2847 return 0;
2848 }
2849
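/* RTM_GETNEIGH dump. Continuation state lives in cb->args[]: args[0] is
 * the table index, args[1]/args[2] the hash bucket and chain position
 * for neigh entries, args[3]/args[4] the same for proxy entries.
 */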
2850 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2851 {
2852 const struct nlmsghdr *nlh = cb->nlh;
2853 struct neigh_dump_filter filter = {};
2854 struct neigh_table *tbl;
2855 int t, family, s_t;
2856 int proxy = 0;
2857 int err;
2858
2859 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2860
2861 /* check for full ndmsg structure presence, family member is
2862 * the same for both structures
2863 */
2864 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2865 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2866 proxy = 1;
2867
2868 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2869 if (err < 0 && cb->strict_check)
2870 return err;
2871
2872 s_t = cb->args[0];
2873
2874 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2875 tbl = neigh_tables[t];
2876
2877 if (!tbl)
2878 continue;
2879 if (t < s_t || (family && tbl->family != family))
2880 continue;
2881 if (t > s_t)
2882 memset(&cb->args[1], 0, sizeof(cb->args) -
2883 sizeof(cb->args[0]));
2884 if (proxy)
2885 err = pneigh_dump_table(tbl, skb, cb, &filter);
2886 else
2887 err = neigh_dump_table(tbl, skb, cb, &filter);
2888 if (err < 0)
2889 break;
2890 }
2891
2892 cb->args[0] = t;
2893 return skb->len;
2894 }
2895
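/* Validate an RTM_GETNEIGH request: NDA_DST is the only accepted
 * attribute and NTF_PROXY the only permitted header flag.
 */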
2896 static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2897 struct neigh_table **tbl,
2898 void **dst, int *dev_idx, u8 *ndm_flags,
2899 struct netlink_ext_ack *extack)
2900 {
2901 struct nlattr *tb[NDA_MAX + 1];
2902 struct ndmsg *ndm;
2903 int err, i;
2904
2905 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2906 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2907 return -EINVAL;
2908 }
2909
2910 ndm = nlmsg_data(nlh);
2911 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
2912 ndm->ndm_type) {
2913 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2914 return -EINVAL;
2915 }
2916
2917 if (ndm->ndm_flags & ~NTF_PROXY) {
2918 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2919 return -EINVAL;
2920 }
2921
2922 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2923 NDA_MAX, nda_policy, extack);
2924 if (err < 0)
2925 return err;
2926
2927 *ndm_flags = ndm->ndm_flags;
2928 *dev_idx = ndm->ndm_ifindex;
2929 *tbl = neigh_find_table(ndm->ndm_family);
2930 if (*tbl == NULL) {
2931 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2932 return -EAFNOSUPPORT;
2933 }
2934
2935 for (i = 0; i <= NDA_MAX; ++i) {
2936 if (!tb[i])
2937 continue;
2938
2939 switch (i) {
2940 case NDA_DST:
2941 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2942 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2943 return -EINVAL;
2944 }
2945 *dst = nla_data(tb[i]);
2946 break;
2947 default:
2948 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2949 return -EINVAL;
2950 }
2951 }
2952
2953 return 0;
2954 }
2955
2956 static inline size_t neigh_nlmsg_size(void)
2957 {
2958 return NLMSG_ALIGN(sizeof(struct ndmsg))
2959 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2960 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2961 + nla_total_size(sizeof(struct nda_cacheinfo))
2962 + nla_total_size(4) /* NDA_PROBES */
2963 + nla_total_size(4) /* NDA_FLAGS_EXT */
2964 + nla_total_size(1); /* NDA_PROTOCOL */
2965 }
2966
2967 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2968 u32 pid, u32 seq)
2969 {
2970 struct sk_buff *skb;
2971 int err = 0;
2972
2973 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2974 if (!skb)
2975 return -ENOBUFS;
2976
2977 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2978 if (err) {
2979 kfree_skb(skb);
2980 goto errout;
2981 }
2982
2983 err = rtnl_unicast(skb, net, pid);
2984 errout:
2985 return err;
2986 }
2987
2988 static inline size_t pneigh_nlmsg_size(void)
2989 {
2990 return NLMSG_ALIGN(sizeof(struct ndmsg))
2991 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2992 + nla_total_size(4) /* NDA_FLAGS_EXT */
2993 + nla_total_size(1); /* NDA_PROTOCOL */
2994 }
2995
2996 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2997 u32 pid, u32 seq, struct neigh_table *tbl)
2998 {
2999 struct sk_buff *skb;
3000 int err = 0;
3001
3002 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
3003 if (!skb)
3004 return -ENOBUFS;
3005
3006 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
3007 if (err) {
3008 kfree_skb(skb);
3009 goto errout;
3010 }
3011
3012 err = rtnl_unicast(skb, net, pid);
3013 errout:
3014 return err;
3015 }
3016
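/* RTM_GETNEIGH doit: look up a single neighbour or proxy entry and
 * unicast the reply to the requesting socket.
 */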
3017 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3018 struct netlink_ext_ack *extack)
3019 {
3020 struct net *net = sock_net(in_skb->sk);
3021 struct net_device *dev = NULL;
3022 struct neigh_table *tbl = NULL;
3023 struct neighbour *neigh;
3024 void *dst = NULL;
3025 u8 ndm_flags = 0;
3026 int dev_idx = 0;
3027 int err;
3028
3029 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
3030 extack);
3031 if (err < 0)
3032 return err;
3033
3034 if (dev_idx) {
3035 dev = __dev_get_by_index(net, dev_idx);
3036 if (!dev) {
3037 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
3038 return -ENODEV;
3039 }
3040 }
3041
3042 if (!dst) {
3043 NL_SET_ERR_MSG(extack, "Network address not specified");
3044 return -EINVAL;
3045 }
3046
3047 if (ndm_flags & NTF_PROXY) {
3048 struct pneigh_entry *pn;
3049
3050 pn = pneigh_lookup(tbl, net, dst, dev, 0);
3051 if (!pn) {
3052 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
3053 return -ENOENT;
3054 }
3055 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
3056 nlh->nlmsg_seq, tbl);
3057 }
3058
3059 if (!dev) {
3060 NL_SET_ERR_MSG(extack, "No device specified");
3061 return -EINVAL;
3062 }
3063
3064 neigh = neigh_lookup(tbl, dst, dev);
3065 if (!neigh) {
3066 NL_SET_ERR_MSG(extack, "Neighbour entry not found");
3067 return -ENOENT;
3068 }
3069
3070 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
3071 nlh->nlmsg_seq);
3072
3073 neigh_release(neigh);
3074
3075 return err;
3076 }
3077
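/* Invoke cb for every entry in tbl. Runs under rcu_read_lock() and a
 * read hold on tbl->lock, so cb must neither sleep nor take the table
 * lock itself.
 */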
3078 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
3079 {
3080 int chain;
3081 struct neigh_hash_table *nht;
3082
3083 rcu_read_lock();
3084 nht = rcu_dereference(tbl->nht);
3085
3086 read_lock_bh(&tbl->lock); /* avoid resizes */
3087 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3088 struct neighbour *n;
3089
3090 for (n = rcu_dereference(nht->hash_buckets[chain]);
3091 n != NULL;
3092 n = rcu_dereference(n->next))
3093 cb(n, cookie);
3094 }
3095 read_unlock_bh(&tbl->lock);
3096 rcu_read_unlock();
3097 }
3098 EXPORT_SYMBOL(neigh_for_each);
3099
3100 /* The tbl->lock must be held as a writer and BH disabled. */
3101 void __neigh_for_each_release(struct neigh_table *tbl,
3102 int (*cb)(struct neighbour *))
3103 {
3104 int chain;
3105 struct neigh_hash_table *nht;
3106
3107 nht = rcu_dereference_protected(tbl->nht,
3108 lockdep_is_held(&tbl->lock));
3109 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3110 struct neighbour *n;
3111 struct neighbour __rcu **np;
3112
3113 np = &nht->hash_buckets[chain];
3114 while ((n = rcu_dereference_protected(*np,
3115 lockdep_is_held(&tbl->lock))) != NULL) {
3116 int release;
3117
3118 write_lock(&n->lock);
3119 release = cb(n);
3120 if (release) {
3121 rcu_assign_pointer(*np,
3122 rcu_dereference_protected(n->next,
3123 lockdep_is_held(&tbl->lock)));
3124 neigh_mark_dead(n);
3125 } else
3126 np = &n->next;
3127 write_unlock(&n->lock);
3128 if (release)
3129 neigh_cleanup_and_release(n);
3130 }
3131 }
3132 }
3133 EXPORT_SYMBOL(__neigh_for_each_release);
3134
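/* Transmit skb to addr, resolving through the table selected by index:
 * NEIGH_ARP_TABLE/NEIGH_ND_TABLE go through the neighbour cache
 * (creating an entry if needed), while NEIGH_LINK_TABLE builds the
 * link-layer header directly and bypasses resolution. On lookup or
 * header failure the skb is freed and an error returned.
 */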
3135 int neigh_xmit(int index, struct net_device *dev,
3136 const void *addr, struct sk_buff *skb)
3137 {
3138 int err = -EAFNOSUPPORT;
3139 if (likely(index < NEIGH_NR_TABLES)) {
3140 struct neigh_table *tbl;
3141 struct neighbour *neigh;
3142
3143 tbl = neigh_tables[index];
3144 if (!tbl)
3145 goto out;
3146 rcu_read_lock();
3147 if (index == NEIGH_ARP_TABLE) {
3148 u32 key = *((u32 *)addr);
3149
3150 neigh = __ipv4_neigh_lookup_noref(dev, key);
3151 } else {
3152 neigh = __neigh_lookup_noref(tbl, addr, dev);
3153 }
3154 if (!neigh)
3155 neigh = __neigh_create(tbl, addr, dev, false);
3156 err = PTR_ERR(neigh);
3157 if (IS_ERR(neigh)) {
3158 rcu_read_unlock();
3159 goto out_kfree_skb;
3160 }
3161 err = READ_ONCE(neigh->output)(neigh, skb);
3162 rcu_read_unlock();
3163 } else if (index == NEIGH_LINK_TABLE) {
3165 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3166 addr, NULL, skb->len);
3167 if (err < 0)
3168 goto out_kfree_skb;
3169 err = dev_queue_xmit(skb);
3170 }
3171 out:
3172 return err;
3173 out_kfree_skb:
3174 kfree_skb(skb);
3175 goto out;
3176 }
3177 EXPORT_SYMBOL(neigh_xmit);
3178
3179 #ifdef CONFIG_PROC_FS
3180
3181 static struct neighbour *neigh_get_first(struct seq_file *seq)
3182 {
3183 struct neigh_seq_state *state = seq->private;
3184 struct net *net = seq_file_net(seq);
3185 struct neigh_hash_table *nht = state->nht;
3186 struct neighbour *n = NULL;
3187 int bucket;
3188
3189 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3190 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3191 n = rcu_dereference(nht->hash_buckets[bucket]);
3192
3193 while (n) {
3194 if (!net_eq(dev_net(n->dev), net))
3195 goto next;
3196 if (state->neigh_sub_iter) {
3197 loff_t fakep = 0;
3198 void *v;
3199
3200 v = state->neigh_sub_iter(state, n, &fakep);
3201 if (!v)
3202 goto next;
3203 }
3204 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3205 break;
3206 if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
3207 break;
3208 next:
3209 n = rcu_dereference(n->next);
3210 }
3211
3212 if (n)
3213 break;
3214 }
3215 state->bucket = bucket;
3216
3217 return n;
3218 }
3219
3220 static struct neighbour *neigh_get_next(struct seq_file *seq,
3221 struct neighbour *n,
3222 loff_t *pos)
3223 {
3224 struct neigh_seq_state *state = seq->private;
3225 struct net *net = seq_file_net(seq);
3226 struct neigh_hash_table *nht = state->nht;
3227
3228 if (state->neigh_sub_iter) {
3229 void *v = state->neigh_sub_iter(state, n, pos);
3230 if (v)
3231 return n;
3232 }
3233 n = rcu_dereference(n->next);
3234
3235 while (1) {
3236 while (n) {
3237 if (!net_eq(dev_net(n->dev), net))
3238 goto next;
3239 if (state->neigh_sub_iter) {
3240 void *v = state->neigh_sub_iter(state, n, pos);
3241 if (v)
3242 return n;
3243 goto next;
3244 }
3245 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3246 break;
3247
3248 if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
3249 break;
3250 next:
3251 n = rcu_dereference(n->next);
3252 }
3253
3254 if (n)
3255 break;
3256
3257 if (++state->bucket >= (1 << nht->hash_shift))
3258 break;
3259
3260 n = rcu_dereference(nht->hash_buckets[state->bucket]);
3261 }
3262
3263 if (n && pos)
3264 --(*pos);
3265 return n;
3266 }
3267
3268 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3269 {
3270 struct neighbour *n = neigh_get_first(seq);
3271
3272 if (n) {
3273 --(*pos);
3274 while (*pos) {
3275 n = neigh_get_next(seq, n, pos);
3276 if (!n)
3277 break;
3278 }
3279 }
3280 return *pos ? NULL : n;
3281 }
3282
3283 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3284 {
3285 struct neigh_seq_state *state = seq->private;
3286 struct net *net = seq_file_net(seq);
3287 struct neigh_table *tbl = state->tbl;
3288 struct pneigh_entry *pn = NULL;
3289 int bucket;
3290
3291 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3292 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3293 pn = tbl->phash_buckets[bucket];
3294 while (pn && !net_eq(pneigh_net(pn), net))
3295 pn = pn->next;
3296 if (pn)
3297 break;
3298 }
3299 state->bucket = bucket;
3300
3301 return pn;
3302 }
3303
3304 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3305 struct pneigh_entry *pn,
3306 loff_t *pos)
3307 {
3308 struct neigh_seq_state *state = seq->private;
3309 struct net *net = seq_file_net(seq);
3310 struct neigh_table *tbl = state->tbl;
3311
3312 do {
3313 pn = pn->next;
3314 } while (pn && !net_eq(pneigh_net(pn), net));
3315
3316 while (!pn) {
3317 if (++state->bucket > PNEIGH_HASHMASK)
3318 break;
3319 pn = tbl->phash_buckets[state->bucket];
3320 while (pn && !net_eq(pneigh_net(pn), net))
3321 pn = pn->next;
3322 if (pn)
3323 break;
3324 }
3325
3326 if (pn && pos)
3327 --(*pos);
3328
3329 return pn;
3330 }
3331
3332 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3333 {
3334 struct pneigh_entry *pn = pneigh_get_first(seq);
3335
3336 if (pn) {
3337 --(*pos);
3338 while (*pos) {
3339 pn = pneigh_get_next(seq, pn, pos);
3340 if (!pn)
3341 break;
3342 }
3343 }
3344 return *pos ? NULL : pn;
3345 }
3346
3347 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3348 {
3349 struct neigh_seq_state *state = seq->private;
3350 void *rc;
3351 loff_t idxpos = *pos;
3352
3353 rc = neigh_get_idx(seq, &idxpos);
3354 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3355 rc = pneigh_get_idx(seq, &idxpos);
3356
3357 return rc;
3358 }
3359
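/* seq_file iterator over a neighbour table (and, unless
 * NEIGH_SEQ_NEIGH_ONLY is set, its proxy entries), used by e.g.
 * /proc/net/arp. Holds rcu_read_lock() plus tbl->lock from ->start to
 * ->stop, so the walk must not sleep.
 */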
3360 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3361 __acquires(tbl->lock)
3362 __acquires(rcu)
3363 {
3364 struct neigh_seq_state *state = seq->private;
3365
3366 state->tbl = tbl;
3367 state->bucket = 0;
3368 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3369
3370 rcu_read_lock();
3371 state->nht = rcu_dereference(tbl->nht);
3372 read_lock_bh(&tbl->lock);
3373
3374 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3375 }
3376 EXPORT_SYMBOL(neigh_seq_start);
3377
3378 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3379 {
3380 struct neigh_seq_state *state;
3381 void *rc;
3382
3383 if (v == SEQ_START_TOKEN) {
3384 rc = neigh_get_first(seq);
3385 goto out;
3386 }
3387
3388 state = seq->private;
3389 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3390 rc = neigh_get_next(seq, v, NULL);
3391 if (rc)
3392 goto out;
3393 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3394 rc = pneigh_get_first(seq);
3395 } else {
3396 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3397 rc = pneigh_get_next(seq, v, NULL);
3398 }
3399 out:
3400 ++(*pos);
3401 return rc;
3402 }
3403 EXPORT_SYMBOL(neigh_seq_next);
3404
3405 void neigh_seq_stop(struct seq_file *seq, void *v)
3406 __releases(tbl->lock)
3407 __releases(rcu)
3408 {
3409 struct neigh_seq_state *state = seq->private;
3410 struct neigh_table *tbl = state->tbl;
3411
3412 read_unlock_bh(&tbl->lock);
3413 rcu_read_unlock();
3414 }
3415 EXPORT_SYMBOL(neigh_seq_stop);
3416
3417 /* statistics via seq_file */
3418
3419 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3420 {
3421 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3422 int cpu;
3423
3424 if (*pos == 0)
3425 return SEQ_START_TOKEN;
3426
3427 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3428 if (!cpu_possible(cpu))
3429 continue;
3430 *pos = cpu+1;
3431 return per_cpu_ptr(tbl->stats, cpu);
3432 }
3433 return NULL;
3434 }
3435
3436 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3437 {
3438 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3439 int cpu;
3440
3441 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3442 if (!cpu_possible(cpu))
3443 continue;
3444 *pos = cpu+1;
3445 return per_cpu_ptr(tbl->stats, cpu);
3446 }
3447 (*pos)++;
3448 return NULL;
3449 }
3450
3451 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3452 {
3453
3454 }
3455
3456 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3457 {
3458 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3459 struct neigh_statistics *st = v;
3460
3461 if (v == SEQ_START_TOKEN) {
3462 seq_puts(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3463 return 0;
3464 }
3465
3466 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
3467 "%08lx %08lx %08lx "
3468 "%08lx %08lx %08lx\n",
3469 atomic_read(&tbl->entries),
3470
3471 st->allocs,
3472 st->destroys,
3473 st->hash_grows,
3474
3475 st->lookups,
3476 st->hits,
3477
3478 st->res_failed,
3479
3480 st->rcv_probes_mcast,
3481 st->rcv_probes_ucast,
3482
3483 st->periodic_gc_runs,
3484 st->forced_gc_runs,
3485 st->unres_discards,
3486 st->table_fulls
3487 );
3488
3489 return 0;
3490 }
3491
3492 static const struct seq_operations neigh_stat_seq_ops = {
3493 .start = neigh_stat_seq_start,
3494 .next = neigh_stat_seq_next,
3495 .stop = neigh_stat_seq_stop,
3496 .show = neigh_stat_seq_show,
3497 };
3498 #endif /* CONFIG_PROC_FS */
3499
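/* Broadcast an RTM_NEWNEIGH/RTM_DELNEIGH message for n to RTNLGRP_NEIGH
 * listeners; on allocation or fill failure the error is recorded on the
 * group so listeners can detect the lost notification.
 */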
3500 static void __neigh_notify(struct neighbour *n, int type, int flags,
3501 u32 pid)
3502 {
3503 struct net *net = dev_net(n->dev);
3504 struct sk_buff *skb;
3505 int err = -ENOBUFS;
3506
3507 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3508 if (skb == NULL)
3509 goto errout;
3510
3511 err = neigh_fill_info(skb, n, pid, 0, type, flags);
3512 if (err < 0) {
3513 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3514 WARN_ON(err == -EMSGSIZE);
3515 kfree_skb(skb);
3516 goto errout;
3517 }
3518 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3519 return;
3520 errout:
3521 if (err < 0)
3522 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3523 }
3524
3525 void neigh_app_ns(struct neighbour *n)
3526 {
3527 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3528 }
3529 EXPORT_SYMBOL(neigh_app_ns);
3530
3531 #ifdef CONFIG_SYSCTL
3532 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3533
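/* The legacy "unres_qlen" sysctl is expressed in packets; internally the
 * limit is QUEUE_LEN_BYTES. Convert using SKB_TRUESIZE(ETH_FRAME_LEN) as
 * the assumed per-packet cost in both directions, clamping writes to
 * [0, unres_qlen_max].
 */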
3534 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3535 void *buffer, size_t *lenp, loff_t *ppos)
3536 {
3537 int size, ret;
3538 struct ctl_table tmp = *ctl;
3539
3540 tmp.extra1 = SYSCTL_ZERO;
3541 tmp.extra2 = &unres_qlen_max;
3542 tmp.data = &size;
3543
3544 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3545 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3546
3547 if (write && !ret)
3548 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3549 return ret;
3550 }
3551
3552 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3553 int index)
3554 {
3555 struct net_device *dev;
3556 int family = neigh_parms_family(p);
3557
3558 rcu_read_lock();
3559 for_each_netdev_rcu(net, dev) {
3560 struct neigh_parms *dst_p =
3561 neigh_get_dev_parms_rcu(dev, family);
3562
3563 if (dst_p && !test_bit(index, dst_p->data_state))
3564 dst_p->data[index] = p->data[index];
3565 }
3566 rcu_read_unlock();
3567 }
3568
3569 static void neigh_proc_update(struct ctl_table *ctl, int write)
3570 {
3571 struct net_device *dev = ctl->extra1;
3572 struct neigh_parms *p = ctl->extra2;
3573 struct net *net = neigh_parms_net(p);
3574 int index = (int *) ctl->data - p->data;
3575
3576 if (!write)
3577 return;
3578
3579 set_bit(index, p->data_state);
3580 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3581 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3582 if (!dev) /* NULL dev means this is default value */
3583 neigh_copy_dflt_parms(net, p, index);
3584 }
3585
3586 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3587 void *buffer, size_t *lenp,
3588 loff_t *ppos)
3589 {
3590 struct ctl_table tmp = *ctl;
3591 int ret;
3592
3593 tmp.extra1 = SYSCTL_ZERO;
3594 tmp.extra2 = SYSCTL_INT_MAX;
3595
3596 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3597 neigh_proc_update(ctl, write);
3598 return ret;
3599 }
3600
3601 static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write,
3602 void *buffer, size_t *lenp, loff_t *ppos)
3603 {
3604 struct ctl_table tmp = *ctl;
3605 int ret;
3606
3607 int min = msecs_to_jiffies(1);
3608
3609 tmp.extra1 = &min;
3610 tmp.extra2 = NULL;
3611
3612 ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
3613 neigh_proc_update(ctl, write);
3614 return ret;
3615 }
3616
3617 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3618 size_t *lenp, loff_t *ppos)
3619 {
3620 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3621
3622 neigh_proc_update(ctl, write);
3623 return ret;
3624 }
3625 EXPORT_SYMBOL(neigh_proc_dointvec);
3626
3627 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3628 size_t *lenp, loff_t *ppos)
3629 {
3630 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3631
3632 neigh_proc_update(ctl, write);
3633 return ret;
3634 }
3635 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3636
3637 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3638 void *buffer, size_t *lenp,
3639 loff_t *ppos)
3640 {
3641 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3642
3643 neigh_proc_update(ctl, write);
3644 return ret;
3645 }
3646
3647 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3648 void *buffer, size_t *lenp, loff_t *ppos)
3649 {
3650 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3651
3652 neigh_proc_update(ctl, write);
3653 return ret;
3654 }
3655 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3656
3657 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3658 void *buffer, size_t *lenp,
3659 loff_t *ppos)
3660 {
3661 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3662
3663 neigh_proc_update(ctl, write);
3664 return ret;
3665 }
3666
3667 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3668 void *buffer, size_t *lenp,
3669 loff_t *ppos)
3670 {
3671 struct neigh_parms *p = ctl->extra2;
3672 int ret;
3673
3674 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3675 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3676 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3677 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3678 else
3679 ret = -1;
3680
3681 if (write && ret == 0) {
3682 /* update reachable_time as well, otherwise, the change will
3683 * only be effective after the next time neigh_periodic_work
3684 * decides to recompute it
3685 */
3686 p->reachable_time =
3687 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3688 }
3689 return ret;
3690 }
3691
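/* The template entries below store the offset of parms->data[index]
 * (computed from a NULL base) in .data; neigh_sysctl_register() rebases
 * each entry onto the real parms instance by adding (long)p, and
 * neigh_proc_update() recovers the index again by pointer subtraction.
 */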
3692 #define NEIGH_PARMS_DATA_OFFSET(index) \
3693 (&((struct neigh_parms *) 0)->data[index])
3694
3695 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3696 [NEIGH_VAR_ ## attr] = { \
3697 .procname = name, \
3698 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3699 .maxlen = sizeof(int), \
3700 .mode = mval, \
3701 .proc_handler = proc, \
3702 }
3703
3704 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3705 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3706
3707 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3708 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3709
3710 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3711 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3712
3713 #define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
3714 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)
3715
3716 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3717 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3718
3719 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3720 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
						       "interval_probe_time_ms"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		{},
	},
};
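
/*
 * Illustrative userspace sketch, not part of this file: the gc_* entries
 * at the tail of this template are only registered for a table's
 * "default" parms; neigh_sysctl_register() below truncates per-device
 * tables at NEIGH_VAR_GC_INTERVAL.  "eth0" is an assumed interface name.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Expected to exist: the defaults carry the whole template. */
	printf("default gc_thresh1: %s\n",
	       access("/proc/sys/net/ipv4/neigh/default/gc_thresh1", F_OK) ?
	       "missing" : "present");
	/* Expected to be missing: per-device tables stop earlier. */
	printf("eth0 gc_thresh1:    %s\n",
	       access("/proc/sys/net/ipv4/neigh/eth0/gc_thresh1", F_OK) ?
	       "missing" : "present");
	return 0;
}
#endif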

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;
	size_t neigh_vars_size;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	neigh_vars_size = ARRAY_SIZE(t->neigh_vars);
	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
		neigh_vars_size = NEIGH_VAR_BASE_REACHABLE_TIME_MS + 1;
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* These handlers update p->reachable_time after
		 * base_reachable_time(_ms) is set, so that the new timer
		 * starts being applied on the next neighbour update instead
		 * of waiting for neigh_periodic_work to recompute it (which
		 * can take multiple minutes).  Any handler that replaces them
		 * should do this as well.
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header = register_net_sysctl_sz(neigh_parms_net(p),
						  neigh_path, t->neigh_vars,
						  neigh_vars_size);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
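
/*
 * For reference, a condensed sketch of how a caller might wire this up,
 * loosely modelled on the IPv4 ARP code; my_tbl and my_dev_parms are
 * hypothetical names, and error handling is abbreviated.
 */
#if 0
static int my_proto_sysctl_init(struct net_device *dev,
				struct neigh_parms *my_dev_parms,
				struct neigh_table *my_tbl)
{
	int err;

	/* "net/ipv4/neigh/default/": full template, gc_* knobs included. */
	err = neigh_sysctl_register(NULL, &my_tbl->parms, NULL);
	if (err)
		return err;

	/* "net/ipv4/neigh/<dev>/": truncated before the gc_* entries. */
	err = neigh_sysctl_register(dev, my_dev_parms, NULL);
	if (err)
		neigh_sysctl_unregister(&my_tbl->parms);
	return err;
}
#endif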

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */

static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

	return 0;
}

subsys_initcall(neigh_init);
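
/*
 * Illustrative userspace sketch, not part of this file: the RTM_GETNEIGH
 * doit/dumpit pair registered above is what serves a netlink dump like
 * this one (replies come back as RTM_NEWNEIGH messages).  Error handling
 * is minimal.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct ndmsg ndm;
	} req = {
		.nlh = {
			.nlmsg_len	= NLMSG_LENGTH(sizeof(struct ndmsg)),
			.nlmsg_type	= RTM_GETNEIGH,
			.nlmsg_flags	= NLM_F_REQUEST | NLM_F_DUMP,
			.nlmsg_seq	= 1,
		},
		.ndm = { .ndm_family = AF_INET },
	};
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0 || send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nh;

		if (len <= 0)
			break;
		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			if (nh->nlmsg_type == NLMSG_DONE ||
			    nh->nlmsg_type == NLMSG_ERROR)
				goto out;
			if (nh->nlmsg_type == RTM_NEWNEIGH) {
				struct ndmsg *ndm = NLMSG_DATA(nh);

				printf("ifindex %d state 0x%x\n",
				       ndm->ndm_ifindex, ndm->ndm_state);
			}
		}
	}
out:
	close(fd);
	return 0;
}
#endif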