// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
	u16 ul_rif_id; /* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

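/* Bind or unbind a flow counter to the RIF in the given direction. RITR
 * is queried first so that the rewrite preserves the rest of the RIF's
 * configuration.
 */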
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

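/* Allocate a counter from the RIF sub-pool, clear it and bind it to the
 * RIF. Any failure unwinds back to a freed counter.
 */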
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

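/* One bit for each prefix length 0..128, i.e. 129 bits in total. */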
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

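/* Describe the tree structure to the device via RALST: each used prefix
 * length becomes a bin whose left child is the next shorter used length
 * (no right children), and the longest used length is the root bin.
 */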
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

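/* Size the LPM tree array from the MAX_LPM_TREES resource and create one
 * default tree (with empty prefix usage) per protocol.
 */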
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 }};
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

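/* Claim an unused VR and create its IPv4/IPv6 unicast FIBs and multicast
 * route tables. The VR only counts as used once the tables are assigned,
 * so the error path simply destroys them in reverse order.
 */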
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

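/* Rebind every VR whose FIB for this protocol uses the current default
 * tree to the new tree. On failure, roll the already-rebound VRs back to
 * the old tree.
 */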
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

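/* Resolve the underlay device of a tunnel's overlay device. Must be
 * called under RCU read lock, as required by dev_get_by_index_rcu().
 */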
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return dev_get_by_index_rcu(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

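/* Reserve a KVD linear entry for the decap adjacency and cross-link the
 * FIB entry with the IPIP entry it decapsulates for.
 */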
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

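/* Demoting turns a decap route back into an ordinary trap entry, e.g.
 * when the tunnel can no longer be offloaded; promoting is the inverse,
 * turning a trap entry into an IPIP decap entry. Both update the device.
 */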
static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

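/* Look up the host route (/32) for the given address in the given table
 * and check that it has the expected type. Only IPv4 is implemented.
 */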
static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

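/* Map a netdevice to the IPIP tunnel type this driver can offload for it,
 * by matching dev->type against each registered set of tunnel ops.
 */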
static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	bool is_ipip_ul;

	mutex_lock(&mlxsw_sp->router->lock);
	is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
	mutex_unlock(&mlxsw_sp->router->lock);

	return is_ipip_ul;
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

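/* Write the loopback RIF configuration for the tunnel to RITR. Only an
 * IPv4 underlay is supported; an IPv6 underlay returns -EAFNOSUPPORT.
 */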
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			    ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);

/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
 * @mlxsw_sp: mlxsw_sp.
 * @ipip_entry: IPIP entry.
 * @recreate_loopback: Recreates the associated loopback RIF.
 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 * @extack: extack.
 *
 * Return: Non-zero value on failure.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}

static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return 0;

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     bool *demote_this,
				     struct netlink_ext_ack *extack)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* Moving underlay to a different VRF might cause local address
	 * conflict, and the conflicting tunnels need to be demoted.
	 */
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		*demote_this = true;
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}

void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}

static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *ul_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();
		if (ipip_ul_dev == ul_dev)
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
	}
}

int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *ol_dev,
				     unsigned long event,
				     struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	switch (event) {
	case NETDEV_REGISTER:
		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								   ol_dev,
								   extack);
		break;
	case NETDEV_CHANGE:
		extack = info->extack;
		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							      ol_dev, extack);
		break;
	case NETDEV_CHANGEMTU:
		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   bool *demote_this,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    demote_this,
								    extack);
		break;

	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}
	return 0;
}

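/* Process an underlay device event for all tunnels whose underlay resolves
 * to ul_dev. The callee signals through demote_this when the current entry
 * itself has to be demoted; the demotion is then done here, where it is
 * safe to unlink the entry from the list being iterated.
 */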
int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		struct mlxsw_sp_ipip_entry *prev;
		bool demote_this = false;

		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, &demote_this,
							 event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			break;
		}

		if (demote_this) {
			if (list_is_first(&ipip_entry->ipip_list_node,
					  &mlxsw_sp->router->ipip_list))
				prev = NULL;
			else
				/* This can't be cached from previous iteration,
				 * because that entry could be gone now.
				 */
				prev = list_prev_entry(ipip_entry,
						       ipip_list_node);
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			ipip_entry = prev;
		}
	}
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}

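/* Record the NVE decap configuration and, if an IP2ME (trap) route for the
 * underlay source IP already exists, convert it to an NVE decap entry. The
 * reverse direction is handled by mlxsw_sp_router_nve_demote_decap().
 */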
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip,
				      u32 tunnel_index)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
		err = -EINVAL;
		goto out;
	}

	router->nve_decap_config.ul_tb_id = ul_tb_id;
	router->nve_decap_config.tunnel_index = tunnel_index;
	router->nve_decap_config.ul_proto = ul_proto;
	router->nve_decap_config.ul_sip = *ul_sip;
	router->nve_decap_config.valid = true;

	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface.
	 */
	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->decap.tunnel_index = tunnel_index;
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;

	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_update;

	goto out;

err_fib_entry_update:
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
		goto out;

	router->nve_decap_config.valid = false;

	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
					 u32 ul_tb_id,
					 enum mlxsw_sp_l3proto ul_proto,
					 const union mlxsw_sp_l3addr *ul_sip)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;

	return router->nve_decap_config.valid &&
	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
	       router->nve_decap_config.ul_proto == ul_proto &&
	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
		       sizeof(*ul_sip));
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
			struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry) {
		if (list_empty(&rif->neigh_list))
			return NULL;
		else
			return list_first_entry(&rif->neigh_list,
						typeof(*neigh_entry),
						rif_list_node);
	}
	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
		return NULL;
	return list_next_entry(neigh_entry, rif_list_node);
}

int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->key.n->tbl->family;
}

unsigned char *
mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->ha;
}

u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return ntohl(*((__be32 *) n->primary_key));
}

struct in6_addr *
mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return (struct in6_addr *) &n->primary_key;
}

int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_neigh_entry *neigh_entry,
			       u64 *p_counter)
{
	if (!neigh_entry->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
					 p_counter, NULL);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static bool
mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct devlink *devlink;
	const char *table_name;

	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
	case AF_INET:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
		break;
	case AF_INET6:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
		break;
	default:
		WARN_ON(1);
		return false;
	}

	devlink = priv_to_devlink(mlxsw_sp->core);
	return devlink_dpipe_table_counter_enabled(devlink, table_name);
}

static void
mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
		return;

	neigh_entry->counter_valid = true;
}

static void
mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp,
				   neigh_entry->counter_index);
	neigh_entry->counter_valid = false;
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}

static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	struct net_device *dev;
	struct neighbour *n;
	struct in6_addr dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
					 (char *) &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&nd_tbl, &dip, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
#else
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
}
#endif

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	/* One record contains one entry. */
	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
					       rec_index);
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	}
}

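/* Determine whether the dump filled the response buffer, in which case
 * another RAUHTD query is needed to retrieve the remaining records. The
 * buffer is considered full when the maximum number of records was
 * returned and the last record is itself full: an IPv6 record always is,
 * while an IPv4 record is full only when all of its entry slots are used.
 */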
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

static int
__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl,
				       enum mlxsw_reg_rauhtd_type type)
{
	int i, num_rec;
	int err;

	/* Ensure the RIF we read from the device does not change mid-dump. */
	mutex_lock(&mlxsw_sp->router->lock);
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}

static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_rauhtd_type type;
	char *rauhtd_pl;
	int err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
	if (err)
		goto out;

	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
out:
	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	mutex_lock(&mlxsw_sp->router->lock);
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find the unresolved ones and send
	 * ARP on them. This solves the chicken-and-egg problem: a nexthop
	 * isn't offloaded until its neighbour is resolved, but the neighbour
	 * might never be resolved if traffic flows in HW using a different
	 * nexthop.
	 */
	mutex_lock(&router->lock);
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	mutex_unlock(&router->lock);

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing, bool dead);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}

static int
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static int
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	const char *dip = n->primary_key;

	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n = neigh_entry->key.n;

	/* Packets with a link-local destination address are trapped
	 * after LPM lookup and never reach the neighbour table, so
	 * there is no need to program such neighbours to the device.
	 */
	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
	    IPV6_ADDR_LINKLOCAL)
		return true;
	return false;
}

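/* Reflect the neighbour state in the device: program the entry through
 * RAUHT when it becomes connected, remove it when it is disconnected, and
 * mirror the result back to the kernel via the NTF_OFFLOADED flag.
 */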
static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
	int err;

	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl->family == AF_INET) {
		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						      op);
		if (err)
			return;
	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
			return;
		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
						      op);
		if (err)
			return;
	} else {
		WARN_ON_ONCE(1);
		return;
	}

	if (adding)
		neigh_entry->key.n->flags |= NTF_OFFLOADED;
	else
		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
}

void
mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry,
				    bool adding)
{
	if (adding)
		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	else
		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
}

struct mlxsw_sp_netevent_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = net_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	mutex_lock(&mlxsw_sp->router->lock);
	mlxsw_sp_span_respin(mlxsw_sp);

	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
				      dead);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	neigh_release(n);
	kfree(net_work);
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;

	mlxsw_sp_mp_hash_init(mlxsw_sp);
	kfree(net_work);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;

	__mlxsw_sp_router_init(mlxsw_sp);
	kfree(net_work);
}

static int mlxsw_sp_router_schedule_work(struct net *net,
					 struct notifier_block *nb,
					 void (*cb)(struct work_struct *))
{
	struct mlxsw_sp_netevent_work *net_work;
	struct mlxsw_sp_router *router;

	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
		return NOTIFY_DONE;

	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
	if (!net_work)
		return NOTIFY_BAD;

	INIT_WORK(&net_work->work, cb);
	net_work->mlxsw_sp = router->mlxsw_sp;
	mlxsw_core_schedule_work(&net_work->work);
	return NOTIFY_DONE;
}

static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlxsw_sp_netevent_work *net_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || (p->tbl->family != AF_INET &&
				p->tbl->family != AF_INET6))
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
		if (!net_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		net_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destroyed until we drop the reference in the delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&net_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
		return mlxsw_sp_router_schedule_work(ptr, nb,
				mlxsw_sp_router_mp_hash_event_work);

	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
		return mlxsw_sp_router_schedule_work(ptr, nb,
				mlxsw_sp_router_update_priority_work);
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for neighbour activity update and
	 * unresolved nexthop probing.
	 */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node) {
		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
	}
}

enum mlxsw_sp_nexthop_type {
	MLXSW_SP_NEXTHOP_TYPE_ETH,
	MLXSW_SP_NEXTHOP_TYPE_IPIP,
};

struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;
	struct list_head router_list_node;
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;
	struct mlxsw_sp_nexthop_key key;
	unsigned char gw_addr[sizeof(struct in6_addr)];
	int ifindex;
	int nh_weight;
	int norm_nh_weight;
	int num_adj_entries;
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	enum mlxsw_sp_nexthop_type type;
	union {
		struct mlxsw_sp_neigh_entry *neigh_entry;
		struct mlxsw_sp_ipip_entry *ipip_entry;
	};
	unsigned int counter_index;
	bool counter_valid;
};

struct mlxsw_sp_nexthop_group {
	void *priv;
	struct rhash_head ht_node;
	struct list_head fib_list; /* list of fib entries that use this group */
	struct neigh_table *neigh_tbl;
	u8 adj_index_valid:1,
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	int sum_norm_weight;
	struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif	nexthops[0].rif
};

void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
		return;

	nh->counter_valid = true;
}

void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	if (!nh->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
	nh->counter_valid = false;
}

int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
{
	if (!nh->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
					 p_counter, NULL);
}

struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
					       struct mlxsw_sp_nexthop *nh)
{
	if (!nh) {
		if (list_empty(&router->nexthop_list))
			return NULL;
		else
			return list_first_entry(&router->nexthop_list,
						typeof(*nh), router_list_node);
	}
	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
		return NULL;
	return list_next_entry(nh, router_list_node);
}

bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
{
	return nh->offloaded;
}

unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->offloaded)
		return NULL;
	return nh->neigh_entry->ha;
}

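/* Report where the nexthop lives in the adjacency table: the group base
 * index, the group size and the offset of this nexthop within the group.
 * The offset is the sum of num_adj_entries of the offloaded nexthops that
 * precede it. For example, in a group at base index 1000 whose first two
 * offloaded nexthops occupy 1 and 2 adjacency entries, the third nexthop
 * is reported at offset 3.
 */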
int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
			     u32 *p_adj_size, u32 *p_adj_hash_index)
{
	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
	u32 adj_hash_index = 0;
	int i;

	if (!nh->offloaded || !nh_grp->adj_index_valid)
		return -EINVAL;

	*p_adj_index = nh_grp->adj_index;
	*p_adj_size = nh_grp->ecmp_size;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];

		if (nh_iter == nh)
			break;
		if (nh_iter->offloaded)
			adj_hash_index += nh_iter->num_adj_entries;
	}

	*p_adj_hash_index = adj_hash_index;
	return 0;
}

struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
{
	return nh->rif;
}

bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];

		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
			return true;
	}
	return false;
}

static struct fib_info *
mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
{
	return nh_grp->priv;
}

struct mlxsw_sp_nexthop_group_cmp_arg {
	enum mlxsw_sp_l3proto proto;
	union {
		struct fib_info *fi;
		struct mlxsw_sp_fib6_entry *fib6_entry;
	};
};

static bool
mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
				    const struct in6_addr *gw, int ifindex,
				    int weight)
{
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		const struct mlxsw_sp_nexthop *nh;

		nh = &nh_grp->nexthops[i];
		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
			return true;
	}

	return false;
}

static bool
mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
			    const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	if (nh_grp->count != fib6_entry->nrt6)
		return false;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
		struct in6_addr *gw;
		int ifindex, weight;

		ifindex = fib6_nh->fib_nh_dev->ifindex;
		weight = fib6_nh->fib_nh_weight;
		gw = &fib6_nh->fib_nh_gw6;
		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
							 weight))
			return false;
	}

	return true;
}

static int
mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;

	switch (cmp_arg->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
	case MLXSW_SP_L3_PROTO_IPV6:
		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
						    cmp_arg->fib6_entry);
	default:
		WARN_ON(1);
		return 1;
	}
}

static int
mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
{
	return nh_grp->neigh_tbl->family;
}

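/* Hash a nexthop group so that groups which are equal under
 * mlxsw_sp_nexthop_group_cmp() hash equally: an IPv4 group hashes by its
 * fib_info pointer, while an IPv6 group hashes an XOR of its nexthops'
 * ifindexes and gateway addresses, which is insensitive to nexthop order.
 */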
static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group *nh_grp = data;
	const struct mlxsw_sp_nexthop *nh;
	struct fib_info *fi;
	unsigned int val;
	int i;

	switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
	case AF_INET:
		fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
		return jhash(&fi, sizeof(fi), seed);
	case AF_INET6:
		val = nh_grp->count;
		for (i = 0; i < nh_grp->count; i++) {
			nh = &nh_grp->nexthops[i];
			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
		}
		return jhash(&val, sizeof(val), seed);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u32
mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
{
	unsigned int val = fib6_entry->nrt6;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
		struct net_device *dev = fib6_nh->fib_nh_dev;
		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;

		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
		val ^= jhash(gw, sizeof(*gw), seed);
	}

	return jhash(&val, sizeof(val), seed);
}

static u32
mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;

	switch (cmp_arg->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
	case MLXSW_SP_L3_PROTO_IPV6:
		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
	default:
		WARN_ON(1);
		return 0;
	}
}

static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.hashfn = mlxsw_sp_nexthop_group_hash,
	.obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
	.obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
};

static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
	    !nh_grp->gateway)
		return 0;

	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}

static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
	    !nh_grp->gateway)
		return;

	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
			       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;

	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
	cmp_arg.fi = fi;
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &cmp_arg,
				      mlxsw_sp_nexthop_group_ht_params);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;

	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
	cmp_arg.fib6_entry = fib6_entry;
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &cmp_arg,
				      mlxsw_sp_nexthop_group_ht_params);
}

static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};

static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}

static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}

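/* When a nexthop group is moved to a new adjacency index, every RALUE
 * (route) entry that references the old index must be rewritten. The RALEU
 * register rewrites all such references within one virtual router at once,
 * so walk the group's FIB entries and issue one update per FIB, skipping
 * consecutive entries that belong to the same FIB.
 */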
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}

static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}

static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				     struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, MLXSW_REG_RATR_TYPE_ETHERNET,
			    adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	if (nh->counter_valid)
		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
	else
		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
			    struct mlxsw_sp_nexthop *nh)
{
	int i;

	for (i = 0; i < nh->num_adj_entries; i++) {
		int err;

		err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
					  u32 adj_index,
					  struct mlxsw_sp_nexthop *nh)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
}

static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
					u32 adj_index,
					struct mlxsw_sp_nexthop *nh)
{
	int i;

	for (i = 0; i < nh->num_adj_entries; i++) {
		int err;

		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
						     nh);
		if (err)
			return err;
	}

	return 0;
}

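/* Write the adjacency entries of a nexthop group to the device. Only
 * nexthops marked should_offload occupy adjacency entries; each is written
 * when its MAC changed (nh->update) or when the group was reallocated to a
 * new adjacency index (reallocate), and is then marked offloaded.
 */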
3230 static int
mlxsw_sp_nexthop_group_update(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_nexthop_group * nh_grp,bool reallocate)3231 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3232 struct mlxsw_sp_nexthop_group *nh_grp,
3233 bool reallocate)
3234 {
3235 u32 adj_index = nh_grp->adj_index; /* base */
3236 struct mlxsw_sp_nexthop *nh;
3237 int i;
3238
3239 for (i = 0; i < nh_grp->count; i++) {
3240 nh = &nh_grp->nexthops[i];
3241
3242 if (!nh->should_offload) {
3243 nh->offloaded = 0;
3244 continue;
3245 }
3246
3247 if (nh->update || reallocate) {
3248 int err = 0;
3249
3250 switch (nh->type) {
3251 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3252 err = mlxsw_sp_nexthop_update
3253 (mlxsw_sp, adj_index, nh);
3254 break;
3255 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3256 err = mlxsw_sp_nexthop_ipip_update
3257 (mlxsw_sp, adj_index, nh);
3258 break;
3259 }
3260 if (err)
3261 return err;
3262 nh->update = 0;
3263 nh->offloaded = 1;
3264 }
3265 adj_index += nh->num_adj_entries;
3266 }
3267 return 0;
3268 }
3269
3270 static int
3271 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3272 struct mlxsw_sp_nexthop_group *nh_grp)
3273 {
3274 struct mlxsw_sp_fib_entry *fib_entry;
3275 int err;
3276
3277 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3278 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3279 if (err)
3280 return err;
3281 }
3282 return 0;
3283 }
3284
3285 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3286 {
3287 /* Valid sizes for an adjacency group are:
3288 * 1-64, 512, 1024, 2048 and 4096.
3289 */
3290 if (*p_adj_grp_size <= 64)
3291 return;
3292 else if (*p_adj_grp_size <= 512)
3293 *p_adj_grp_size = 512;
3294 else if (*p_adj_grp_size <= 1024)
3295 *p_adj_grp_size = 1024;
3296 else if (*p_adj_grp_size <= 2048)
3297 *p_adj_grp_size = 2048;
3298 else
3299 *p_adj_grp_size = 4096;
3300 }
3301
3302 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3303 unsigned int alloc_size)
3304 {
3305 if (alloc_size >= 4096)
3306 *p_adj_grp_size = 4096;
3307 else if (alloc_size >= 2048)
3308 *p_adj_grp_size = 2048;
3309 else if (alloc_size >= 1024)
3310 *p_adj_grp_size = 1024;
3311 else if (alloc_size >= 512)
3312 *p_adj_grp_size = 512;
3313 }
3314
3315 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3316 u16 *p_adj_grp_size)
3317 {
3318 unsigned int alloc_size;
3319 int err;
3320
3321 /* Round up the requested group size to the next size supported
3322 * by the device and make sure the request can be satisfied.
3323 */
3324 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3325 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3326 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3327 *p_adj_grp_size, &alloc_size);
3328 if (err)
3329 return err;
3330 /* It is possible the allocation results in more allocated
3331 * entries than requested. Try to use as much of them as
3332 * possible.
3333 */
3334 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3335
3336 return 0;
3337 }
3338
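/* Normalize the nexthop weights by their greatest common divisor so
 * that the smallest possible number of adjacency entries is used.
 * E.g., weights {20, 30} have gcd 10, so the normalized weights are
 * {2, 3} and sum_norm_weight is 5. Nexthops that should not be
 * offloaded are skipped and do not contribute to the sum.
 */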
3339 static void
3340 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3341 {
3342 int i, g = 0, sum_norm_weight = 0;
3343 struct mlxsw_sp_nexthop *nh;
3344
3345 for (i = 0; i < nh_grp->count; i++) {
3346 nh = &nh_grp->nexthops[i];
3347
3348 if (!nh->should_offload)
3349 continue;
3350 if (g > 0)
3351 g = gcd(nh->nh_weight, g);
3352 else
3353 g = nh->nh_weight;
3354 }
3355
3356 for (i = 0; i < nh_grp->count; i++) {
3357 nh = &nh_grp->nexthops[i];
3358
3359 if (!nh->should_offload)
3360 continue;
3361 nh->norm_nh_weight = nh->nh_weight / g;
3362 sum_norm_weight += nh->norm_nh_weight;
3363 }
3364
3365 nh_grp->sum_norm_weight = sum_norm_weight;
3366 }
3367
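/* Distribute the group's ecmp_size adjacency entries among the
 * offloaded nexthops in proportion to their normalized weights, using
 * a running rounded upper bound so the counts always sum to exactly
 * ecmp_size. E.g., with normalized weights {2, 3} (total 5) and
 * ecmp_size 5, the nexthops get 2 and 3 entries, respectively.
 */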
3368 static void
3369 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3370 {
3371 int total = nh_grp->sum_norm_weight;
3372 u16 ecmp_size = nh_grp->ecmp_size;
3373 int i, weight = 0, lower_bound = 0;
3374
3375 for (i = 0; i < nh_grp->count; i++) {
3376 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3377 int upper_bound;
3378
3379 if (!nh->should_offload)
3380 continue;
3381 weight += nh->norm_nh_weight;
3382 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3383 nh->num_adj_entries = upper_bound - lower_bound;
3384 lower_bound = upper_bound;
3385 }
3386 }
3387
3388 static struct mlxsw_sp_nexthop *
3389 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3390 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3391
3392 static void
3393 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3394 struct mlxsw_sp_nexthop_group *nh_grp)
3395 {
3396 int i;
3397
3398 for (i = 0; i < nh_grp->count; i++) {
3399 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3400
3401 if (nh->offloaded)
3402 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3403 else
3404 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3405 }
3406 }
3407
3408 static void
3409 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3410 struct mlxsw_sp_fib6_entry *fib6_entry)
3411 {
3412 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3413
3414 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3415 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3416 struct mlxsw_sp_nexthop *nh;
3417
3418 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3419 if (nh && nh->offloaded)
3420 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3421 else
3422 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3423 }
3424 }
3425
3426 static void
3427 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3428 struct mlxsw_sp_nexthop_group *nh_grp)
3429 {
3430 struct mlxsw_sp_fib6_entry *fib6_entry;
3431
3432 /* Unfortunately, in IPv6 the route and the nexthop are described by
3433 * the same struct, so we need to iterate over all the routes using the
3434 * nexthop group and set / clear the offload indication for them.
3435 */
3436 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3437 common.nexthop_group_node)
3438 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3439 }
3440
3441 static void
3442 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3443 struct mlxsw_sp_nexthop_group *nh_grp)
3444 {
3445 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
3446 case AF_INET:
3447 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3448 break;
3449 case AF_INET6:
3450 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3451 break;
3452 }
3453 }
3454
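/* Re-program a nexthop group after its composition or neighbour state
 * changed. If no member changed its offload state, only the MACs of
 * the existing adjacency entries are rewritten. Otherwise a new
 * adjacency range is allocated and populated, the routes using the
 * group are re-pointed at it (either directly or via a bulk update of
 * all routes using the old range), and the old range is freed. On any
 * failure the group falls back to trapping packets to the CPU
 * (set_trap).
 */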
3455 static void
3456 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3457 struct mlxsw_sp_nexthop_group *nh_grp)
3458 {
3459 u16 ecmp_size, old_ecmp_size;
3460 struct mlxsw_sp_nexthop *nh;
3461 bool offload_change = false;
3462 u32 adj_index;
3463 bool old_adj_index_valid;
3464 u32 old_adj_index;
3465 int i;
3466 int err;
3467
3468 if (!nh_grp->gateway) {
3469 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3470 return;
3471 }
3472
3473 for (i = 0; i < nh_grp->count; i++) {
3474 nh = &nh_grp->nexthops[i];
3475
3476 if (nh->should_offload != nh->offloaded) {
3477 offload_change = true;
3478 if (nh->should_offload)
3479 nh->update = 1;
3480 }
3481 }
3482 if (!offload_change) {
3483 /* Nothing was added or removed, so no need to reallocate. Just
3484 * update MAC on existing adjacency indexes.
3485 */
3486 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
3487 if (err) {
3488 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3489 goto set_trap;
3490 }
3491 return;
3492 }
3493 mlxsw_sp_nexthop_group_normalize(nh_grp);
3494 if (!nh_grp->sum_norm_weight)
3495 /* No neighbour of this group is connected, so just set
3496 * the trap and let everything flow through the kernel.
3497 */
3498 goto set_trap;
3499
3500 ecmp_size = nh_grp->sum_norm_weight;
3501 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3502 if (err)
3503 /* No valid allocation size available. */
3504 goto set_trap;
3505
3506 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3507 ecmp_size, &adj_index);
3508 if (err) {
3509 /* We ran out of KVD linear space, just set the
3510 * trap and let everything flow through kernel.
3511 */
3512 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3513 goto set_trap;
3514 }
3515 old_adj_index_valid = nh_grp->adj_index_valid;
3516 old_adj_index = nh_grp->adj_index;
3517 old_ecmp_size = nh_grp->ecmp_size;
3518 nh_grp->adj_index_valid = 1;
3519 nh_grp->adj_index = adj_index;
3520 nh_grp->ecmp_size = ecmp_size;
3521 mlxsw_sp_nexthop_group_rebalance(nh_grp);
3522 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
3523 if (err) {
3524 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3525 goto set_trap;
3526 }
3527
3528 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3529
3530 if (!old_adj_index_valid) {
3531 /* The trap was set for fib entries, so we have to call
3532 * fib entry update to unset it and use adjacency index.
3533 */
3534 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3535 if (err) {
3536 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3537 goto set_trap;
3538 }
3539 return;
3540 }
3541
3542 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3543 old_adj_index, old_ecmp_size);
3544 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3545 old_ecmp_size, old_adj_index);
3546 if (err) {
3547 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3548 goto set_trap;
3549 }
3550
3551 return;
3552
3553 set_trap:
3554 old_adj_index_valid = nh_grp->adj_index_valid;
3555 nh_grp->adj_index_valid = 0;
3556 for (i = 0; i < nh_grp->count; i++) {
3557 nh = &nh_grp->nexthops[i];
3558 nh->offloaded = 0;
3559 }
3560 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3561 if (err)
3562 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3563 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3564 if (old_adj_index_valid)
3565 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3566 nh_grp->ecmp_size, nh_grp->adj_index);
3567 }
3568
3569 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3570 bool removing)
3571 {
3572 if (!removing)
3573 nh->should_offload = 1;
3574 else
3575 nh->should_offload = 0;
3576 nh->update = 1;
3577 }
3578
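/* The kernel marked the neighbour as dead, but nexthops still use it.
 * Look up (or create) a live neighbour for the same gateway and
 * device, swap it into the neigh entry, re-insert the entry into the
 * hashtable, and migrate every nexthop's reference from the old
 * neighbour to the new one, refreshing the affected groups.
 */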
3579 static int
3580 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3581 struct mlxsw_sp_neigh_entry *neigh_entry)
3582 {
3583 struct neighbour *n, *old_n = neigh_entry->key.n;
3584 struct mlxsw_sp_nexthop *nh;
3585 bool entry_connected;
3586 u8 nud_state, dead;
3587 int err;
3588
3589 nh = list_first_entry(&neigh_entry->nexthop_list,
3590 struct mlxsw_sp_nexthop, neigh_list_node);
3591
3592 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3593 if (!n) {
3594 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3595 nh->rif->dev);
3596 if (IS_ERR(n))
3597 return PTR_ERR(n);
3598 neigh_event_send(n, NULL);
3599 }
3600
3601 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3602 neigh_entry->key.n = n;
3603 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3604 if (err)
3605 goto err_neigh_entry_insert;
3606
3607 read_lock_bh(&n->lock);
3608 nud_state = n->nud_state;
3609 dead = n->dead;
3610 read_unlock_bh(&n->lock);
3611 entry_connected = nud_state & NUD_VALID && !dead;
3612
3613 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3614 neigh_list_node) {
3615 neigh_release(old_n);
3616 neigh_clone(n);
3617 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
3618 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3619 }
3620
3621 neigh_release(n);
3622
3623 return 0;
3624
3625 err_neigh_entry_insert:
3626 neigh_entry->key.n = old_n;
3627 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3628 neigh_release(n);
3629 return err;
3630 }
3631
3632 static void
3633 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3634 struct mlxsw_sp_neigh_entry *neigh_entry,
3635 bool removing, bool dead)
3636 {
3637 struct mlxsw_sp_nexthop *nh;
3638
3639 if (list_empty(&neigh_entry->nexthop_list))
3640 return;
3641
3642 if (dead) {
3643 int err;
3644
3645 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
3646 neigh_entry);
3647 if (err)
3648 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
3649 return;
3650 }
3651
3652 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3653 neigh_list_node) {
3654 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3655 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3656 }
3657 }
3658
3659 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3660 struct mlxsw_sp_rif *rif)
3661 {
3662 if (nh->rif)
3663 return;
3664
3665 nh->rif = rif;
3666 list_add(&nh->rif_list_node, &rif->nexthop_list);
3667 }
3668
3669 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3670 {
3671 if (!nh->rif)
3672 return;
3673
3674 list_del(&nh->rif_list_node);
3675 nh->rif = NULL;
3676 }
3677
3678 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3679 struct mlxsw_sp_nexthop *nh)
3680 {
3681 struct mlxsw_sp_neigh_entry *neigh_entry;
3682 struct neighbour *n;
3683 u8 nud_state, dead;
3684 int err;
3685
3686 if (!nh->nh_grp->gateway || nh->neigh_entry)
3687 return 0;
3688
3689 /* Take a reference on the neighbour here, ensuring that it is
3690 * not destroyed before the nexthop entry is finished.
3691 * The reference is taken either in neigh_lookup() or
3692 * in neigh_create() in case n is not found.
3693 */
3694 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3695 if (!n) {
3696 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3697 nh->rif->dev);
3698 if (IS_ERR(n))
3699 return PTR_ERR(n);
3700 neigh_event_send(n, NULL);
3701 }
3702 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3703 if (!neigh_entry) {
3704 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3705 if (IS_ERR(neigh_entry)) {
3706 err = -EINVAL;
3707 goto err_neigh_entry_create;
3708 }
3709 }
3710
3711 /* If that is the first nexthop connected to that neigh, add to
3712 * nexthop_neighs_list
3713 */
3714 if (list_empty(&neigh_entry->nexthop_list))
3715 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
3716 &mlxsw_sp->router->nexthop_neighs_list);
3717
3718 nh->neigh_entry = neigh_entry;
3719 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3720 read_lock_bh(&n->lock);
3721 nud_state = n->nud_state;
3722 dead = n->dead;
3723 read_unlock_bh(&n->lock);
3724 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
3725
3726 return 0;
3727
3728 err_neigh_entry_create:
3729 neigh_release(n);
3730 return err;
3731 }
3732
3733 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3734 struct mlxsw_sp_nexthop *nh)
3735 {
3736 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3737 struct neighbour *n;
3738
3739 if (!neigh_entry)
3740 return;
3741 n = neigh_entry->key.n;
3742
3743 __mlxsw_sp_nexthop_neigh_update(nh, true);
3744 list_del(&nh->neigh_list_node);
3745 nh->neigh_entry = NULL;
3746
3747 /* If that is the last nexthop connected to that neigh, remove from
3748 * nexthop_neighs_list
3749 */
3750 if (list_empty(&neigh_entry->nexthop_list))
3751 list_del(&neigh_entry->nexthop_neighs_list_node);
3752
3753 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3754 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3755
3756 neigh_release(n);
3757 }
3758
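/* Return whether the underlay device of an IP-in-IP overlay device is
 * up. Tunnels without an explicit underlay device are considered up.
 */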
3759 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3760 {
3761 struct net_device *ul_dev;
3762 bool is_up;
3763
3764 rcu_read_lock();
3765 ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3766 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
3767 rcu_read_unlock();
3768
3769 return is_up;
3770 }
3771
3772 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3773 struct mlxsw_sp_nexthop *nh,
3774 struct mlxsw_sp_ipip_entry *ipip_entry)
3775 {
3776 bool removing;
3777
3778 if (!nh->nh_grp->gateway || nh->ipip_entry)
3779 return;
3780
3781 nh->ipip_entry = ipip_entry;
3782 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
3783 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3784 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
3785 }
3786
3787 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3788 struct mlxsw_sp_nexthop *nh)
3789 {
3790 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3791
3792 if (!ipip_entry)
3793 return;
3794
3795 __mlxsw_sp_nexthop_neigh_update(nh, true);
3796 nh->ipip_entry = NULL;
3797 }
3798
3799 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3800 const struct fib_nh *fib_nh,
3801 enum mlxsw_sp_ipip_type *p_ipipt)
3802 {
3803 struct net_device *dev = fib_nh->fib_nh_dev;
3804
3805 return dev &&
3806 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3807 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3808 }
3809
3810 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3811 struct mlxsw_sp_nexthop *nh)
3812 {
3813 switch (nh->type) {
3814 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3815 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3816 mlxsw_sp_nexthop_rif_fini(nh);
3817 break;
3818 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3819 mlxsw_sp_nexthop_rif_fini(nh);
3820 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3821 break;
3822 }
3823 }
3824
3825 static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3826 struct mlxsw_sp_nexthop *nh,
3827 struct fib_nh *fib_nh)
3828 {
3829 const struct mlxsw_sp_ipip_ops *ipip_ops;
3830 struct net_device *dev = fib_nh->fib_nh_dev;
3831 struct mlxsw_sp_ipip_entry *ipip_entry;
3832 struct mlxsw_sp_rif *rif;
3833 int err;
3834
3835 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3836 if (ipip_entry) {
3837 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3838 if (ipip_ops->can_offload(mlxsw_sp, dev,
3839 MLXSW_SP_L3_PROTO_IPV4)) {
3840 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3841 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3842 return 0;
3843 }
3844 }
3845
3846 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3847 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3848 if (!rif)
3849 return 0;
3850
3851 mlxsw_sp_nexthop_rif_init(nh, rif);
3852 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3853 if (err)
3854 goto err_neigh_init;
3855
3856 return 0;
3857
3858 err_neigh_init:
3859 mlxsw_sp_nexthop_rif_fini(nh);
3860 return err;
3861 }
3862
3863 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3864 struct mlxsw_sp_nexthop *nh)
3865 {
3866 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3867 }
3868
3869 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3870 struct mlxsw_sp_nexthop_group *nh_grp,
3871 struct mlxsw_sp_nexthop *nh,
3872 struct fib_nh *fib_nh)
3873 {
3874 struct net_device *dev = fib_nh->fib_nh_dev;
3875 struct in_device *in_dev;
3876 int err;
3877
3878 nh->nh_grp = nh_grp;
3879 nh->key.fib_nh = fib_nh;
3880 #ifdef CONFIG_IP_ROUTE_MULTIPATH
3881 nh->nh_weight = fib_nh->fib_nh_weight;
3882 #else
3883 nh->nh_weight = 1;
3884 #endif
3885 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
3886 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3887 if (err)
3888 return err;
3889
3890 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
3891 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3892
3893 if (!dev)
3894 return 0;
3895
3896 rcu_read_lock();
3897 in_dev = __in_dev_get_rcu(dev);
3898 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3899 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
3900 rcu_read_unlock();
3901 return 0;
3902 }
3903 rcu_read_unlock();
3904
3905 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3906 if (err)
3907 goto err_nexthop_neigh_init;
3908
3909 return 0;
3910
3911 err_nexthop_neigh_init:
3912 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3913 return err;
3914 }
3915
3916 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3917 struct mlxsw_sp_nexthop *nh)
3918 {
3919 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3920 list_del(&nh->router_list_node);
3921 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
3922 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3923 }
3924
3925 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3926 unsigned long event, struct fib_nh *fib_nh)
3927 {
3928 struct mlxsw_sp_nexthop_key key;
3929 struct mlxsw_sp_nexthop *nh;
3930
3931 if (mlxsw_sp->router->aborted)
3932 return;
3933
3934 key.fib_nh = fib_nh;
3935 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3936 if (!nh)
3937 return;
3938
3939 switch (event) {
3940 case FIB_EVENT_NH_ADD:
3941 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3942 break;
3943 case FIB_EVENT_NH_DEL:
3944 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3945 break;
3946 }
3947
3948 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3949 }
3950
3951 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3952 struct mlxsw_sp_rif *rif)
3953 {
3954 struct mlxsw_sp_nexthop *nh;
3955 bool removing;
3956
3957 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3958 switch (nh->type) {
3959 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3960 removing = false;
3961 break;
3962 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3963 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3964 break;
3965 default:
3966 WARN_ON(1);
3967 continue;
3968 }
3969
3970 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3971 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3972 }
3973 }
3974
3975 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3976 struct mlxsw_sp_rif *old_rif,
3977 struct mlxsw_sp_rif *new_rif)
3978 {
3979 struct mlxsw_sp_nexthop *nh;
3980
3981 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3982 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3983 nh->rif = new_rif;
3984 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3985 }
3986
3987 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
3988 struct mlxsw_sp_rif *rif)
3989 {
3990 struct mlxsw_sp_nexthop *nh, *tmp;
3991
3992 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
3993 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3994 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3995 }
3996 }
3997
3998 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3999 struct fib_info *fi)
4000 {
4001 const struct fib_nh *nh = fib_info_nh(fi, 0);
4002
4003 return nh->fib_nh_scope == RT_SCOPE_LINK ||
4004 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
4005 }
4006
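/* Create a nexthop group for an IPv4 fib_info: one mlxsw_sp_nexthop
 * per kernel path, holding a reference on the fib_info for the
 * group's lifetime. The group is inserted into the router's hashtable
 * so subsequent routes sharing the same fib_info reuse it, and is
 * then refreshed to program the adjacency entries.
 */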
4007 static struct mlxsw_sp_nexthop_group *
4008 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
4009 {
4010 unsigned int nhs = fib_info_num_path(fi);
4011 struct mlxsw_sp_nexthop_group *nh_grp;
4012 struct mlxsw_sp_nexthop *nh;
4013 struct fib_nh *fib_nh;
4014 int i;
4015 int err;
4016
4017 nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
4018 if (!nh_grp)
4019 return ERR_PTR(-ENOMEM);
4020 nh_grp->priv = fi;
4021 INIT_LIST_HEAD(&nh_grp->fib_list);
4022 nh_grp->neigh_tbl = &arp_tbl;
4023
4024 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
4025 nh_grp->count = nhs;
4026 fib_info_hold(fi);
4027 for (i = 0; i < nh_grp->count; i++) {
4028 nh = &nh_grp->nexthops[i];
4029 fib_nh = fib_info_nh(fi, i);
4030 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
4031 if (err)
4032 goto err_nexthop4_init;
4033 }
4034 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4035 if (err)
4036 goto err_nexthop_group_insert;
4037 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4038 return nh_grp;
4039
4040 err_nexthop_group_insert:
4041 err_nexthop4_init:
4042 for (i--; i >= 0; i--) {
4043 nh = &nh_grp->nexthops[i];
4044 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4045 }
4046 fib_info_put(fi);
4047 kfree(nh_grp);
4048 return ERR_PTR(err);
4049 }
4050
4051 static void
4052 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
4053 struct mlxsw_sp_nexthop_group *nh_grp)
4054 {
4055 struct mlxsw_sp_nexthop *nh;
4056 int i;
4057
4058 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
4059 for (i = 0; i < nh_grp->count; i++) {
4060 nh = &nh_grp->nexthops[i];
4061 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4062 }
4063 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4064 WARN_ON_ONCE(nh_grp->adj_index_valid);
4065 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
4066 kfree(nh_grp);
4067 }
4068
4069 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
4070 struct mlxsw_sp_fib_entry *fib_entry,
4071 struct fib_info *fi)
4072 {
4073 struct mlxsw_sp_nexthop_group *nh_grp;
4074
4075 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
4076 if (!nh_grp) {
4077 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
4078 if (IS_ERR(nh_grp))
4079 return PTR_ERR(nh_grp);
4080 }
4081 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
4082 fib_entry->nh_group = nh_grp;
4083 return 0;
4084 }
4085
4086 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
4087 struct mlxsw_sp_fib_entry *fib_entry)
4088 {
4089 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4090
4091 list_del(&fib_entry->nexthop_group_node);
4092 if (!list_empty(&nh_grp->fib_list))
4093 return;
4094 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
4095 }
4096
4097 static bool
4098 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4099 {
4100 struct mlxsw_sp_fib4_entry *fib4_entry;
4101
4102 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4103 common);
4104 return !fib4_entry->tos;
4105 }
4106
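/* Decide whether a fib entry can be offloaded: IPv4 entries with a
 * non-zero TOS never are; remote entries additionally require a valid
 * adjacency index and local entries a RIF, while blackhole and decap
 * entries are always offloadable.
 */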
4107 static bool
4108 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4109 {
4110 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4111
4112 switch (fib_entry->fib_node->fib->proto) {
4113 case MLXSW_SP_L3_PROTO_IPV4:
4114 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
4115 return false;
4116 break;
4117 case MLXSW_SP_L3_PROTO_IPV6:
4118 break;
4119 }
4120
4121 switch (fib_entry->type) {
4122 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4123 return !!nh_group->adj_index_valid;
4124 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4125 return !!nh_group->nh_rif;
4126 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4127 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4128 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4129 return true;
4130 default:
4131 return false;
4132 }
4133 }
4134
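/* Find the group member backing a given IPv6 route by matching both
 * the route's egress device and its IPv6 gateway address.
 */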
4135 static struct mlxsw_sp_nexthop *
4136 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4137 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4138 {
4139 int i;
4140
4141 for (i = 0; i < nh_grp->count; i++) {
4142 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4143 struct fib6_info *rt = mlxsw_sp_rt6->rt;
4144
4145 if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
4146 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
4147 &rt->fib6_nh->fib_nh_gw6))
4148 return nh;
4150 }
4151
4152 return NULL;
4153 }
4154
4155 static void
4156 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4157 struct mlxsw_sp_fib_entry *fib_entry)
4158 {
4159 struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
4160 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4161 int dst_len = fib_entry->fib_node->key.prefix_len;
4162 struct mlxsw_sp_fib4_entry *fib4_entry;
4163 struct fib_rt_info fri;
4164 bool should_offload;
4165
4166 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4167 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4168 common);
4169 fri.fi = fi;
4170 fri.tb_id = fib4_entry->tb_id;
4171 fri.dst = cpu_to_be32(*p_dst);
4172 fri.dst_len = dst_len;
4173 fri.tos = fib4_entry->tos;
4174 fri.type = fib4_entry->type;
4175 fri.offload = should_offload;
4176 fri.trap = !should_offload;
4177 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4178 }
4179
4180 static void
4181 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4182 struct mlxsw_sp_fib_entry *fib_entry)
4183 {
4184 struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
4185 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4186 int dst_len = fib_entry->fib_node->key.prefix_len;
4187 struct mlxsw_sp_fib4_entry *fib4_entry;
4188 struct fib_rt_info fri;
4189
4190 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4191 common);
4192 fri.fi = fi;
4193 fri.tb_id = fib4_entry->tb_id;
4194 fri.dst = cpu_to_be32(*p_dst);
4195 fri.dst_len = dst_len;
4196 fri.tos = fib4_entry->tos;
4197 fri.type = fib4_entry->type;
4198 fri.offload = false;
4199 fri.trap = false;
4200 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4201 }
4202
4203 static void
4204 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4205 struct mlxsw_sp_fib_entry *fib_entry)
4206 {
4207 struct mlxsw_sp_fib6_entry *fib6_entry;
4208 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4209 bool should_offload;
4210
4211 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4212
4213 /* In IPv6 a multipath route is represented using multiple routes, so
4214 * we need to set the flags on all of them.
4215 */
4216 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4217 common);
4218 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4219 fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
4220 !should_offload);
4221 }
4222
4223 static void
4224 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4225 struct mlxsw_sp_fib_entry *fib_entry)
4226 {
4227 struct mlxsw_sp_fib6_entry *fib6_entry;
4228 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4229
4230 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4231 common);
4232 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4233 fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
4234 }
4235
4236 static void
4237 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4238 struct mlxsw_sp_fib_entry *fib_entry)
4239 {
4240 switch (fib_entry->fib_node->fib->proto) {
4241 case MLXSW_SP_L3_PROTO_IPV4:
4242 mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
4243 break;
4244 case MLXSW_SP_L3_PROTO_IPV6:
4245 mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
4246 break;
4247 }
4248 }
4249
4250 static void
4251 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4252 struct mlxsw_sp_fib_entry *fib_entry)
4253 {
4254 switch (fib_entry->fib_node->fib->proto) {
4255 case MLXSW_SP_L3_PROTO_IPV4:
4256 mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4257 break;
4258 case MLXSW_SP_L3_PROTO_IPV6:
4259 mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4260 break;
4261 }
4262 }
4263
4264 static void
4265 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
4266 struct mlxsw_sp_fib_entry *fib_entry,
4267 enum mlxsw_reg_ralue_op op)
4268 {
4269 switch (op) {
4270 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
4271 mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
4272 break;
4273 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
4274 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4275 break;
4276 default:
4277 break;
4278 }
4279 }
4280
4281 static void
4282 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
4283 const struct mlxsw_sp_fib_entry *fib_entry,
4284 enum mlxsw_reg_ralue_op op)
4285 {
4286 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
4287 enum mlxsw_reg_ralxx_protocol proto;
4288 u32 *p_dip;
4289
4290 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
4291
4292 switch (fib->proto) {
4293 case MLXSW_SP_L3_PROTO_IPV4:
4294 p_dip = (u32 *) fib_entry->fib_node->key.addr;
4295 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
4296 fib_entry->fib_node->key.prefix_len,
4297 *p_dip);
4298 break;
4299 case MLXSW_SP_L3_PROTO_IPV6:
4300 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
4301 fib_entry->fib_node->key.prefix_len,
4302 fib_entry->fib_node->key.addr);
4303 break;
4304 }
4305 }
4306
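/* Lazily allocate a single adjacency entry, shared router-wide, whose
 * trap action discards packets. It is used by remote fib entries
 * whose nexthop group has no valid adjacency index, so such traffic
 * is dropped in hardware rather than trapped to the CPU.
 */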
4307 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
4308 {
4309 enum mlxsw_reg_ratr_trap_action trap_action;
4310 char ratr_pl[MLXSW_REG_RATR_LEN];
4311 int err;
4312
4313 if (mlxsw_sp->router->adj_discard_index_valid)
4314 return 0;
4315
4316 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4317 &mlxsw_sp->router->adj_discard_index);
4318 if (err)
4319 return err;
4320
4321 trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
4322 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4323 MLXSW_REG_RATR_TYPE_ETHERNET,
4324 mlxsw_sp->router->adj_discard_index, rif_index);
4325 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4326 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4327 if (err)
4328 goto err_ratr_write;
4329
4330 mlxsw_sp->router->adj_discard_index_valid = true;
4331
4332 return 0;
4333
4334 err_ratr_write:
4335 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4336 mlxsw_sp->router->adj_discard_index);
4337 return err;
4338 }
4339
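/* Program a remote (gateway) route. Three cases: a resolved group is
 * pointed at its adjacency range with the ECMP size; an unresolved
 * group that still has a RIF is pointed at the shared discard entry;
 * otherwise the route traps packets to the CPU.
 */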
4340 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4341 struct mlxsw_sp_fib_entry *fib_entry,
4342 enum mlxsw_reg_ralue_op op)
4343 {
4344 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4345 char ralue_pl[MLXSW_REG_RALUE_LEN];
4346 enum mlxsw_reg_ralue_trap_action trap_action;
4347 u16 trap_id = 0;
4348 u32 adjacency_index = 0;
4349 u16 ecmp_size = 0;
4350 int err;
4351
4352 /* In case the nexthop group adjacency index is valid, use it
4353 * with the provided ECMP size. Otherwise, set up a trap and pass
4354 * traffic to the kernel.
4355 */
4356 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4357 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4358 adjacency_index = fib_entry->nh_group->adj_index;
4359 ecmp_size = fib_entry->nh_group->ecmp_size;
4360 } else if (!nh_group->adj_index_valid && nh_group->count &&
4361 nh_group->nh_rif) {
4362 err = mlxsw_sp_adj_discard_write(mlxsw_sp,
4363 nh_group->nh_rif->rif_index);
4364 if (err)
4365 return err;
4366 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4367 adjacency_index = mlxsw_sp->router->adj_discard_index;
4368 ecmp_size = 1;
4369 } else {
4370 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4371 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4372 }
4373
4374 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4375 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
4376 adjacency_index, ecmp_size);
4377 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4378 }
4379
4380 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4381 struct mlxsw_sp_fib_entry *fib_entry,
4382 enum mlxsw_reg_ralue_op op)
4383 {
4384 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
4385 enum mlxsw_reg_ralue_trap_action trap_action;
4386 char ralue_pl[MLXSW_REG_RALUE_LEN];
4387 u16 trap_id = 0;
4388 u16 rif_index = 0;
4389
4390 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4391 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4392 rif_index = rif->rif_index;
4393 } else {
4394 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4395 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4396 }
4397
4398 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4399 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
4400 rif_index);
4401 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4402 }
4403
4404 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4405 struct mlxsw_sp_fib_entry *fib_entry,
4406 enum mlxsw_reg_ralue_op op)
4407 {
4408 char ralue_pl[MLXSW_REG_RALUE_LEN];
4409
4410 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4411 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
4412 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4413 }
4414
4415 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
4416 struct mlxsw_sp_fib_entry *fib_entry,
4417 enum mlxsw_reg_ralue_op op)
4418 {
4419 enum mlxsw_reg_ralue_trap_action trap_action;
4420 char ralue_pl[MLXSW_REG_RALUE_LEN];
4421
4422 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
4423 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4424 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
4425 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4426 }
4427
4428 static int
4429 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
4430 struct mlxsw_sp_fib_entry *fib_entry,
4431 enum mlxsw_reg_ralue_op op)
4432 {
4433 enum mlxsw_reg_ralue_trap_action trap_action;
4434 char ralue_pl[MLXSW_REG_RALUE_LEN];
4435 u16 trap_id;
4436
4437 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4438 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
4439
4440 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4441 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
4442 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4443 }
4444
4445 static int
4446 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4447 struct mlxsw_sp_fib_entry *fib_entry,
4448 enum mlxsw_reg_ralue_op op)
4449 {
4450 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4451 const struct mlxsw_sp_ipip_ops *ipip_ops;
4452
4453 if (WARN_ON(!ipip_entry))
4454 return -EINVAL;
4455
4456 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4457 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4458 fib_entry->decap.tunnel_index);
4459 }
4460
4461 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
4462 struct mlxsw_sp_fib_entry *fib_entry,
4463 enum mlxsw_reg_ralue_op op)
4464 {
4465 char ralue_pl[MLXSW_REG_RALUE_LEN];
4466
4467 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4468 mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
4469 fib_entry->decap.tunnel_index);
4470 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4471 }
4472
4473 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4474 struct mlxsw_sp_fib_entry *fib_entry,
4475 enum mlxsw_reg_ralue_op op)
4476 {
4477 switch (fib_entry->type) {
4478 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4479 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
4480 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4481 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
4482 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
4483 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
4484 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4485 return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
4486 case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
4487 return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
4488 op);
4489 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4490 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4491 fib_entry, op);
4492 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4493 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
4494 }
4495 return -EINVAL;
4496 }
4497
4498 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4499 struct mlxsw_sp_fib_entry *fib_entry,
4500 enum mlxsw_reg_ralue_op op)
4501 {
4502 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
4503
4504 if (err)
4505 return err;
4506
4507 mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
4508
4509 return err;
4510 }
4511
4512 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4513 struct mlxsw_sp_fib_entry *fib_entry)
4514 {
4515 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4516 MLXSW_REG_RALUE_OP_WRITE_WRITE);
4517 }
4518
4519 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4520 struct mlxsw_sp_fib_entry *fib_entry)
4521 {
4522 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4523 MLXSW_REG_RALUE_OP_WRITE_DELETE);
4524 }
4525
4526 static int
4527 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4528 const struct fib_entry_notifier_info *fen_info,
4529 struct mlxsw_sp_fib_entry *fib_entry)
4530 {
4531 struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
4532 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4533 struct mlxsw_sp_router *router = mlxsw_sp->router;
4534 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
4535 struct mlxsw_sp_ipip_entry *ipip_entry;
4536 struct fib_info *fi = fen_info->fi;
4537
4538 switch (fen_info->type) {
4539 case RTN_LOCAL:
4540 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4541 MLXSW_SP_L3_PROTO_IPV4, dip);
4542 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
4543 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4544 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4545 fib_entry,
4546 ipip_entry);
4547 }
4548 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
4549 MLXSW_SP_L3_PROTO_IPV4,
4550 &dip)) {
4551 u32 tunnel_index;
4552
4553 tunnel_index = router->nve_decap_config.tunnel_index;
4554 fib_entry->decap.tunnel_index = tunnel_index;
4555 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
4556 return 0;
4557 }
4558 fallthrough;
4559 case RTN_BROADCAST:
4560 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4561 return 0;
4562 case RTN_BLACKHOLE:
4563 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
4564 return 0;
4565 case RTN_UNREACHABLE:
4566 case RTN_PROHIBIT:
4567 /* Packets hitting these routes need to be trapped, but
4568 * can do so with a lower priority than packets directed
4569 * at the host, so use action type local instead of trap.
4570 */
4571 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
4572 return 0;
4573 case RTN_UNICAST:
4574 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
4575 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4576 else
4577 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4578 return 0;
4579 default:
4580 return -EINVAL;
4581 }
4582 }
4583
4584 static void
4585 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
4586 struct mlxsw_sp_fib_entry *fib_entry)
4587 {
4588 switch (fib_entry->type) {
4589 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4590 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
4591 break;
4592 default:
4593 break;
4594 }
4595 }
4596
4597 static struct mlxsw_sp_fib4_entry *
4598 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4599 struct mlxsw_sp_fib_node *fib_node,
4600 const struct fib_entry_notifier_info *fen_info)
4601 {
4602 struct mlxsw_sp_fib4_entry *fib4_entry;
4603 struct mlxsw_sp_fib_entry *fib_entry;
4604 int err;
4605
4606 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4607 if (!fib4_entry)
4608 return ERR_PTR(-ENOMEM);
4609 fib_entry = &fib4_entry->common;
4610
4611 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4612 if (err)
4613 goto err_fib4_entry_type_set;
4614
4615 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
4616 if (err)
4617 goto err_nexthop4_group_get;
4618
4619 fib4_entry->prio = fen_info->fi->fib_priority;
4620 fib4_entry->tb_id = fen_info->tb_id;
4621 fib4_entry->type = fen_info->type;
4622 fib4_entry->tos = fen_info->tos;
4623
4624 fib_entry->fib_node = fib_node;
4625
4626 return fib4_entry;
4627
4628 err_nexthop4_group_get:
4629 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
4630 err_fib4_entry_type_set:
4631 kfree(fib4_entry);
4632 return ERR_PTR(err);
4633 }
4634
4635 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4636 struct mlxsw_sp_fib4_entry *fib4_entry)
4637 {
4638 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
4639 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
4640 kfree(fib4_entry);
4641 }
4642
4643 static struct mlxsw_sp_fib4_entry *
4644 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4645 const struct fib_entry_notifier_info *fen_info)
4646 {
4647 struct mlxsw_sp_fib4_entry *fib4_entry;
4648 struct mlxsw_sp_fib_node *fib_node;
4649 struct mlxsw_sp_fib *fib;
4650 struct mlxsw_sp_vr *vr;
4651
4652 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4653 if (!vr)
4654 return NULL;
4655 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4656
4657 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4658 sizeof(fen_info->dst),
4659 fen_info->dst_len);
4660 if (!fib_node)
4661 return NULL;
4662
4663 fib4_entry = container_of(fib_node->fib_entry,
4664 struct mlxsw_sp_fib4_entry, common);
4665 if (fib4_entry->tb_id == fen_info->tb_id &&
4666 fib4_entry->tos == fen_info->tos &&
4667 fib4_entry->type == fen_info->type &&
4668 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4669 fen_info->fi)
4670 return fib4_entry;
4671
4672 return NULL;
4673 }
4674
4675 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4676 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4677 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4678 .key_len = sizeof(struct mlxsw_sp_fib_key),
4679 .automatic_shrinking = true,
4680 };
4681
4682 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4683 struct mlxsw_sp_fib_node *fib_node)
4684 {
4685 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4686 mlxsw_sp_fib_ht_params);
4687 }
4688
4689 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4690 struct mlxsw_sp_fib_node *fib_node)
4691 {
4692 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4693 mlxsw_sp_fib_ht_params);
4694 }
4695
4696 static struct mlxsw_sp_fib_node *
4697 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4698 size_t addr_len, unsigned char prefix_len)
4699 {
4700 struct mlxsw_sp_fib_key key;
4701
4702 memset(&key, 0, sizeof(key));
4703 memcpy(key.addr, addr, addr_len);
4704 key.prefix_len = prefix_len;
4705 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4706 }
4707
4708 static struct mlxsw_sp_fib_node *
4709 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
4710 size_t addr_len, unsigned char prefix_len)
4711 {
4712 struct mlxsw_sp_fib_node *fib_node;
4713
4714 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4715 if (!fib_node)
4716 return NULL;
4717
4718 list_add(&fib_node->list, &fib->node_list);
4719 memcpy(fib_node->key.addr, addr, addr_len);
4720 fib_node->key.prefix_len = prefix_len;
4721
4722 return fib_node;
4723 }
4724
4725 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4726 {
4727 list_del(&fib_node->list);
4728 kfree(fib_node);
4729 }
4730
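/* Account a fib node's prefix length against the fib's LPM tree. The
 * first user of a new prefix length may require building a tree with
 * an extended prefix usage and rebinding all virtual routers to it.
 */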
4731 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4732 struct mlxsw_sp_fib_node *fib_node)
4733 {
4734 struct mlxsw_sp_prefix_usage req_prefix_usage;
4735 struct mlxsw_sp_fib *fib = fib_node->fib;
4736 struct mlxsw_sp_lpm_tree *lpm_tree;
4737 int err;
4738
4739 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4740 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4741 goto out;
4742
4743 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4744 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4745 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4746 fib->proto);
4747 if (IS_ERR(lpm_tree))
4748 return PTR_ERR(lpm_tree);
4749
4750 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4751 if (err)
4752 goto err_lpm_tree_replace;
4753
4754 out:
4755 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
4756 return 0;
4757
4758 err_lpm_tree_replace:
4759 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4760 return err;
4761 }
4762
4763 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4764 struct mlxsw_sp_fib_node *fib_node)
4765 {
4766 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4767 struct mlxsw_sp_prefix_usage req_prefix_usage;
4768 struct mlxsw_sp_fib *fib = fib_node->fib;
4769 int err;
4770
4771 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4772 return;
4773 /* Try to construct a new LPM tree from the current prefix usage
4774 * minus the unused one. If we fail, continue using the old one.
4775 */
4776 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4777 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4778 fib_node->key.prefix_len);
4779 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4780 fib->proto);
4781 if (IS_ERR(lpm_tree))
4782 return;
4783
4784 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4785 if (err)
4786 goto err_lpm_tree_replace;
4787
4788 return;
4789
4790 err_lpm_tree_replace:
4791 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4792 }
4793
4794 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4795 struct mlxsw_sp_fib_node *fib_node,
4796 struct mlxsw_sp_fib *fib)
4797 {
4798 int err;
4799
4800 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4801 if (err)
4802 return err;
4803 fib_node->fib = fib;
4804
4805 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
4806 if (err)
4807 goto err_fib_lpm_tree_link;
4808
4809 return 0;
4810
4811 err_fib_lpm_tree_link:
4812 fib_node->fib = NULL;
4813 mlxsw_sp_fib_node_remove(fib, fib_node);
4814 return err;
4815 }
4816
4817 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4818 struct mlxsw_sp_fib_node *fib_node)
4819 {
4820 struct mlxsw_sp_fib *fib = fib_node->fib;
4821
4822 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
4823 fib_node->fib = NULL;
4824 mlxsw_sp_fib_node_remove(fib, fib_node);
4825 }
4826
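/* Get-or-create semantics: look the node up in the virtual router's
 * fib and, if missing, create it and link it to the fib and its LPM
 * tree. The VR reference taken here is released by
 * mlxsw_sp_fib_node_put() once the node has no fib entry.
 */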
4827 static struct mlxsw_sp_fib_node *
4828 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4829 size_t addr_len, unsigned char prefix_len,
4830 enum mlxsw_sp_l3proto proto)
4831 {
4832 struct mlxsw_sp_fib_node *fib_node;
4833 struct mlxsw_sp_fib *fib;
4834 struct mlxsw_sp_vr *vr;
4835 int err;
4836
4837 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
4838 if (IS_ERR(vr))
4839 return ERR_CAST(vr);
4840 fib = mlxsw_sp_vr_fib(vr, proto);
4841
4842 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
4843 if (fib_node)
4844 return fib_node;
4845
4846 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
4847 if (!fib_node) {
4848 err = -ENOMEM;
4849 goto err_fib_node_create;
4850 }
4851
4852 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4853 if (err)
4854 goto err_fib_node_init;
4855
4856 return fib_node;
4857
4858 err_fib_node_init:
4859 mlxsw_sp_fib_node_destroy(fib_node);
4860 err_fib_node_create:
4861 mlxsw_sp_vr_put(mlxsw_sp, vr);
4862 return ERR_PTR(err);
4863 }
4864
4865 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4866 struct mlxsw_sp_fib_node *fib_node)
4867 {
4868 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
4869
4870 if (fib_node->fib_entry)
4871 return;
4872 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
4873 mlxsw_sp_fib_node_destroy(fib_node);
4874 mlxsw_sp_vr_put(mlxsw_sp, vr);
4875 }
4876
4877 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
4878 struct mlxsw_sp_fib_entry *fib_entry)
4879 {
4880 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4881 int err;
4882
4883 fib_node->fib_entry = fib_entry;
4884
4885 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4886 if (err)
4887 goto err_fib_entry_update;
4888
4889 return 0;
4890
4891 err_fib_entry_update:
4892 fib_node->fib_entry = NULL;
4893 return err;
4894 }
4895
4896 static void
4897 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4898 struct mlxsw_sp_fib_entry *fib_entry)
4899 {
4900 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4901
4902 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4903 fib_node->fib_entry = NULL;
4904 }
4905
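/* Do not allow an entry from the main table to replace an entry from
 * the local table, as the device consults the local table before the
 * main table.
 */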
4906 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
4907 {
4908 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4909 struct mlxsw_sp_fib4_entry *fib4_replaced;
4910
4911 if (!fib_node->fib_entry)
4912 return true;
4913
4914 fib4_replaced = container_of(fib_node->fib_entry,
4915 struct mlxsw_sp_fib4_entry, common);
4916 if (fib4_entry->tb_id == RT_TABLE_MAIN &&
4917 fib4_replaced->tb_id == RT_TABLE_LOCAL)
4918 return false;
4919
4920 return true;
4921 }
4922
4923 static int
4924 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
4925 const struct fib_entry_notifier_info *fen_info)
4926 {
4927 struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
4928 struct mlxsw_sp_fib_entry *replaced;
4929 struct mlxsw_sp_fib_node *fib_node;
4930 int err;
4931
4932 if (mlxsw_sp->router->aborted)
4933 return 0;
4934
4935 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4936 &fen_info->dst, sizeof(fen_info->dst),
4937 fen_info->dst_len,
4938 MLXSW_SP_L3_PROTO_IPV4);
4939 if (IS_ERR(fib_node)) {
4940 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4941 return PTR_ERR(fib_node);
4942 }
4943
4944 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4945 if (IS_ERR(fib4_entry)) {
4946 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
4947 err = PTR_ERR(fib4_entry);
4948 goto err_fib4_entry_create;
4949 }
4950
4951 if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
4952 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4953 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4954 return 0;
4955 }
4956
4957 replaced = fib_node->fib_entry;
4958 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
4959 if (err) {
4960 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4961 goto err_fib_node_entry_link;
4962 }
4963
4964 /* Nothing to replace */
4965 if (!replaced)
4966 return 0;
4967
4968 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
4969 fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
4970 common);
4971 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
4972
4973 return 0;
4974
4975 err_fib_node_entry_link:
4976 fib_node->fib_entry = replaced;
4977 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4978 err_fib4_entry_create:
4979 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4980 return err;
4981 }
4982
4983 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4984 struct fib_entry_notifier_info *fen_info)
4985 {
4986 struct mlxsw_sp_fib4_entry *fib4_entry;
4987 struct mlxsw_sp_fib_node *fib_node;
4988
4989 if (mlxsw_sp->router->aborted)
4990 return;
4991
4992 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4993 if (!fib4_entry)
4994 return;
4995 fib_node = fib4_entry->common.fib_node;
4996
4997 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
4998 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4999 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5000 }
5001
5002 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
5003 {
5004 /* Multicast routes aren't supported, so ignore them. Neighbour
5005 * Discovery packets are specifically trapped.
5006 */
5007 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
5008 return true;
5009
5010 /* Cloned routes are irrelevant in the forwarding path. */
5011 if (rt->fib6_flags & RTF_CACHE)
5012 return true;
5013
5014 return false;
5015 }
5016
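/* Wrap a fib6_info in a driver-private structure, taking a reference
 * on the route so that it cannot be freed while the driver uses it.
 */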
5017 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
5018 {
5019 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5020
5021 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
5022 if (!mlxsw_sp_rt6)
5023 return ERR_PTR(-ENOMEM);
5024
5025 /* In case of route replace, replaced route is deleted with
5026 * no notification. Take reference to prevent accessing freed
5027 * memory.
5028 */
5029 mlxsw_sp_rt6->rt = rt;
5030 fib6_info_hold(rt);
5031
5032 return mlxsw_sp_rt6;
5033 }
5034
5035 #if IS_ENABLED(CONFIG_IPV6)
5036 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5037 {
5038 fib6_info_release(rt);
5039 }
5040 #else
5041 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5042 {
5043 }
5044 #endif
5045
5046 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5047 {
5048 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
5049
5050 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
5051 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
5052 kfree(mlxsw_sp_rt6);
5053 }
5054
5055 static struct fib6_info *
5056 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
5057 {
5058 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
5059 list)->rt;
5060 }
5061
5062 static struct mlxsw_sp_rt6 *
5063 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
5064 const struct fib6_info *rt)
5065 {
5066 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5067
5068 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
5069 if (mlxsw_sp_rt6->rt == rt)
5070 return mlxsw_sp_rt6;
5071 }
5072
5073 return NULL;
5074 }
5075
5076 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
5077 const struct fib6_info *rt,
5078 enum mlxsw_sp_ipip_type *ret)
5079 {
5080 return rt->fib6_nh->fib_nh_dev &&
5081 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
5082 }
5083
5084 static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
5085 struct mlxsw_sp_nexthop_group *nh_grp,
5086 struct mlxsw_sp_nexthop *nh,
5087 const struct fib6_info *rt)
5088 {
5089 const struct mlxsw_sp_ipip_ops *ipip_ops;
5090 struct mlxsw_sp_ipip_entry *ipip_entry;
5091 struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5092 struct mlxsw_sp_rif *rif;
5093 int err;
5094
5095 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
5096 if (ipip_entry) {
5097 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5098 if (ipip_ops->can_offload(mlxsw_sp, dev,
5099 MLXSW_SP_L3_PROTO_IPV6)) {
5100 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
5101 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
5102 return 0;
5103 }
5104 }
5105
5106 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
5107 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
5108 if (!rif)
5109 return 0;
5110 mlxsw_sp_nexthop_rif_init(nh, rif);
5111
5112 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
5113 if (err)
5114 goto err_nexthop_neigh_init;
5115
5116 return 0;
5117
5118 err_nexthop_neigh_init:
5119 mlxsw_sp_nexthop_rif_fini(nh);
5120 return err;
5121 }
5122
5123 static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
5124 struct mlxsw_sp_nexthop *nh)
5125 {
5126 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5127 }
5128
5129 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
5130 struct mlxsw_sp_nexthop_group *nh_grp,
5131 struct mlxsw_sp_nexthop *nh,
5132 const struct fib6_info *rt)
5133 {
5134 struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5135
5136 nh->nh_grp = nh_grp;
5137 nh->nh_weight = rt->fib6_nh->fib_nh_weight;
5138 memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
5139 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5140
5141 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5142
5143 if (!dev)
5144 return 0;
5145 nh->ifindex = dev->ifindex;
5146
5147 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
5148 }
5149
5150 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
5151 struct mlxsw_sp_nexthop *nh)
5152 {
5153 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
5154 list_del(&nh->router_list_node);
5155 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5156 }
5157
5158 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5159 const struct fib6_info *rt)
5160 {
5161 return rt->fib6_nh->fib_nh_gw_family ||
5162 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
5163 }
5164
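/* Create a nexthop group with one nexthop per route in the IPv6
 * multipath entry and program it to the device's adjacency table.
 */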
5165 static struct mlxsw_sp_nexthop_group *
5166 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
5167 struct mlxsw_sp_fib6_entry *fib6_entry)
5168 {
5169 struct mlxsw_sp_nexthop_group *nh_grp;
5170 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5171 struct mlxsw_sp_nexthop *nh;
5172 int i = 0;
5173 int err;
5174
5175 nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
5176 GFP_KERNEL);
5177 if (!nh_grp)
5178 return ERR_PTR(-ENOMEM);
5179 INIT_LIST_HEAD(&nh_grp->fib_list);
5180 #if IS_ENABLED(CONFIG_IPV6)
5181 nh_grp->neigh_tbl = &nd_tbl;
5182 #endif
5183 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
5184 struct mlxsw_sp_rt6, list);
5185 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
5186 nh_grp->count = fib6_entry->nrt6;
5187 for (i = 0; i < nh_grp->count; i++) {
5188 struct fib6_info *rt = mlxsw_sp_rt6->rt;
5189
5190 nh = &nh_grp->nexthops[i];
5191 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
5192 if (err)
5193 goto err_nexthop6_init;
5194 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
5195 }
5196
5197 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5198 if (err)
5199 goto err_nexthop_group_insert;
5200
5201 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5202 return nh_grp;
5203
5204 err_nexthop_group_insert:
5205 err_nexthop6_init:
5206 for (i--; i >= 0; i--) {
5207 nh = &nh_grp->nexthops[i];
5208 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5209 }
5210 kfree(nh_grp);
5211 return ERR_PTR(err);
5212 }
5213
5214 static void
5215 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
5216 struct mlxsw_sp_nexthop_group *nh_grp)
5217 {
5218 struct mlxsw_sp_nexthop *nh;
5219 int i = nh_grp->count;
5220
5221 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5222 for (i--; i >= 0; i--) {
5223 nh = &nh_grp->nexthops[i];
5224 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5225 }
5226 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5227 WARN_ON(nh_grp->adj_index_valid);
5228 kfree(nh_grp);
5229 }
5230
5231 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
5232 struct mlxsw_sp_fib6_entry *fib6_entry)
5233 {
5234 struct mlxsw_sp_nexthop_group *nh_grp;
5235
5236 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
5237 if (!nh_grp) {
5238 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
5239 if (IS_ERR(nh_grp))
5240 return PTR_ERR(nh_grp);
5241 }
5242
5243 list_add_tail(&fib6_entry->common.nexthop_group_node,
5244 &nh_grp->fib_list);
5245 fib6_entry->common.nh_group = nh_grp;
5246
5247 /* The route and the nexthop are described by the same struct, so we
5248 * need to update the nexthop offload indication for the new route.
5249 */
5250 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
5251
5252 return 0;
5253 }
5254
5255 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
5256 struct mlxsw_sp_fib_entry *fib_entry)
5257 {
5258 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5259
5260 list_del(&fib_entry->nexthop_group_node);
5261 if (!list_empty(&nh_grp->fib_list))
5262 return;
5263 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
5264 }
5265
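/* Move the entry to a nexthop group matching its current set of
 * routes: detach it from its old group, attach it to a matching
 * (possibly new) group and update the device. Roll back on error.
 */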
5266 static int
5267 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5268 struct mlxsw_sp_fib6_entry *fib6_entry)
5269 {
5270 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5271 int err;
5272
5273 fib6_entry->common.nh_group = NULL;
5274 list_del(&fib6_entry->common.nexthop_group_node);
5275
5276 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5277 if (err)
5278 goto err_nexthop6_group_get;
5279
5280 /* In case this entry is offloaded, then the adjacency index
5281 * currently associated with it in the device's table is that
5282 * of the old group. Start using the new one instead.
5283 */
5284 err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
5285 if (err)
5286 goto err_fib_entry_update;
5287
5288 if (list_empty(&old_nh_grp->fib_list))
5289 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5290
5291 return 0;
5292
5293 err_fib_entry_update:
5294 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5295 err_nexthop6_group_get:
5296 list_add_tail(&fib6_entry->common.nexthop_group_node,
5297 &old_nh_grp->fib_list);
5298 fib6_entry->common.nh_group = old_nh_grp;
5299 return err;
5300 }
5301
5302 static int
5303 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5304 struct mlxsw_sp_fib6_entry *fib6_entry,
5305 struct fib6_info **rt_arr, unsigned int nrt6)
5306 {
5307 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5308 int err, i;
5309
5310 for (i = 0; i < nrt6; i++) {
5311 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5312 if (IS_ERR(mlxsw_sp_rt6)) {
5313 err = PTR_ERR(mlxsw_sp_rt6);
5314 goto err_rt6_create;
5315 }
5316
5317 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5318 fib6_entry->nrt6++;
5319 }
5320
5321 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5322 if (err)
5323 goto err_nexthop6_group_update;
5324
5325 return 0;
5326
5327 err_nexthop6_group_update:
5328 i = nrt6;
5329 err_rt6_create:
5330 for (i--; i >= 0; i--) {
5331 fib6_entry->nrt6--;
5332 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5333 struct mlxsw_sp_rt6, list);
5334 list_del(&mlxsw_sp_rt6->list);
5335 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5336 }
5337 return err;
5338 }
5339
5340 static void
5341 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5342 struct mlxsw_sp_fib6_entry *fib6_entry,
5343 struct fib6_info **rt_arr, unsigned int nrt6)
5344 {
5345 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5346 int i;
5347
5348 for (i = 0; i < nrt6; i++) {
5349 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
5350 rt_arr[i]);
5351 if (WARN_ON_ONCE(!mlxsw_sp_rt6))
5352 continue;
5353
5354 fib6_entry->nrt6--;
5355 list_del(&mlxsw_sp_rt6->list);
5356 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5357 }
5358
5359 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5360 }
5361
5362 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5363 struct mlxsw_sp_fib_entry *fib_entry,
5364 const struct fib6_info *rt)
5365 {
5366 /* Packets hitting RTF_REJECT routes need to be discarded by the
5367 * stack. We can rely on their destination device not having a
5368 * RIF (it's the loopback device) and can thus use action type
5369 * local, which will cause them to be trapped with a lower
5370 * priority than packets that need to be locally received.
5371 */
5372 if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
5373 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5374 else if (rt->fib6_type == RTN_BLACKHOLE)
5375 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
5376 else if (rt->fib6_flags & RTF_REJECT)
5377 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
5378 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
5379 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5380 else
5381 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5382 }
5383
5384 static void
5385 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5386 {
5387 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5388
5389 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5390 list) {
5391 fib6_entry->nrt6--;
5392 list_del(&mlxsw_sp_rt6->list);
5393 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5394 }
5395 }
5396
5397 static struct mlxsw_sp_fib6_entry *
5398 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5399 struct mlxsw_sp_fib_node *fib_node,
5400 struct fib6_info **rt_arr, unsigned int nrt6)
5401 {
5402 struct mlxsw_sp_fib6_entry *fib6_entry;
5403 struct mlxsw_sp_fib_entry *fib_entry;
5404 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5405 int err, i;
5406
5407 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5408 if (!fib6_entry)
5409 return ERR_PTR(-ENOMEM);
5410 fib_entry = &fib6_entry->common;
5411
5412 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5413
5414 for (i = 0; i < nrt6; i++) {
5415 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5416 if (IS_ERR(mlxsw_sp_rt6)) {
5417 err = PTR_ERR(mlxsw_sp_rt6);
5418 goto err_rt6_create;
5419 }
5420 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5421 fib6_entry->nrt6++;
5422 }
5423
5424 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
5425
5426 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5427 if (err)
5428 goto err_nexthop6_group_get;
5429
5430 fib_entry->fib_node = fib_node;
5431
5432 return fib6_entry;
5433
5434 err_nexthop6_group_get:
5435 i = nrt6;
5436 err_rt6_create:
5437 for (i--; i >= 0; i--) {
5438 fib6_entry->nrt6--;
5439 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5440 struct mlxsw_sp_rt6, list);
5441 list_del(&mlxsw_sp_rt6->list);
5442 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5443 }
5444 kfree(fib6_entry);
5445 return ERR_PTR(err);
5446 }
5447
5448 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5449 struct mlxsw_sp_fib6_entry *fib6_entry)
5450 {
5451 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5452 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5453 WARN_ON(fib6_entry->nrt6);
5454 kfree(fib6_entry);
5455 }
5456
5457 static struct mlxsw_sp_fib6_entry *
5458 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5459 const struct fib6_info *rt)
5460 {
5461 struct mlxsw_sp_fib6_entry *fib6_entry;
5462 struct mlxsw_sp_fib_node *fib_node;
5463 struct mlxsw_sp_fib *fib;
5464 struct fib6_info *cmp_rt;
5465 struct mlxsw_sp_vr *vr;
5466
5467 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
5468 if (!vr)
5469 return NULL;
5470 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5471
5472 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5473 sizeof(rt->fib6_dst.addr),
5474 rt->fib6_dst.plen);
5475 if (!fib_node)
5476 return NULL;
5477
5478 fib6_entry = container_of(fib_node->fib_entry,
5479 struct mlxsw_sp_fib6_entry, common);
5480 cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5481 if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
5482 rt->fib6_metric == cmp_rt->fib6_metric &&
5483 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5484 return fib6_entry;
5485
5486 return NULL;
5487 }
5488
5489 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
5490 {
5491 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5492 struct mlxsw_sp_fib6_entry *fib6_replaced;
5493 struct fib6_info *rt, *rt_replaced;
5494
5495 if (!fib_node->fib_entry)
5496 return true;
5497
5498 fib6_replaced = container_of(fib_node->fib_entry,
5499 struct mlxsw_sp_fib6_entry,
5500 common);
5501 rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5502 rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
5503 if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
5504 rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
5505 return false;
5506
5507 return true;
5508 }
5509
5510 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
5511 struct fib6_info **rt_arr,
5512 unsigned int nrt6)
5513 {
5514 struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
5515 struct mlxsw_sp_fib_entry *replaced;
5516 struct mlxsw_sp_fib_node *fib_node;
5517 struct fib6_info *rt = rt_arr[0];
5518 int err;
5519
5520 if (mlxsw_sp->router->aborted)
5521 return 0;
5522
5523 if (rt->fib6_src.plen)
5524 return -EINVAL;
5525
5526 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5527 return 0;
5528
5529 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5530 &rt->fib6_dst.addr,
5531 sizeof(rt->fib6_dst.addr),
5532 rt->fib6_dst.plen,
5533 MLXSW_SP_L3_PROTO_IPV6);
5534 if (IS_ERR(fib_node))
5535 return PTR_ERR(fib_node);
5536
5537 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
5538 nrt6);
5539 if (IS_ERR(fib6_entry)) {
5540 err = PTR_ERR(fib6_entry);
5541 goto err_fib6_entry_create;
5542 }
5543
5544 if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
5545 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5546 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5547 return 0;
5548 }
5549
5550 replaced = fib_node->fib_entry;
5551 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
5552 if (err)
5553 goto err_fib_node_entry_link;
5554
5555 /* Nothing to replace */
5556 if (!replaced)
5557 return 0;
5558
5559 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
5560 fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
5561 common);
5562 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
5563
5564 return 0;
5565
5566 err_fib_node_entry_link:
5567 fib_node->fib_entry = replaced;
5568 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5569 err_fib6_entry_create:
5570 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5571 return err;
5572 }
5573
5574 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
5575 struct fib6_info **rt_arr,
5576 unsigned int nrt6)
5577 {
5578 struct mlxsw_sp_fib6_entry *fib6_entry;
5579 struct mlxsw_sp_fib_node *fib_node;
5580 struct fib6_info *rt = rt_arr[0];
5581 int err;
5582
5583 if (mlxsw_sp->router->aborted)
5584 return 0;
5585
5586 if (rt->fib6_src.plen)
5587 return -EINVAL;
5588
5589 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5590 return 0;
5591
5592 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5593 &rt->fib6_dst.addr,
5594 sizeof(rt->fib6_dst.addr),
5595 rt->fib6_dst.plen,
5596 MLXSW_SP_L3_PROTO_IPV6);
5597 if (IS_ERR(fib_node))
5598 return PTR_ERR(fib_node);
5599
5600 if (WARN_ON_ONCE(!fib_node->fib_entry)) {
5601 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5602 return -EINVAL;
5603 }
5604
5605 fib6_entry = container_of(fib_node->fib_entry,
5606 struct mlxsw_sp_fib6_entry, common);
5607 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
5608 nrt6);
5609 if (err)
5610 goto err_fib6_entry_nexthop_add;
5611
5612 return 0;
5613
5614 err_fib6_entry_nexthop_add:
5615 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5616 return err;
5617 }
5618
5619 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5620 struct fib6_info **rt_arr,
5621 unsigned int nrt6)
5622 {
5623 struct mlxsw_sp_fib6_entry *fib6_entry;
5624 struct mlxsw_sp_fib_node *fib_node;
5625 struct fib6_info *rt = rt_arr[0];
5626
5627 if (mlxsw_sp->router->aborted)
5628 return;
5629
5630 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5631 return;
5632
5633 /* Multipath routes are first added to the FIB trie and only then
5634 * notified. If we vetoed the addition, we will get a delete
5635 * notification for a route we do not have. Therefore, do not warn if
5636 * route was not found.
5637 */
5638 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5639 if (!fib6_entry)
5640 return;
5641
5642 /* If not all the nexthops are deleted, then only reduce the nexthop
5643 * group.
5644 */
5645 if (nrt6 != fib6_entry->nrt6) {
5646 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
5647 nrt6);
5648 return;
5649 }
5650
5651 fib_node = fib6_entry->common.fib_node;
5652
5653 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
5654 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5655 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5656 }
5657
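/* In abort mode, bind all virtual routers to a minimal LPM tree and
 * install a zero-prefix route that traps packets to the CPU, so that
 * routing is performed by the kernel instead.
 */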
5658 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5659 enum mlxsw_reg_ralxx_protocol proto,
5660 u8 tree_id)
5661 {
5662 char ralta_pl[MLXSW_REG_RALTA_LEN];
5663 char ralst_pl[MLXSW_REG_RALST_LEN];
5664 int i, err;
5665
5666 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
5667 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5668 if (err)
5669 return err;
5670
5671 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
5672 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5673 if (err)
5674 return err;
5675
5676 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5677 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5678 char raltb_pl[MLXSW_REG_RALTB_LEN];
5679 char ralue_pl[MLXSW_REG_RALUE_LEN];
5680
5681 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
5682 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5683 raltb_pl);
5684 if (err)
5685 return err;
5686
5687 mlxsw_reg_ralue_pack(ralue_pl, proto,
5688 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
5689 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5690 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5691 ralue_pl);
5692 if (err)
5693 return err;
5694 }
5695
5696 return 0;
5697 }
5698
5699 static struct mlxsw_sp_mr_table *
5700 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5701 {
5702 if (family == RTNL_FAMILY_IPMR)
5703 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
5704 else
5705 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
5706 }
5707
5708 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5709 struct mfc_entry_notifier_info *men_info,
5710 bool replace)
5711 {
5712 struct mlxsw_sp_mr_table *mrt;
5713 struct mlxsw_sp_vr *vr;
5714
5715 if (mlxsw_sp->router->aborted)
5716 return 0;
5717
5718 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
5719 if (IS_ERR(vr))
5720 return PTR_ERR(vr);
5721
5722 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5723 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
5724 }
5725
5726 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5727 struct mfc_entry_notifier_info *men_info)
5728 {
5729 struct mlxsw_sp_mr_table *mrt;
5730 struct mlxsw_sp_vr *vr;
5731
5732 if (mlxsw_sp->router->aborted)
5733 return;
5734
5735 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5736 if (WARN_ON(!vr))
5737 return;
5738
5739 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5740 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
5741 mlxsw_sp_vr_put(mlxsw_sp, vr);
5742 }
5743
5744 static int
5745 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5746 struct vif_entry_notifier_info *ven_info)
5747 {
5748 struct mlxsw_sp_mr_table *mrt;
5749 struct mlxsw_sp_rif *rif;
5750 struct mlxsw_sp_vr *vr;
5751
5752 if (mlxsw_sp->router->aborted)
5753 return 0;
5754
5755 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
5756 if (IS_ERR(vr))
5757 return PTR_ERR(vr);
5758
5759 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5760 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5761 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
5762 ven_info->vif_index,
5763 ven_info->vif_flags, rif);
5764 }
5765
5766 static void
5767 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5768 struct vif_entry_notifier_info *ven_info)
5769 {
5770 struct mlxsw_sp_mr_table *mrt;
5771 struct mlxsw_sp_vr *vr;
5772
5773 if (mlxsw_sp->router->aborted)
5774 return;
5775
5776 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5777 if (WARN_ON(!vr))
5778 return;
5779
5780 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5781 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
5782 mlxsw_sp_vr_put(mlxsw_sp, vr);
5783 }
5784
5785 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5786 {
5787 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5788 int err;
5789
5790 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5791 MLXSW_SP_LPM_TREE_MIN);
5792 if (err)
5793 return err;
5794
5795 /* The multicast router code does not need an abort trap as, by default,
5796 * packets that don't match any routes are trapped to the CPU.
5797 */
5798
5799 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5800 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5801 MLXSW_SP_LPM_TREE_MIN + 1);
5802 }
5803
5804 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5805 struct mlxsw_sp_fib_node *fib_node)
5806 {
5807 struct mlxsw_sp_fib4_entry *fib4_entry;
5808
5809 fib4_entry = container_of(fib_node->fib_entry,
5810 struct mlxsw_sp_fib4_entry, common);
5811 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
5812 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5813 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5814 }
5815
5816 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5817 struct mlxsw_sp_fib_node *fib_node)
5818 {
5819 struct mlxsw_sp_fib6_entry *fib6_entry;
5820
5821 fib6_entry = container_of(fib_node->fib_entry,
5822 struct mlxsw_sp_fib6_entry, common);
5823 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
5824 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5825 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5826 }
5827
5828 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5829 struct mlxsw_sp_fib_node *fib_node)
5830 {
5831 switch (fib_node->fib->proto) {
5832 case MLXSW_SP_L3_PROTO_IPV4:
5833 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5834 break;
5835 case MLXSW_SP_L3_PROTO_IPV6:
5836 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
5837 break;
5838 }
5839 }
5840
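/* Flushing a node can free other nodes on the list. Record whether the
 * current node is the last one before flushing it, so that the saved
 * next pointer is never dereferenced after it may have been freed.
 */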
5841 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5842 struct mlxsw_sp_vr *vr,
5843 enum mlxsw_sp_l3proto proto)
5844 {
5845 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5846 struct mlxsw_sp_fib_node *fib_node, *tmp;
5847
5848 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5849 bool do_break = &tmp->list == &fib->node_list;
5850
5851 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5852 if (do_break)
5853 break;
5854 }
5855 }
5856
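/* Flush all multicast and unicast routes from all virtual routers.
 * Called, for example, when the driver enters abort mode, after which
 * the kernel handles all routing.
 */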
5857 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
5858 {
5859 int i, j;
5860
5861 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5862 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5863
5864 if (!mlxsw_sp_vr_is_used(vr))
5865 continue;
5866
5867 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
5868 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
5869 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
5870
5871 /* If the virtual router was only used for IPv4, then it is no
5872 * longer in use.
5873 */
5874 if (!mlxsw_sp_vr_is_used(vr))
5875 continue;
5876 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
5877 }
5878
5879 /* After flushing all the routes, it is not possible that anyone is still
5880 * using the adjacency index that is discarding packets, so free it in
5881 * case it was allocated.
5882 */
5883 if (!mlxsw_sp->router->adj_discard_index_valid)
5884 return;
5885 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
5886 mlxsw_sp->router->adj_discard_index);
5887 mlxsw_sp->router->adj_discard_index_valid = false;
5888 }
5889
5890 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
5891 {
5892 int err;
5893
5894 if (mlxsw_sp->router->aborted)
5895 return;
5896 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
5897 mlxsw_sp_router_fib_flush(mlxsw_sp);
5898 mlxsw_sp->router->aborted = true;
5899 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5900 if (err)
5901 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5902 }
5903
5904 struct mlxsw_sp_fib6_event_work {
5905 struct fib6_info **rt_arr;
5906 unsigned int nrt6;
5907 };
5908
5909 struct mlxsw_sp_fib_event_work {
5910 struct work_struct work;
5911 union {
5912 struct mlxsw_sp_fib6_event_work fib6_work;
5913 struct fib_entry_notifier_info fen_info;
5914 struct fib_rule_notifier_info fr_info;
5915 struct fib_nh_notifier_info fnh_info;
5916 struct mfc_entry_notifier_info men_info;
5917 struct vif_entry_notifier_info ven_info;
5918 };
5919 struct mlxsw_sp *mlxsw_sp;
5920 unsigned long event;
5921 };
5922
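/* Snapshot the route and all of its siblings into an array, so that
 * the multipath entry can be processed as a whole in process context.
 * References are taken because the FIB notifier runs in atomic
 * context.
 */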
5923 static int
5924 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
5925 struct fib6_entry_notifier_info *fen6_info)
5926 {
5927 struct fib6_info *rt = fen6_info->rt;
5928 struct fib6_info **rt_arr;
5929 struct fib6_info *iter;
5930 unsigned int nrt6;
5931 int i = 0;
5932
5933 nrt6 = fen6_info->nsiblings + 1;
5934
5935 rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
5936 if (!rt_arr)
5937 return -ENOMEM;
5938
5939 fib6_work->rt_arr = rt_arr;
5940 fib6_work->nrt6 = nrt6;
5941
5942 rt_arr[0] = rt;
5943 fib6_info_hold(rt);
5944
5945 if (!fen6_info->nsiblings)
5946 return 0;
5947
5948 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
5949 if (i == fen6_info->nsiblings)
5950 break;
5951
5952 rt_arr[i + 1] = iter;
5953 fib6_info_hold(iter);
5954 i++;
5955 }
5956 WARN_ON_ONCE(i != fen6_info->nsiblings);
5957
5958 return 0;
5959 }
5960
5961 static void
5962 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
5963 {
5964 int i;
5965
5966 for (i = 0; i < fib6_work->nrt6; i++)
5967 mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
5968 kfree(fib6_work->rt_arr);
5969 }
5970
5971 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
5972 {
5973 struct mlxsw_sp_fib_event_work *fib_work =
5974 container_of(work, struct mlxsw_sp_fib_event_work, work);
5975 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5976 int err;
5977
5978 mutex_lock(&mlxsw_sp->router->lock);
5979 mlxsw_sp_span_respin(mlxsw_sp);
5980
5981 switch (fib_work->event) {
5982 case FIB_EVENT_ENTRY_REPLACE:
5983 err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
5984 &fib_work->fen_info);
5985 if (err)
5986 mlxsw_sp_router_fib_abort(mlxsw_sp);
5987 fib_info_put(fib_work->fen_info.fi);
5988 break;
5989 case FIB_EVENT_ENTRY_DEL:
5990 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5991 fib_info_put(fib_work->fen_info.fi);
5992 break;
5993 case FIB_EVENT_NH_ADD:
5994 case FIB_EVENT_NH_DEL:
5995 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5996 fib_work->fnh_info.fib_nh);
5997 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5998 break;
5999 }
6000 mutex_unlock(&mlxsw_sp->router->lock);
6001 kfree(fib_work);
6002 }
6003
6004 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
6005 {
6006 struct mlxsw_sp_fib_event_work *fib_work =
6007 container_of(work, struct mlxsw_sp_fib_event_work, work);
6008 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6009 int err;
6010
6011 mutex_lock(&mlxsw_sp->router->lock);
6012 mlxsw_sp_span_respin(mlxsw_sp);
6013
6014 switch (fib_work->event) {
6015 case FIB_EVENT_ENTRY_REPLACE:
6016 err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
6017 fib_work->fib6_work.rt_arr,
6018 fib_work->fib6_work.nrt6);
6019 if (err)
6020 mlxsw_sp_router_fib_abort(mlxsw_sp);
6021 mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6022 break;
6023 case FIB_EVENT_ENTRY_APPEND:
6024 err = mlxsw_sp_router_fib6_append(mlxsw_sp,
6025 fib_work->fib6_work.rt_arr,
6026 fib_work->fib6_work.nrt6);
6027 if (err)
6028 mlxsw_sp_router_fib_abort(mlxsw_sp);
6029 mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6030 break;
6031 case FIB_EVENT_ENTRY_DEL:
6032 mlxsw_sp_router_fib6_del(mlxsw_sp,
6033 fib_work->fib6_work.rt_arr,
6034 fib_work->fib6_work.nrt6);
6035 mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6036 break;
6037 }
6038 mutex_unlock(&mlxsw_sp->router->lock);
6039 kfree(fib_work);
6040 }
6041
6042 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
6043 {
6044 struct mlxsw_sp_fib_event_work *fib_work =
6045 container_of(work, struct mlxsw_sp_fib_event_work, work);
6046 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6047 bool replace;
6048 int err;
6049
6050 rtnl_lock();
6051 mutex_lock(&mlxsw_sp->router->lock);
6052 switch (fib_work->event) {
6053 case FIB_EVENT_ENTRY_REPLACE:
6054 case FIB_EVENT_ENTRY_ADD:
6055 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
6056
6057 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
6058 replace);
6059 if (err)
6060 mlxsw_sp_router_fib_abort(mlxsw_sp);
6061 mr_cache_put(fib_work->men_info.mfc);
6062 break;
6063 case FIB_EVENT_ENTRY_DEL:
6064 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
6065 mr_cache_put(fib_work->men_info.mfc);
6066 break;
6067 case FIB_EVENT_VIF_ADD:
6068 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
6069 &fib_work->ven_info);
6070 if (err)
6071 mlxsw_sp_router_fib_abort(mlxsw_sp);
6072 dev_put(fib_work->ven_info.dev);
6073 break;
6074 case FIB_EVENT_VIF_DEL:
6075 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
6076 &fib_work->ven_info);
6077 dev_put(fib_work->ven_info.dev);
6078 break;
6079 }
6080 mutex_unlock(&mlxsw_sp->router->lock);
6081 rtnl_unlock();
6082 kfree(fib_work);
6083 }
6084
6085 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
6086 struct fib_notifier_info *info)
6087 {
6088 struct fib_entry_notifier_info *fen_info;
6089 struct fib_nh_notifier_info *fnh_info;
6090
6091 switch (fib_work->event) {
6092 case FIB_EVENT_ENTRY_REPLACE:
6093 case FIB_EVENT_ENTRY_DEL:
6094 fen_info = container_of(info, struct fib_entry_notifier_info,
6095 info);
6096 fib_work->fen_info = *fen_info;
6097 /* Take a reference on the fib_info to prevent it from being
6098 * freed while the work is queued. Release it afterwards.
6099 */
6100 fib_info_hold(fib_work->fen_info.fi);
6101 break;
6102 case FIB_EVENT_NH_ADD:
6103 case FIB_EVENT_NH_DEL:
6104 fnh_info = container_of(info, struct fib_nh_notifier_info,
6105 info);
6106 fib_work->fnh_info = *fnh_info;
6107 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
6108 break;
6109 }
6110 }
6111
6112 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
6113 struct fib_notifier_info *info)
6114 {
6115 struct fib6_entry_notifier_info *fen6_info;
6116 int err;
6117
6118 switch (fib_work->event) {
6119 case FIB_EVENT_ENTRY_REPLACE:
6120 case FIB_EVENT_ENTRY_APPEND:
6121 case FIB_EVENT_ENTRY_DEL:
6122 fen6_info = container_of(info, struct fib6_entry_notifier_info,
6123 info);
6124 err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
6125 fen6_info);
6126 if (err)
6127 return err;
6128 break;
6129 }
6130
6131 return 0;
6132 }
6133
6134 static void
6135 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
6136 struct fib_notifier_info *info)
6137 {
6138 switch (fib_work->event) {
6139 case FIB_EVENT_ENTRY_REPLACE:
6140 case FIB_EVENT_ENTRY_ADD:
6141 case FIB_EVENT_ENTRY_DEL:
6142 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
6143 mr_cache_hold(fib_work->men_info.mfc);
6144 break;
6145 case FIB_EVENT_VIF_ADD:
6146 case FIB_EVENT_VIF_DEL:
6147 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
6148 dev_hold(fib_work->ven_info.dev);
6149 break;
6150 }
6151 }
6152
6153 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
6154 struct fib_notifier_info *info,
6155 struct mlxsw_sp *mlxsw_sp)
6156 {
6157 struct netlink_ext_ack *extack = info->extack;
6158 struct fib_rule_notifier_info *fr_info;
6159 struct fib_rule *rule;
6160 int err = 0;
6161
6162 /* nothing to do at the moment */
6163 if (event == FIB_EVENT_RULE_DEL)
6164 return 0;
6165
6166 if (mlxsw_sp->router->aborted)
6167 return 0;
6168
6169 fr_info = container_of(info, struct fib_rule_notifier_info, info);
6170 rule = fr_info->rule;
6171
6172 /* Rule only affects locally generated traffic */
6173 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
6174 return 0;
6175
6176 switch (info->family) {
6177 case AF_INET:
6178 if (!fib4_rule_default(rule) && !rule->l3mdev)
6179 err = -EOPNOTSUPP;
6180 break;
6181 case AF_INET6:
6182 if (!fib6_rule_default(rule) && !rule->l3mdev)
6183 err = -EOPNOTSUPP;
6184 break;
6185 case RTNL_FAMILY_IPMR:
6186 if (!ipmr_rule_default(rule) && !rule->l3mdev)
6187 err = -EOPNOTSUPP;
6188 break;
6189 case RTNL_FAMILY_IP6MR:
6190 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
6191 err = -EOPNOTSUPP;
6192 break;
6193 }
6194
6195 if (err < 0)
6196 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
6197
6198 return err;
6199 }
6200
6201 /* Called with rcu_read_lock() */
6202 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
6203 unsigned long event, void *ptr)
6204 {
6205 struct mlxsw_sp_fib_event_work *fib_work;
6206 struct fib_notifier_info *info = ptr;
6207 struct mlxsw_sp_router *router;
6208 int err;
6209
6210 if (info->family != AF_INET && info->family != AF_INET6 &&
6211 info->family != RTNL_FAMILY_IPMR &&
6212 info->family != RTNL_FAMILY_IP6MR)
6213 return NOTIFY_DONE;
6214
6215 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6216
6217 switch (event) {
6218 case FIB_EVENT_RULE_ADD:
6219 case FIB_EVENT_RULE_DEL:
6220 err = mlxsw_sp_router_fib_rule_event(event, info,
6221 router->mlxsw_sp);
6222 return notifier_from_errno(err);
6223 case FIB_EVENT_ENTRY_ADD:
6224 case FIB_EVENT_ENTRY_REPLACE:
6225 case FIB_EVENT_ENTRY_APPEND:
6226 if (router->aborted) {
6227 NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
6228 return notifier_from_errno(-EINVAL);
6229 }
6230 if (info->family == AF_INET) {
6231 struct fib_entry_notifier_info *fen_info = ptr;
6232
6233 if (fen_info->fi->fib_nh_is_v6) {
6234 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
6235 return notifier_from_errno(-EINVAL);
6236 }
6237 if (fen_info->fi->nh) {
6238 NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
6239 return notifier_from_errno(-EINVAL);
6240 }
6241 } else if (info->family == AF_INET6) {
6242 struct fib6_entry_notifier_info *fen6_info;
6243
6244 fen6_info = container_of(info,
6245 struct fib6_entry_notifier_info,
6246 info);
6247 if (fen6_info->rt->nh) {
6248 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
6249 return notifier_from_errno(-EINVAL);
6250 }
6251 }
6252 break;
6253 }
6254
6255 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
6256 if (!fib_work)
6257 return NOTIFY_BAD;
6258
6259 fib_work->mlxsw_sp = router->mlxsw_sp;
6260 fib_work->event = event;
6261
6262 switch (info->family) {
6263 case AF_INET:
6264 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
6265 mlxsw_sp_router_fib4_event(fib_work, info);
6266 break;
6267 case AF_INET6:
6268 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
6269 err = mlxsw_sp_router_fib6_event(fib_work, info);
6270 if (err)
6271 goto err_fib_event;
6272 break;
6273 case RTNL_FAMILY_IP6MR:
6274 case RTNL_FAMILY_IPMR:
6275 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
6276 mlxsw_sp_router_fibmr_event(fib_work, info);
6277 break;
6278 }
6279
6280 mlxsw_core_schedule_work(&fib_work->work);
6281
6282 return NOTIFY_DONE;
6283
6284 err_fib_event:
6285 kfree(fib_work);
6286 return NOTIFY_BAD;
6287 }
6288
6289 static struct mlxsw_sp_rif *
6290 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
6291 const struct net_device *dev)
6292 {
6293 int i;
6294
6295 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6296 if (mlxsw_sp->router->rifs[i] &&
6297 mlxsw_sp->router->rifs[i]->dev == dev)
6298 return mlxsw_sp->router->rifs[i];
6299
6300 return NULL;
6301 }
6302
6303 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
6304 const struct net_device *dev)
6305 {
6306 struct mlxsw_sp_rif *rif;
6307
6308 mutex_lock(&mlxsw_sp->router->lock);
6309 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6310 mutex_unlock(&mlxsw_sp->router->lock);
6311
6312 return rif;
6313 }
6314
6315 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
6316 {
6317 struct mlxsw_sp_rif *rif;
6318 u16 vid = 0;
6319
6320 mutex_lock(&mlxsw_sp->router->lock);
6321 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6322 if (!rif)
6323 goto out;
6324
6325 /* We only return the VID for VLAN RIFs. Otherwise we return an
6326 * invalid value (0).
6327 */
6328 if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
6329 goto out;
6330
6331 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6332
6333 out:
6334 mutex_unlock(&mlxsw_sp->router->lock);
6335 return vid;
6336 }
6337
6338 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
6339 {
6340 char ritr_pl[MLXSW_REG_RITR_LEN];
6341 int err;
6342
6343 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
6344 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6345 if (err)
6346 return err;
6347
6348 mlxsw_reg_ritr_enable_set(ritr_pl, false);
6349 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6350 }
6351
6352 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
6353 struct mlxsw_sp_rif *rif)
6354 {
6355 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6356 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6357 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
6358 }
6359
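/* Decide whether an address event should (de)configure a RIF for the
 * netdev: configure one for the first address and deconfigure it only
 * once the last IPv4 and IPv6 addresses are gone.
 */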
6360 static bool
6361 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6362 unsigned long event)
6363 {
6364 struct inet6_dev *inet6_dev;
6365 bool addr_list_empty = true;
6366 struct in_device *idev;
6367
6368 switch (event) {
6369 case NETDEV_UP:
6370 return rif == NULL;
6371 case NETDEV_DOWN:
6372 rcu_read_lock();
6373 idev = __in_dev_get_rcu(dev);
6374 if (idev && idev->ifa_list)
6375 addr_list_empty = false;
6376
6377 inet6_dev = __in6_dev_get(dev);
6378 if (addr_list_empty && inet6_dev &&
6379 !list_empty(&inet6_dev->addr_list))
6380 addr_list_empty = false;
6381 rcu_read_unlock();
6382
6383 /* macvlans do not have a RIF, but rather piggyback on the
6384 * RIF of their lower device.
6385 */
6386 if (netif_is_macvlan(dev) && addr_list_empty)
6387 return true;
6388
6389 if (rif && addr_list_empty &&
6390 !netif_is_l3_slave(rif->dev))
6391 return true;
6392 /* It is possible we already removed the RIF ourselves
6393 * if it was assigned to a netdev that is now a bridge
6394 * or LAG slave.
6395 */
6396 return false;
6397 }
6398
6399 return false;
6400 }
6401
6402 static enum mlxsw_sp_rif_type
6403 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6404 const struct net_device *dev)
6405 {
6406 enum mlxsw_sp_fid_type type;
6407
6408 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6409 return MLXSW_SP_RIF_TYPE_IPIP_LB;
6410
6411 /* Otherwise RIF type is derived from the type of the underlying FID. */
6412 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6413 type = MLXSW_SP_FID_TYPE_8021Q;
6414 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6415 type = MLXSW_SP_FID_TYPE_8021Q;
6416 else if (netif_is_bridge_master(dev))
6417 type = MLXSW_SP_FID_TYPE_8021D;
6418 else
6419 type = MLXSW_SP_FID_TYPE_RFID;
6420
6421 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6422 }
6423
6424 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
6425 {
6426 int i;
6427
6428 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6429 if (!mlxsw_sp->router->rifs[i]) {
6430 *p_rif_index = i;
6431 return 0;
6432 }
6433 }
6434
6435 return -ENOBUFS;
6436 }
6437
6438 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6439 u16 vr_id,
6440 struct net_device *l3_dev)
6441 {
6442 struct mlxsw_sp_rif *rif;
6443
6444 rif = kzalloc(rif_size, GFP_KERNEL);
6445 if (!rif)
6446 return NULL;
6447
6448 INIT_LIST_HEAD(&rif->nexthop_list);
6449 INIT_LIST_HEAD(&rif->neigh_list);
6450 if (l3_dev) {
6451 ether_addr_copy(rif->addr, l3_dev->dev_addr);
6452 rif->mtu = l3_dev->mtu;
6453 rif->dev = l3_dev;
6454 }
6455 rif->vr_id = vr_id;
6456 rif->rif_index = rif_index;
6457
6458 return rif;
6459 }
6460
6461 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6462 u16 rif_index)
6463 {
6464 return mlxsw_sp->router->rifs[rif_index];
6465 }
6466
6467 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6468 {
6469 return rif->rif_index;
6470 }
6471
6472 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6473 {
6474 return lb_rif->common.rif_index;
6475 }
6476
6477 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6478 {
6479 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
6480 struct mlxsw_sp_vr *ul_vr;
6481
6482 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
6483 if (WARN_ON(IS_ERR(ul_vr)))
6484 return 0;
6485
6486 return ul_vr->id;
6487 }
6488
6489 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6490 {
6491 return lb_rif->ul_rif_id;
6492 }
6493
6494 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6495 {
6496 return rif->dev->ifindex;
6497 }
6498
6499 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6500 {
6501 return rif->dev;
6502 }
6503
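/* Allocate a router interface (RIF) for the netdev, bind it to the
 * netdev's virtual router and FID, and configure it in the device
 * using the type specific RIF operations.
 */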
6504 static struct mlxsw_sp_rif *
6505 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
6506 const struct mlxsw_sp_rif_params *params,
6507 struct netlink_ext_ack *extack)
6508 {
6509 u32 tb_id = l3mdev_fib_table(params->dev);
6510 const struct mlxsw_sp_rif_ops *ops;
6511 struct mlxsw_sp_fid *fid = NULL;
6512 enum mlxsw_sp_rif_type type;
6513 struct mlxsw_sp_rif *rif;
6514 struct mlxsw_sp_vr *vr;
6515 u16 rif_index;
6516 int i, err;
6517
6518 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6519 ops = mlxsw_sp->rif_ops_arr[type];
6520
6521 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
6522 if (IS_ERR(vr))
6523 return ERR_CAST(vr);
6524 vr->rif_count++;
6525
6526 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
6527 if (err) {
6528 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
6529 goto err_rif_index_alloc;
6530 }
6531
6532 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
6533 if (!rif) {
6534 err = -ENOMEM;
6535 goto err_rif_alloc;
6536 }
6537 dev_hold(rif->dev);
6538 mlxsw_sp->router->rifs[rif_index] = rif;
6539 rif->mlxsw_sp = mlxsw_sp;
6540 rif->ops = ops;
6541
6542 if (ops->fid_get) {
6543 fid = ops->fid_get(rif, extack);
6544 if (IS_ERR(fid)) {
6545 err = PTR_ERR(fid);
6546 goto err_fid_get;
6547 }
6548 rif->fid = fid;
6549 }
6550
6551 if (ops->setup)
6552 ops->setup(rif, params);
6553
6554 err = ops->configure(rif);
6555 if (err)
6556 goto err_configure;
6557
6558 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6559 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6560 if (err)
6561 goto err_mr_rif_add;
6562 }
6563
6564 mlxsw_sp_rif_counters_alloc(rif);
6565
6566 return rif;
6567
6568 err_mr_rif_add:
6569 for (i--; i >= 0; i--)
6570 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6571 ops->deconfigure(rif);
6572 err_configure:
6573 if (fid)
6574 mlxsw_sp_fid_put(fid);
6575 err_fid_get:
6576 mlxsw_sp->router->rifs[rif_index] = NULL;
6577 dev_put(rif->dev);
6578 kfree(rif);
6579 err_rif_alloc:
6580 err_rif_index_alloc:
6581 vr->rif_count--;
6582 mlxsw_sp_vr_put(mlxsw_sp, vr);
6583 return ERR_PTR(err);
6584 }
6585
mlxsw_sp_rif_destroy(struct mlxsw_sp_rif * rif)6586 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6587 {
6588 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6589 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6590 struct mlxsw_sp_fid *fid = rif->fid;
6591 struct mlxsw_sp_vr *vr;
6592 int i;
6593
6594 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
6595 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6596
6597 mlxsw_sp_rif_counters_free(rif);
6598 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6599 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6600 ops->deconfigure(rif);
6601 if (fid)
6602 /* Loopback RIFs are not associated with a FID. */
6603 mlxsw_sp_fid_put(fid);
6604 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
6605 dev_put(rif->dev);
6606 kfree(rif);
6607 vr->rif_count--;
6608 mlxsw_sp_vr_put(mlxsw_sp, vr);
6609 }
6610
mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp * mlxsw_sp,struct net_device * dev)6611 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6612 struct net_device *dev)
6613 {
6614 struct mlxsw_sp_rif *rif;
6615
6616 mutex_lock(&mlxsw_sp->router->lock);
6617 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6618 if (!rif)
6619 goto out;
6620 mlxsw_sp_rif_destroy(rif);
6621 out:
6622 mutex_unlock(&mlxsw_sp->router->lock);
6623 }
6624
6625 static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params * params,struct mlxsw_sp_port_vlan * mlxsw_sp_port_vlan)6626 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6627 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6628 {
6629 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6630
6631 params->vid = mlxsw_sp_port_vlan->vid;
6632 params->lag = mlxsw_sp_port->lagged;
6633 if (params->lag)
6634 params->lag_id = mlxsw_sp_port->lag_id;
6635 else
6636 params->system_port = mlxsw_sp_port->local_port;
6637 }
6638
6639 static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif * rif)6640 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
6641 {
6642 return container_of(rif, struct mlxsw_sp_rif_subport, common);
6643 }
6644
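/* Sub-port RIFs are reference counted and shared: if a RIF already exists
 * for the netdev, only a reference is taken; otherwise a new RIF is
 * created. The reference is dropped in mlxsw_sp_rif_subport_put().
 */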
static struct mlxsw_sp_rif *
mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_rif_params *params,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_subport *rif_subport;
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
	if (!rif)
		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_inc(&rif_subport->ref_count);
	return rif;
}

static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	if (!refcount_dec_and_test(&rif_subport->ref_count))
		return;

	mlxsw_sp_rif_destroy(rif);
}

static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct net_device *l3_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
	if (IS_ERR(rif))
		return PTR_ERR(rif);

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif, extack);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
	return err;
}

static void
__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
	u16 vid = mlxsw_sp_port_vlan->vid;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
}

void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;

	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					     struct net_device *port_dev,
					     unsigned long event, u16 vid,
					     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
						      l3_dev, extack);
	case NETDEV_DOWN:
		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	if (netif_is_bridge_port(port_dev) ||
	    netif_is_lag_port(port_dev) ||
	    netif_is_ovs_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
						 MLXSW_SP_DEFAULT_VID, extack);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid,
					 struct netlink_ext_ack *extack)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
								port_dev,
								event, vid,
								extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event,
				       struct netlink_ext_ack *extack)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
					     MLXSW_SP_DEFAULT_VID, extack);
}

static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *l3_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *vlan_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (netif_is_bridge_port(vlan_dev))
		return 0;

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
							 event, vid, extack);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid, extack);
	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
						      extack);

	return 0;
}

static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
{
	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return ether_addr_equal_masked(mac, vrrp4, mask);
}

static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
{
	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return ether_addr_equal_masked(mac, vrrp6, mask);
}

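/* VRRP virtual MAC addresses have a fixed prefix: 00:00:5e:00:01:{VRID}
 * for IPv4 and 00:00:5e:00:02:{VRID} for IPv6 (RFC 5798). The last octet
 * carries the virtual router ID, which is programmed into the RIF below.
 */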
static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				const u8 *mac, bool adding)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u8 vrrp_id = adding ? mac[5] : 0;
	int err;

	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
		return 0;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
	else
		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

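/* A macvlan on top of a RIF does not get a RIF of its own. Instead, its
 * MAC address is installed as an FDB entry on the lower device's FID so
 * that packets destined to it are routed, and VRRP MACs additionally
 * program the VRID into the RIF.
 */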
static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
				    const struct net_device *macvlan_dev,
				    struct netlink_ext_ack *extack)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	if (!rif) {
		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
		return -EOPNOTSUPP;
	}

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		return err;

	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
				   macvlan_dev->dev_addr, true);
	if (err)
		goto err_rif_vrrp_add;

	/* Make sure the bridge driver does not have this MAC pointing at
	 * some other port.
	 */
	if (rif->ops->fdb_del)
		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);

	return 0;

err_rif_vrrp_add:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
	return err;
}

static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *macvlan_dev)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	/* If we do not have a RIF, then we already took care of
	 * removing the macvlan's MAC during RIF deletion.
	 */
	if (!rif)
		return;
	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
			     false);
	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
}

void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *macvlan_dev)
{
	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *macvlan_dev,
					   unsigned long event,
					   struct netlink_ext_ack *extack)
{
	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
	case NETDEV_DOWN:
		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *dev,
					       const unsigned char *dev_addr,
					       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;
	int i;

	/* A RIF is not created for macvlan netdevs. Their MAC is used to
	 * populate the FDB
	 */
	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
		return 0;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
		rif = mlxsw_sp->router->rifs[i];
		if (rif && rif->ops &&
		    rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
			continue;
		if (rif && rif->dev && rif->dev != dev &&
		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
					     mlxsw_sp->mac_mask)) {
			NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
			return -EINVAL;
		}
	}

	return 0;
}

static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *dev,
				     unsigned long event,
				     struct netlink_ext_ack *extack)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
	else if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
	else if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
						      extack);
	else if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
						    extack);
	else if (netif_is_macvlan(dev))
		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
						       extack);
	else
		return 0;
}

static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp_router *router;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
	mutex_lock(&router->lock);
	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
out:
	mutex_unlock(&router->lock);
	return notifier_from_errno(err);
}

int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
						  ivi->extack);
	if (err)
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *dev;
	unsigned long event;
};

static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
	dev_put(dev);
	kfree(inet6addr_work);
}

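/* IPv6 address events are received in atomic context and are therefore
 * deferred to a work item, where RTNL and the router lock (a mutex) can
 * be taken.
 */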
/* Called with rcu_read_lock() */
static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;
	struct mlxsw_sp_router *router;

	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	dev_hold(dev);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}

int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
						  i6vi->extack);
	if (err)
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

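/* Reflect a MAC or MTU change of the netdev in the device: the FDB entry
 * for the old MAC is removed, the RIF is re-programmed with the new MAC
 * and MTU, a new FDB entry is installed, and the multicast router tables
 * are updated if the MTU changed.
 */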
static int
mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = rif->dev;
	u16 fid_index;
	int err;

	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;
		int i;

		/* The RIF is relevant only to its mr_table instance, as unlike
		 * unicast routing, in multicast routing a RIF cannot be shared
		 * between several multicast routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
						   rif, dev->mtu);
	}

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}

static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
			struct netdev_notifier_pre_changeaddr_info *info)
{
	struct netlink_ext_ack *extack;

	extack = netdev_notifier_info_to_extack(&info->info);
	return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
						   info->dev_addr, extack);
}

int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;

	switch (event) {
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
		err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
		break;
	case NETDEV_PRE_CHANGEADDR:
		err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
		break;
	}

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
					  extack);

	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
}

int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	/* We do not create a RIF for a macvlan, but only use it to
	 * direct more MAC addresses to the router.
	 */
	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
		return 0;

	mutex_lock(&mlxsw_sp->router->lock);
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		break;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}

static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
					struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;

	if (!netif_is_macvlan(dev))
		return 0;

	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				   mlxsw_sp_fid_index(rif->fid), false);
}

static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
{
	struct netdev_nested_priv priv = {
		.data = (void *)rif,
	};

	if (!netif_is_macvlan_port(rif->dev))
		return 0;

	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
	return netdev_walk_all_upper_dev_rcu(rif->dev,
					     __mlxsw_sp_rif_macvlan_flush,
					     &priv);
}

static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_set(&rif_subport->ref_count, 1);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
	return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_rif_subport_op(rif, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
			     struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type = MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size = sizeof(struct mlxsw_sp_rif_subport),
	.setup = mlxsw_sp_rif_subport_setup,
	.configure = mlxsw_sp_rif_subport_configure,
	.deconfigure = mlxsw_sp_rif_subport_deconfigure,
	.fid_get = mlxsw_sp_rif_subport_fid_get,
};

static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

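/* The "router port" is a virtual port one past the last front-panel port.
 * It is used below as the flood destination that delivers broadcast and
 * multicast packets on a FID to the router.
 */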
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

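/* Configure a FID-based RIF: enable the RIF in hardware, open multicast
 * and broadcast flooding on the FID towards the router port, and install
 * an FDB entry so that packets with the RIF's MAC are routed.
 */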
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct switchdev_notifier_fdb_info info;
	struct net_device *dev;

	dev = br_fdb_find_port(rif->dev, mac, 0);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = 0;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type = MLXSW_SP_RIF_TYPE_FID,
	.rif_size = sizeof(struct mlxsw_sp_rif),
	.configure = mlxsw_sp_rif_fid_configure,
	.deconfigure = mlxsw_sp_rif_fid_deconfigure,
	.fid_get = mlxsw_sp_rif_fid_fid_get,
	.fdb_del = mlxsw_sp_rif_fid_fdb_del,
};

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
			  struct netlink_ext_ack *extack)
{
	struct net_device *br_dev;
	u16 vid;
	int err;

	if (is_vlan_dev(rif->dev)) {
		vid = vlan_dev_vlan_id(rif->dev);
		br_dev = vlan_dev_real_dev(rif->dev);
		if (WARN_ON(!netif_is_bridge_master(br_dev)))
			return ERR_PTR(-EINVAL);
	} else {
		err = br_vlan_get_pvid(rif->dev, &vid);
		if (err < 0 || !vid) {
			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
			return ERR_PTR(-EINVAL);
		}
	}

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct switchdev_notifier_fdb_info info;
	struct net_device *br_dev;
	struct net_device *dev;

	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
	dev = br_fdb_find_port(br_dev, mac, vid);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = vid;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
	.type = MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size = sizeof(struct mlxsw_sp_rif),
	.configure = mlxsw_sp_rif_fid_configure,
	.deconfigure = mlxsw_sp_rif_fid_deconfigure,
	.fid_get = mlxsw_sp_rif_vlan_fid_get,
	.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

static int
mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	lb_rif->ul_rif_id = 0;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}

static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
	.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup = mlxsw_sp_rif_ipip_lb_setup,
	.configure = mlxsw_sp1_rif_ipip_lb_configure,
	.deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
};

const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};

static int
mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
					     MLXSW_REG_RITR_LOOPBACK_GENERIC);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *ul_rif;
	u16 rif_index;
	int err;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		return ERR_PTR(err);
	}

	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
	if (!ul_rif)
		return ERR_PTR(-ENOMEM);

	mlxsw_sp->router->rifs[rif_index] = ul_rif;
	ul_rif->mlxsw_sp = mlxsw_sp;
	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
	if (err)
		goto ul_rif_op_err;

	return ul_rif;

ul_rif_op_err:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	kfree(ul_rif);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;

	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
	kfree(ul_rif);
}

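/* Underlay RIFs are shared by all tunnels whose underlay resides in the
 * same virtual router. The first user creates the RIF; subsequent users
 * only bump vr->ul_rif_refcnt, and the RIF is destroyed when the last
 * reference is dropped.
 */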
static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
		    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
		return vr->ul_rif;

	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
	if (IS_ERR(vr->ul_rif)) {
		err = PTR_ERR(vr->ul_rif);
		goto err_ul_rif_create;
	}

	vr->rif_count++;
	refcount_set(&vr->ul_rif_refcnt, 1);

	return vr->ul_rif;

err_ul_rif_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	struct mlxsw_sp_vr *vr;

	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];

	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
		return;

	vr->rif_count--;
	mlxsw_sp_ul_rif_destroy(ul_rif);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
			       u16 *ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif)) {
		err = PTR_ERR(ul_rif);
		goto out;
	}
	*ul_rif_index = ul_rif->rif_index;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
	if (WARN_ON(!ul_rif))
		goto out;

	mlxsw_sp_ul_rif_put(ul_rif);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

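/* Unlike Spectrum-1, which binds the tunnel loopback directly to the
 * underlay virtual router (ul_vr_id), Spectrum-2 points it at a dedicated
 * underlay RIF (ul_rif_id) that is shared per virtual router.
 */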
static int
mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;
	int err;

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = 0;
	lb_rif->ul_rif_id = ul_rif->rif_index;

	return 0;

err_loopback_op:
	mlxsw_sp_ul_rif_put(ul_rif);
	return err;
}

static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;

	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
	mlxsw_sp_ul_rif_put(ul_rif);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
	.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup = mlxsw_sp_rif_ipip_lb_setup,
	.configure = mlxsw_sp2_rif_ipip_lb_configure,
	.deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
};

const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);

	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
	if (err)
		return err;

	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

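/* Program the ECMP hash inputs according to the kernel's multipath hash
 * policy: in L3-only mode (policy 0) the hash covers the source and
 * destination addresses (plus, for IPv6, the next header and flow label),
 * while in L4 mode (policy 1) the protocol and TCP/UDP ports are mixed in
 * as well.
 */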
static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
	bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
	if (only_l3) {
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	} else {
		mlxsw_sp_mp_hash_header_set(recr2_pl,
					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
	}
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW is determining switch priority based on DSCP-bits, but the
	 * kernel is still doing that based on the ToS. Since there's a
	 * mismatch in bits we need to make sure to translate the right
	 * value ToS would observe, skipping the 2 least-significant ECN bits.
	 */
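	/* For example, DSCP 46 (EF) corresponds to ToS 0xb8 (46 << 2), which
	 * is the value rt_tos2priority() expects below.
	 */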
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mutex_init(&router->lock);
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	mlxsw_core_flush_owq();
err_dscp_init:
err_mp_hash_init:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
	mlxsw_core_flush_owq();
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
}