// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include "main.h"

/* LAG group config flags. */
#define NFP_FL_LAG_LAST			BIT(1)
#define NFP_FL_LAG_FIRST		BIT(2)
#define NFP_FL_LAG_DATA			BIT(3)
#define NFP_FL_LAG_XON			BIT(4)
#define NFP_FL_LAG_SYNC			BIT(5)
#define NFP_FL_LAG_SWITCH		BIT(6)
#define NFP_FL_LAG_RESET		BIT(7)

/* LAG port state flags. */
#define NFP_PORT_LAG_LINK_UP		BIT(0)
#define NFP_PORT_LAG_TX_ENABLED		BIT(1)
#define NFP_PORT_LAG_CHANGED		BIT(2)

enum nfp_fl_lag_batch {
	NFP_FL_LAG_BATCH_FIRST,
	NFP_FL_LAG_BATCH_MEMBER,
	NFP_FL_LAG_BATCH_FINISHED
};

/**
 * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
 * @ctrl_flags: Configuration flags
 * @reserved: Reserved for future use
 * @ttl: Time to live of packet - host always sets to 0xff
 * @pkt_number: Config message packet number - increment for each message
 * @batch_ver: Batch version of messages - increment for each batch of messages
 * @group_id: Group ID applicable
 * @group_inst: Group instance number - increment when group is reused
 * @members: Array of 32-bit words listing all active group members
 */
struct nfp_flower_cmsg_lag_config {
	u8 ctrl_flags;
	u8 reserved[2];
	u8 ttl;
	__be32 pkt_number;
	__be32 batch_ver;
	__be32 group_id;
	__be32 group_inst;
	__be32 members[];
};

/**
 * struct nfp_fl_lag_group - list entry for each LAG group
 * @group_id: Assigned group ID for host/kernel sync
 * @group_inst: Group instance in case of ID reuse
 * @list: List entry
 * @master_ndev: Group master Netdev
 * @dirty: Marked if the group needs to be synced to HW
 * @offloaded: Marked if the group is currently offloaded to NIC
 * @to_remove: Marked if the group should be removed from NIC
 * @to_destroy: Marked if the group should be removed from driver
 * @slave_cnt: Number of slaves in group
 */
struct nfp_fl_lag_group {
	unsigned int group_id;
	u8 group_inst;
	struct list_head list;
	struct net_device *master_ndev;
	bool dirty;
	bool offloaded;
	bool to_remove;
	bool to_destroy;
	unsigned int slave_cnt;
};

#define NFP_FL_LAG_PKT_NUMBER_MASK	GENMASK(30, 0)
#define NFP_FL_LAG_VERSION_MASK		GENMASK(22, 0)
#define NFP_FL_LAG_HOST_TTL		0xff

/* Use this ID with zero members to ack a batch config */
#define NFP_FL_LAG_SYNC_ID		0
#define NFP_FL_LAG_GROUP_MIN		1 /* ID 0 reserved */
#define NFP_FL_LAG_GROUP_MAX		32 /* IDs 1 to 31 are valid */

/* wait for more config */
#define NFP_FL_LAG_DELAY		(msecs_to_jiffies(2))

#define NFP_FL_LAG_RETRANS_LIMIT	100 /* max retrans cmsgs to store */

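/* Return the next config message packet number, wrapping within the
 * 31-bit packet number mask.
 */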
static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
{
	lag->pkt_num++;
	lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;

	return lag->pkt_num;
}

static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
{
	/* LSB is not considered by firmware so add 2 for each increment. */
	lag->batch_ver += 2;
	lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;

	/* Zero is reserved by firmware. */
	if (!lag->batch_ver)
		lag->batch_ver += 2;
}

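/* Allocate a free group ID and add a new group entry for @master to the
 * LAG group list. Called with lag->lock held.
 */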
static struct nfp_fl_lag_group *
nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
{
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	int id;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
			    NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
	if (id < 0) {
		nfp_flower_cmsg_warn(priv->app,
				     "No more bonding groups available\n");
		return ERR_PTR(id);
	}

	group = kmalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		ida_simple_remove(&lag->ida_handle, id);
		return ERR_PTR(-ENOMEM);
	}

	group->group_id = id;
	group->master_ndev = master;
	group->dirty = true;
	group->offloaded = false;
	group->to_remove = false;
	group->to_destroy = false;
	group->slave_cnt = 0;
	group->group_inst = ++lag->global_inst;
	list_add_tail(&group->list, &lag->group_list);

	return group;
}

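/* Find the group entry whose bond master is @master, or NULL if the bond
 * is not tracked. Called with lag->lock held.
 */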
static struct nfp_fl_lag_group *
nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
					  struct net_device *master)
{
	struct nfp_fl_lag_group *entry;

	if (!master)
		return NULL;

	list_for_each_entry(entry, &lag->group_list, list)
		if (entry->master_ndev == master)
			return entry;

	return NULL;
}

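/* Look up the group mastered by @netdev and report its group ID, the
 * current 3-byte batch version and the group instance for use as offload
 * metadata.
 */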
static int nfp_fl_lag_get_group_info(struct nfp_app *app,
				     struct net_device *netdev,
				     __be16 *group_id,
				     u8 *batch_ver,
				     u8 *group_inst)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	__be32 temp_vers;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  netdev);
	if (!group) {
		mutex_unlock(&priv->nfp_lag.lock);
		return -ENOENT;
	}

	if (group_id)
		*group_id = cpu_to_be16(group->group_id);

	if (batch_ver) {
		temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
					NFP_FL_PRE_LAG_VER_OFF);
		memcpy(batch_ver, &temp_vers, 3);
	}

	if (group_inst)
		*group_inst = group->group_inst;

	mutex_unlock(&priv->nfp_lag.lock);

	return 0;
}

int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
				       struct net_device *master,
				       struct nfp_fl_pre_lag *pre_act,
				       struct netlink_ext_ack *extack)
{
	if (nfp_fl_lag_get_group_info(app, master, &pre_act->group_id,
				      pre_act->lag_version,
				      &pre_act->instance)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
		return -ENOENT;
	}

	return 0;
}

void nfp_flower_lag_get_info_from_netdev(struct nfp_app *app,
					 struct net_device *netdev,
					 struct nfp_tun_neigh_lag *lag)
{
	nfp_fl_lag_get_group_info(app, netdev, NULL,
				  lag->lag_version, &lag->lag_instance);
}

int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	int group_id = -ENOENT;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  master);
	if (group)
		group_id = group->group_id;
	mutex_unlock(&priv->nfp_lag.lock);

	return group_id;
}

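/* Build and send one LAG config cmsg for @group listing its currently
 * active members. When the batch is finished (or a reset is pending) the
 * reserved SYNC group ID with zero members is sent instead to close the
 * batch.
 */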
static int
nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
			struct net_device **active_members,
			unsigned int member_cnt, enum nfp_fl_lag_batch *batch)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv;
	unsigned long int flags;
	unsigned int size, i;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt;
	skb = nfp_flower_cmsg_alloc(priv->app, size,
				    NFP_FLOWER_CMSG_TYPE_LAG_CONFIG,
				    GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = 0;

	/* Increment batch version for each new batch of config messages. */
	if (*batch == NFP_FL_LAG_BATCH_FIRST) {
		flags |= NFP_FL_LAG_FIRST;
		nfp_fl_increment_version(lag);
		*batch = NFP_FL_LAG_BATCH_MEMBER;
	}

	/* If it is a reset msg then it is also the end of the batch. */
	if (lag->rst_cfg) {
		flags |= NFP_FL_LAG_RESET;
		*batch = NFP_FL_LAG_BATCH_FINISHED;
	}

	/* To signal the end of a batch, both the switch and last flags are set
	 * and the reserved SYNC group ID is used.
	 */
	if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
		flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
		lag->rst_cfg = false;
		cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
		cmsg_payload->group_inst = 0;
	} else {
		cmsg_payload->group_id = cpu_to_be32(group->group_id);
		cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
	}

	cmsg_payload->reserved[0] = 0;
	cmsg_payload->reserved[1] = 0;
	cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
	cmsg_payload->ctrl_flags = flags;
	cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
	cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));

	for (i = 0; i < member_cnt; i++)
		cmsg_payload->members[i] =
			cpu_to_be32(nfp_repr_get_port_id(active_members[i]));

	nfp_ctrl_tx(priv->app->ctrl, skb);
	return 0;
}

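/* Delayed work handler: walk all tracked groups and push any pending
 * deletes and membership updates to the firmware as a single config batch,
 * rescheduling itself if a config message could not be sent.
 */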
static void nfp_fl_lag_do_work(struct work_struct *work)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
	struct nfp_fl_lag_group *entry, *storage;
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *priv;
	struct nfp_fl_lag *lag;
	int err;

	delayed_work = to_delayed_work(work);
	lag = container_of(delayed_work, struct nfp_fl_lag, work);
	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		struct net_device *iter_netdev, **acti_netdevs;
		struct nfp_flower_repr_priv *repr_priv;
		int active_count = 0, slaves = 0;
		struct nfp_repr *repr;
		unsigned long *flags;

		if (entry->to_remove) {
			/* Active count of 0 deletes group on hw. */
			err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
						      &batch);
			if (!err) {
				entry->to_remove = false;
				entry->offloaded = false;
			} else {
				nfp_flower_cmsg_warn(priv->app,
						     "group delete failed\n");
				schedule_delayed_work(&lag->work,
						      NFP_FL_LAG_DELAY);
				continue;
			}

			if (entry->to_destroy) {
				ida_simple_remove(&lag->ida_handle,
						  entry->group_id);
				list_del(&entry->list);
				kfree(entry);
			}
			continue;
		}

		acti_netdevs = kmalloc_array(entry->slave_cnt,
					     sizeof(*acti_netdevs), GFP_KERNEL);
		if (!acti_netdevs) {
			schedule_delayed_work(&lag->work,
					      NFP_FL_LAG_DELAY);
			continue;
		}

		/* Include sanity check in the loop. It may be that a bond has
		 * changed between processing the last notification and the
		 * work queue triggering. If the number of slaves has changed
		 * or it now contains netdevs that cannot be offloaded, ignore
		 * the group until pending notifications are processed.
		 */
		rcu_read_lock();
		for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
			if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
				slaves = 0;
				break;
			}

			repr = netdev_priv(iter_netdev);

			if (repr->app != priv->app) {
				slaves = 0;
				break;
			}

			slaves++;
			if (slaves > entry->slave_cnt)
				break;

			/* Check the ports for state changes. */
			repr_priv = repr->app_priv;
			flags = &repr_priv->lag_port_flags;

			if (*flags & NFP_PORT_LAG_CHANGED) {
				*flags &= ~NFP_PORT_LAG_CHANGED;
				entry->dirty = true;
			}

			if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
			    (*flags & NFP_PORT_LAG_LINK_UP))
				acti_netdevs[active_count++] = iter_netdev;
		}
		rcu_read_unlock();

		if (slaves != entry->slave_cnt || !entry->dirty) {
			kfree(acti_netdevs);
			continue;
		}

		err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
					      active_count, &batch);
		if (!err) {
			entry->offloaded = true;
			entry->dirty = false;
		} else {
			nfp_flower_cmsg_warn(priv->app,
					     "group offload failed\n");
			schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
		}

		kfree(acti_netdevs);
	}

	/* End the config batch if at least one packet has been batched. */
	if (batch == NFP_FL_LAG_BATCH_MEMBER) {
		batch = NFP_FL_LAG_BATCH_FINISHED;
		err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "group batch end cmsg failed\n");
	}

	mutex_unlock(&lag->lock);
}

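/* Store a firmware data cmsg so it can be retransmitted once an XON is
 * received.
 */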
static int
nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
		return -EINVAL;

	/* Drop cmsg retrans if storage limit is exceeded to prevent
	 * overloading. If the fw notices that expected messages have not been
	 * received in a given time block, it will request a full resync.
	 */
	if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
		return -ENOSPC;

	__skb_queue_tail(&lag->retrans_skbs, skb);

	return 0;
}

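/* Retransmit all stored cmsgs to the firmware. */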
static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
{
	struct nfp_flower_priv *priv;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	while ((skb = __skb_dequeue(&lag->retrans_skbs)))
		nfp_ctrl_tx(priv->app->ctrl, skb);
}

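/* Handle a LAG config cmsg from the firmware. Returns true if the skb has
 * been queued for retransmission and should not be freed by the caller.
 */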
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group_entry;
	unsigned long int flags;
	bool store_skb = false;
	int err;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = cmsg_payload->ctrl_flags;

	/* Note the intentional fall through below. If DATA and XON are both
	 * set, the message will be stored and sent again with the rest of the
	 * unprocessed messages list.
	 */

	/* Store */
	if (flags & NFP_FL_LAG_DATA)
		if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
			store_skb = true;

	/* Send stored */
	if (flags & NFP_FL_LAG_XON)
		nfp_fl_send_unprocessed(&priv->nfp_lag);

	/* Resend all */
	if (flags & NFP_FL_LAG_SYNC) {
		/* To resend all config:
		 * 1) Clear all unprocessed messages
		 * 2) Mark all groups dirty
		 * 3) Reset NFP group config
		 * 4) Schedule a LAG config update
		 */

		__skb_queue_purge(&priv->nfp_lag.retrans_skbs);

		mutex_lock(&priv->nfp_lag.lock);
		list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
				    list)
			group_entry->dirty = true;

		err = nfp_flower_lag_reset(&priv->nfp_lag);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "mem err in group reset msg\n");
		mutex_unlock(&priv->nfp_lag.lock);

		schedule_delayed_work(&priv->nfp_lag.work, 0);
	}

	return store_skb;
}

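/* Mark a group for removal from the firmware and schedule the config work.
 * Called with lag->lock held.
 */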
static void
nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
				 struct nfp_fl_lag_group *group)
{
	group->to_remove = true;

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

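/* Handle unregistration of a bond master: mark its group for removal from
 * the firmware and for destruction in the driver.
 */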
static void
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
				 struct net_device *master)
{
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	if (!netif_is_bond_master(master))
		return;

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
	if (!group) {
		mutex_unlock(&lag->lock);
		nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
			 netdev_name(master));
		return;
	}

	group->to_remove = true;
	group->to_destroy = true;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

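/* Handle a CHANGEUPPER event on a bond: verify that every slave is a repr
 * belonging to this app and that the bond tx_type/hash is supported, then
 * create or refresh the group, or schedule its removal if it can no longer
 * be offloaded.
 */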
static int
nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
			     struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *iter_netdev;
	struct netdev_lag_upper_info *lag_upper_info;
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	unsigned int slave_count = 0;
	bool can_offload = true;
	struct nfp_repr *repr;

	if (!netif_is_lag_master(upper))
		return 0;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, iter_netdev) {
		if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
			can_offload = false;
			break;
		}
		repr = netdev_priv(iter_netdev);

		/* Ensure all ports are created by the same app/on same card. */
		if (repr->app != priv->app) {
			can_offload = false;
			break;
		}

		slave_count++;
	}
	rcu_read_unlock();

	lag_upper_info = info->upper_info;

	/* Firmware supports active/backup and L3/L4 hash bonds. */
	if (lag_upper_info &&
	    lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
	     (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
	      lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
	      lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
		can_offload = false;
		nfp_flower_cmsg_warn(priv->app,
				     "Unable to offload tx_type %u hash %u\n",
				     lag_upper_info->tx_type,
				     lag_upper_info->hash_type);
	}

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);

	if (slave_count == 0 || !can_offload) {
		/* Cannot offload the group - remove if previously offloaded. */
		if (group && group->offloaded)
			nfp_fl_lag_schedule_group_remove(lag, group);

		mutex_unlock(&lag->lock);
		return 0;
	}

	if (!group) {
		group = nfp_fl_lag_group_create(lag, upper);
		if (IS_ERR(group)) {
			mutex_unlock(&lag->lock);
			return PTR_ERR(group);
		}
	}

	group->dirty = true;
	group->slave_cnt = slave_count;

	/* Group may have been on queue for removal but is now offloadable. */
	group->to_remove = false;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
	return 0;
}

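/* Handle a CHANGELOWERSTATE event on a repr slave: record its link and
 * tx-enabled state in the port flags and schedule a config update.
 */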
static void
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
			  struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_flower_priv *priv;
	struct nfp_repr *repr;
	unsigned long *flags;

	if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
		return;

	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	repr = netdev_priv(netdev);

	/* Verify that the repr is associated with this app. */
	if (repr->app != priv->app)
		return;

	repr_priv = repr->app_priv;
	flags = &repr_priv->lag_port_flags;

	mutex_lock(&lag->lock);
	if (lag_lower_info->link_up)
		*flags |= NFP_PORT_LAG_LINK_UP;
	else
		*flags &= ~NFP_PORT_LAG_LINK_UP;

	if (lag_lower_info->tx_enabled)
		*flags |= NFP_PORT_LAG_TX_ENABLED;
	else
		*flags &= ~NFP_PORT_LAG_TX_ENABLED;

	*flags |= NFP_PORT_LAG_CHANGED;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
				struct net_device *netdev,
				unsigned long event, void *ptr)
{
	struct nfp_fl_lag *lag = &priv->nfp_lag;
	int err;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = nfp_fl_lag_changeupper_event(lag, ptr);
		if (err)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case NETDEV_CHANGELOWERSTATE:
		nfp_fl_lag_changels_event(lag, netdev, ptr);
		return NOTIFY_OK;
	case NETDEV_UNREGISTER:
		nfp_fl_lag_schedule_group_delete(lag, netdev);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;

	lag->rst_cfg = true;
	return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
}

void nfp_flower_lag_init(struct nfp_fl_lag *lag)
{
	INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
	INIT_LIST_HEAD(&lag->group_list);
	mutex_init(&lag->lock);
	ida_init(&lag->ida_handle);

	__skb_queue_head_init(&lag->retrans_skbs);

	/* 0 is a reserved batch version so increment to first valid value. */
	nfp_fl_increment_version(lag);
}

void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
{
	struct nfp_fl_lag_group *entry, *storage;

	cancel_delayed_work_sync(&lag->work);

	__skb_queue_purge(&lag->retrans_skbs);

	/* Remove all groups. */
	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&lag->lock);
	mutex_destroy(&lag->lock);
	ida_destroy(&lag->ida_handle);
}