Lines Matching full:lag

6 /* LAG group config flags. */
15 /* LAG port state flags. */
27 * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
49 * struct nfp_fl_lag_group - list entry for each LAG group
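The struct references in the rest of this listing make the shape of the two core types fairly clear. A hedged sketch, reconstructed only from the fields the matches below actually touch; exact types, ordering, and any remaining fields in lag_conf.c are assumptions:

    struct nfp_fl_lag {
        struct delayed_work work;          /* deferred config worker (274, 665) */
        struct mutex lock;                 /* guards group_list (277, 489, 556) */
        struct list_head group_list;       /* all active LAG groups (136, 666) */
        struct ida ida_handle;             /* group id allocator (114, 668) */
        unsigned int pkt_num;              /* wrapping cmsg sequence number (88) */
        unsigned int batch_ver;            /* config batch version, never 0 (97) */
        u8 global_inst;                    /* bumped once per created group (135) */
        bool rst_cfg;                      /* next cmsg resets fw state (231, 659) */
        struct sk_buff_head retrans_skbs;  /* cmsgs parked for retransmit (398) */
    };

    struct nfp_fl_lag_group {
        unsigned int group_id;             /* id from the ida (assumed name) */
        u8 group_inst;                     /* instance stamp from global_inst (135) */
        struct list_head list;             /* linkage on lag->group_list (136) */
        /* ... per-group state such as the master netdev is omitted ... */
    };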
86 static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag) in nfp_fl_get_next_pkt_number() argument
88 lag->pkt_num++; in nfp_fl_get_next_pkt_number()
89 lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK; in nfp_fl_get_next_pkt_number()
91 return lag->pkt_num; in nfp_fl_get_next_pkt_number()
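Lines 86-91: every outgoing config message is stamped with a fresh packet number so the firmware can detect lost or reordered cmsgs. The arithmetic is just a pre-increment under a mask; a minimal user-space rendering, with an assumed mask width:

    #include <stdio.h>

    /* Assumption: stands in for NFP_FL_LAG_PKT_NUMBER_MASK, whose real
     * width lives in the driver header. */
    #define PKT_NUMBER_MASK 0x7fffffffu

    static unsigned int next_pkt_number(unsigned int *pkt_num)
    {
        *pkt_num = (*pkt_num + 1) & PKT_NUMBER_MASK; /* wraps to 0; 0 is legal here */
        return *pkt_num;
    }

    int main(void)
    {
        unsigned int seq = PKT_NUMBER_MASK;          /* force the wrap */
        printf("%u\n", next_pkt_number(&seq));       /* prints 0 */
        return 0;
    }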
94 static void nfp_fl_increment_version(struct nfp_fl_lag *lag) in nfp_fl_increment_version() argument
97 lag->batch_ver += 2; in nfp_fl_increment_version()
98 lag->batch_ver &= NFP_FL_LAG_VERSION_MASK; in nfp_fl_increment_version()
101 if (!lag->batch_ver) in nfp_fl_increment_version()
102 lag->batch_ver += 2; in nfp_fl_increment_version()
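Lines 94-102 are the counterpart for the batch version, with two twists: the step is 2 (presumably because the firmware assigns separate meaning to the LSB) and the value 0 is never handed out, so a wrap lands on 2 instead. Illustrative user-space rendering, with an assumed mask value:

    #include <stdio.h>

    /* Assumption: stands in for NFP_FL_LAG_VERSION_MASK. */
    #define VERSION_MASK 0x007fffffu

    static unsigned int bump_version(unsigned int v)
    {
        v = (v + 2) & VERSION_MASK;  /* step by 2: the LSB is left alone */
        if (!v)                      /* wrapped exactly onto 0: skip the reserved value */
            v += 2;
        return v;
    }

    int main(void)
    {
        unsigned int v = VERSION_MASK - 1;            /* even value just below the mask */
        printf("0x%x -> 0x%x\n", v, bump_version(v)); /* 0x7ffffe -> 0x2 */
        return 0;
    }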
106 nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master) in nfp_fl_lag_group_create() argument
112 priv = container_of(lag, struct nfp_flower_priv, nfp_lag); in nfp_fl_lag_group_create()
114 id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN, in nfp_fl_lag_group_create()
124 ida_simple_remove(&lag->ida_handle, id); in nfp_fl_lag_group_create()
135 group->group_inst = ++lag->global_inst; in nfp_fl_lag_group_create()
136 list_add_tail(&group->list, &lag->group_list); in nfp_fl_lag_group_create()
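Lines 106-136 show the allocation pattern for a new group: reserve an id from the IDA first, and hand it back if the later allocation fails, so ids never leak. A hedged reconstruction; the upper bound, error values, and the priv lookup at 112 (elided here) are assumptions or paraphrases:

    static struct nfp_fl_lag_group *
    nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
    {
        struct nfp_fl_lag_group *group;
        int id;

        /* assumed upper bound; ida_simple_get()'s end is exclusive */
        id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
                            NFP_FL_LAG_GROUP_MAX + 1, GFP_KERNEL);
        if (id < 0)
            return ERR_PTR(id);

        group = kmalloc(sizeof(*group), GFP_KERNEL);
        if (!group) {
            ida_simple_remove(&lag->ida_handle, id);  /* undo the reservation (124) */
            return ERR_PTR(-ENOMEM);
        }

        group->group_id = id;
        group->group_inst = ++lag->global_inst;  /* instance stamps are never reused (135) */
        list_add_tail(&group->list, &lag->group_list);  /* caller holds lag->lock */

        return group;
    }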
142 nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag, in nfp_fl_lag_find_group_for_master_with_lag() argument
150 list_for_each_entry(entry, &lag->group_list, list) in nfp_fl_lag_find_group_for_master_with_lag()
171 NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action"); in nfp_flower_lag_populate_pre_action()
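Lines 142-150 are the lookup shared by the event handlers and by the action-offload path at 171, which raises the extack error above when a flow references a bond that has no group yet. A hedged sketch of the walk; the master field name is an assumption, and callers are expected to hold lag->lock:

    static struct nfp_fl_lag_group *
    nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
                                              struct net_device *master)
    {
        struct nfp_fl_lag_group *entry;

        if (!master)
            return NULL;

        list_for_each_entry(entry, &lag->group_list, list)
            if (entry->master_ndev == master)    /* assumed field name */
                return entry;

        return NULL;
    }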
202 nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group, in nfp_fl_lag_config_group() argument
212 priv = container_of(lag, struct nfp_flower_priv, nfp_lag); in nfp_fl_lag_config_group()
226 nfp_fl_increment_version(lag); in nfp_fl_lag_config_group()
231 if (lag->rst_cfg) { in nfp_fl_lag_config_group()
241 lag->rst_cfg = false; in nfp_fl_lag_config_group()
253 cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver); in nfp_fl_lag_config_group()
254 cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag)); in nfp_fl_lag_config_group()
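Lines 202-254: nfp_fl_lag_config_group() builds the control message. Two details matter here: a NULL group is legal and is used both to close a batch (373) and to issue a full reset (660), and the one-shot rst_cfg flag (231, 241) is folded into the first message after a reset. A hedged sketch of that flag handling; both the flag and the payload field names are assumptions:

        unsigned int flags = 0;

        if (lag->rst_cfg) {
            flags |= NFP_FL_LAG_RESET;  /* assumed name: tells fw to drop old state */
            lag->rst_cfg = false;       /* one-shot, cleared once sent (241) */
        }

        cmsg_payload->ctrl_flags = flags;                                         /* assumed field */
        cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);                    /* 253 */
        cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));  /* 254 */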
270 struct nfp_fl_lag *lag; in nfp_fl_lag_do_work() local
274 lag = container_of(delayed_work, struct nfp_fl_lag, work); in nfp_fl_lag_do_work()
275 priv = container_of(lag, struct nfp_flower_priv, nfp_lag); in nfp_fl_lag_do_work()
277 mutex_lock(&lag->lock); in nfp_fl_lag_do_work()
278 list_for_each_entry_safe(entry, storage, &lag->group_list, list) { in nfp_fl_lag_do_work()
287 err = nfp_fl_lag_config_group(lag, entry, NULL, 0, in nfp_fl_lag_do_work()
295 schedule_delayed_work(&lag->work, in nfp_fl_lag_do_work()
301 ida_simple_remove(&lag->ida_handle, in nfp_fl_lag_do_work()
356 err = nfp_fl_lag_config_group(lag, entry, acti_netdevs, in nfp_fl_lag_do_work()
364 schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); in nfp_fl_lag_do_work()
373 err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch); in nfp_fl_lag_do_work()
379 mutex_unlock(&lag->lock); in nfp_fl_lag_do_work()
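Lines 270-379 are the deferred worker that actually talks to firmware. The shape implied by the matches: walk the group list under lag->lock, push config per group, re-arm the work item on transient failure rather than losing the update (295, 364), release the ida id once a deleted group's final cmsg went out (301), and finish with a NULL-group call to close the batch (373). A hedged skeleton; the removal flag and member gathering are paraphrased:

        mutex_lock(&lag->lock);
        list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
            if (entry->to_remove) {                  /* assumed flag name */
                err = nfp_fl_lag_config_group(lag, entry, NULL, 0, &batch);
                if (err) {
                    schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
                    continue;                        /* retry the delete later */
                }
                ida_simple_remove(&lag->ida_handle, entry->group_id);
                list_del(&entry->list);
                kfree(entry);
                continue;
            }

            /* ... collect this group's active member netdevs ... */
            err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
                                          active_count, &batch);
            if (err)
                schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
        }

        /* a NULL group closes out the batch opened above (373) */
        nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
        mutex_unlock(&lag->lock);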
383 nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb) in nfp_fl_lag_put_unprocessed() argument
395 if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT) in nfp_fl_lag_put_unprocessed()
398 __skb_queue_tail(&lag->retrans_skbs, skb); in nfp_fl_lag_put_unprocessed()
403 static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag) in nfp_fl_send_unprocessed() argument
408 priv = container_of(lag, struct nfp_flower_priv, nfp_lag); in nfp_fl_send_unprocessed()
410 while ((skb = __skb_dequeue(&lag->retrans_skbs))) in nfp_fl_send_unprocessed()
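Lines 383-410 implement a bounded park-and-replay queue for control messages the firmware could not process yet: at most NFP_FL_LAG_RETRANS_LIMIT skbs are held, and they drain in FIFO order once the firmware is ready. A hedged reconstruction; the control-channel send helper and the exact error value are assumptions:

    static int nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
    {
        if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
            return -ENOSPC;                     /* bound memory: refuse past the limit */

        __skb_queue_tail(&lag->retrans_skbs, skb);
        return 0;
    }

    static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
    {
        struct nfp_flower_priv *priv;
        struct sk_buff *skb;

        priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

        while ((skb = __skb_dequeue(&lag->retrans_skbs)))
            nfp_ctrl_tx(priv->app->ctrl, skb);  /* replay in arrival order (assumed helper) */
    }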
446 * 4) Schedule a LAG config update in nfp_flower_lag_unprocessed_msg()
469 nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag, in nfp_fl_lag_schedule_group_remove() argument
474 schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); in nfp_fl_lag_schedule_group_remove()
478 nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag, in nfp_fl_lag_schedule_group_delete() argument
484 priv = container_of(lag, struct nfp_flower_priv, nfp_lag); in nfp_fl_lag_schedule_group_delete()
489 mutex_lock(&lag->lock); in nfp_fl_lag_schedule_group_delete()
490 group = nfp_fl_lag_find_group_for_master_with_lag(lag, master); in nfp_fl_lag_schedule_group_delete()
492 mutex_unlock(&lag->lock); in nfp_fl_lag_schedule_group_delete()
500 mutex_unlock(&lag->lock); in nfp_fl_lag_schedule_group_delete()
502 schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); in nfp_fl_lag_schedule_group_delete()
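Lines 469-502 split teardown in two: "remove" is used when the group object is already in hand and just flags it for the worker, while "delete" starts from the master netdev, so it must look the group up under the lock first (490) and bail out quietly when none exists (492). A hedged sketch of the remove half; the flag name is an assumption:

    static void
    nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
                                     struct nfp_fl_lag_group *group)
    {
        group->to_remove = true;  /* assumed flag: worker sends the delete cmsg */
        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
    }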
506 nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag, in nfp_fl_lag_changeupper_event() argument
520 priv = container_of(lag, struct nfp_flower_priv, nfp_lag); in nfp_fl_lag_changeupper_event()
556 mutex_lock(&lag->lock); in nfp_fl_lag_changeupper_event()
557 group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper); in nfp_fl_lag_changeupper_event()
562 nfp_fl_lag_schedule_group_remove(lag, group); in nfp_fl_lag_changeupper_event()
564 mutex_unlock(&lag->lock); in nfp_fl_lag_changeupper_event()
569 group = nfp_fl_lag_group_create(lag, upper); in nfp_fl_lag_changeupper_event()
571 mutex_unlock(&lag->lock); in nfp_fl_lag_changeupper_event()
581 mutex_unlock(&lag->lock); in nfp_fl_lag_changeupper_event()
583 schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); in nfp_fl_lag_changeupper_event()
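Lines 506-583: on NETDEV_CHANGEUPPER the driver decides whether the bond is offloadable at all; if not, any existing group is torn down (562), otherwise a group is found or created (557, 569), marked for resync, and the worker is kicked (583). A hedged sketch of that control flow; the eligibility check and the dirty flag are paraphrased:

        mutex_lock(&lag->lock);
        group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);

        if (!can_offload) {            /* paraphrase of the bond-mode/member checks */
            if (group)
                nfp_fl_lag_schedule_group_remove(lag, group);
            mutex_unlock(&lag->lock);
            return 0;
        }

        if (!group) {
            group = nfp_fl_lag_group_create(lag, upper);
            if (IS_ERR(group)) {
                mutex_unlock(&lag->lock);
                return PTR_ERR(group);
            }
        }

        group->dirty = true;           /* assumed flag: forces a config push */
        mutex_unlock(&lag->lock);

        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
        return 0;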
588 nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev, in nfp_fl_lag_changels_event() argument
604 priv = container_of(lag, struct nfp_flower_priv, nfp_lag); in nfp_fl_lag_changels_event()
614 mutex_lock(&lag->lock); in nfp_fl_lag_changels_event()
626 mutex_unlock(&lag->lock); in nfp_fl_lag_changels_event()
628 schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); in nfp_fl_lag_changels_event()
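Lines 588-628 handle NETDEV_CHANGELOWERSTATE, i.e. a member port's link or tx state changing under the bond. For lag ports the notifier payload carries a struct netdev_lag_lower_state_info; the driver folds it into its per-port state under lag->lock and lets the worker publish the new member set. A hedged sketch; the per-port flag update is paraphrased:

        struct netdev_lag_lower_state_info *lower_info;

        if (!netif_is_lag_port(netdev))
            return;

        lower_info = info->lower_state_info;
        if (!lower_info)
            return;

        mutex_lock(&lag->lock);
        /* record lower_info->link_up / lower_info->tx_enabled for this port */
        mutex_unlock(&lag->lock);

        schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);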
635 struct nfp_fl_lag *lag = &priv->nfp_lag; in nfp_flower_lag_netdev_event() local
640 err = nfp_fl_lag_changeupper_event(lag, ptr); in nfp_flower_lag_netdev_event()
645 nfp_fl_lag_changels_event(lag, netdev, ptr); in nfp_flower_lag_netdev_event()
648 nfp_fl_lag_schedule_group_delete(lag, netdev); in nfp_flower_lag_netdev_event()
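Lines 635-648 are the single notifier entry point; the three handlers above hang off a switch on the event code. A hedged sketch of the dispatch, with return-value handling paraphrased:

        switch (event) {
        case NETDEV_CHANGEUPPER:
            err = nfp_fl_lag_changeupper_event(lag, ptr);
            if (err)
                return NOTIFY_BAD;
            return NOTIFY_OK;
        case NETDEV_CHANGELOWERSTATE:
            nfp_fl_lag_changels_event(lag, netdev, ptr);   /* void: best effort */
            return NOTIFY_OK;
        case NETDEV_UNREGISTER:
            nfp_fl_lag_schedule_group_delete(lag, netdev); /* bond master going away */
            return NOTIFY_OK;
        }

        return NOTIFY_DONE;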
655 int nfp_flower_lag_reset(struct nfp_fl_lag *lag) in nfp_flower_lag_reset() argument
659 lag->rst_cfg = true; in nfp_flower_lag_reset()
660 return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch); in nfp_flower_lag_reset()
663 void nfp_flower_lag_init(struct nfp_fl_lag *lag) in nfp_flower_lag_init() argument
665 INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work); in nfp_flower_lag_init()
666 INIT_LIST_HEAD(&lag->group_list); in nfp_flower_lag_init()
667 mutex_init(&lag->lock); in nfp_flower_lag_init()
668 ida_init(&lag->ida_handle); in nfp_flower_lag_init()
670 __skb_queue_head_init(&lag->retrans_skbs); in nfp_flower_lag_init()
673 nfp_fl_increment_version(lag); in nfp_flower_lag_init()
676 void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag) in nfp_flower_lag_cleanup() argument
680 cancel_delayed_work_sync(&lag->work); in nfp_flower_lag_cleanup()
682 __skb_queue_purge(&lag->retrans_skbs); in nfp_flower_lag_cleanup()
685 mutex_lock(&lag->lock); in nfp_flower_lag_cleanup()
686 list_for_each_entry_safe(entry, storage, &lag->group_list, list) { in nfp_flower_lag_cleanup()
690 mutex_unlock(&lag->lock); in nfp_flower_lag_cleanup()
691 mutex_destroy(&lag->lock); in nfp_flower_lag_cleanup()
692 ida_destroy(&lag->ida_handle); in nfp_flower_lag_cleanup()
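Lines 676-692 give the full teardown order, which is worth spelling out: cancel the worker synchronously before anything it might touch is freed, purge the retransmit queue, drop the remaining groups under the lock, and only then destroy the lock and the ida. A hedged reconstruction; the per-group free is paraphrased:

    void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
    {
        struct nfp_fl_lag_group *entry, *storage;

        cancel_delayed_work_sync(&lag->work);   /* no worker racing the teardown */

        __skb_queue_purge(&lag->retrans_skbs);  /* drop any parked retransmits */

        mutex_lock(&lag->lock);
        list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
            list_del(&entry->list);
            kfree(entry);
        }
        mutex_unlock(&lag->lock);
        mutex_destroy(&lag->lock);
        ida_destroy(&lag->ida_handle);          /* releases any ids still held */
    }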