/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the complete
 * terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "./cmsg.h"

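/* Return the flower control message header at the start of @skb's data. */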
static struct nfp_flower_cmsg_hdr *
nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
{
	return (struct nfp_flower_cmsg_hdr *)skb->data;
}

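/* Allocate a control message skb with room for @size bytes of payload plus
 * the flower control message header, and fill in the header with version
 * NFP_FLOWER_CMSG_VER1 and the given message @type.  Returns NULL if the
 * allocation fails.
 */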
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
		      enum nfp_flower_cmsg_type_port type, gfp_t flag)
{
	struct nfp_flower_cmsg_hdr *ch;
	struct sk_buff *skb;

	size += NFP_FLOWER_CMSG_HLEN;

	skb = nfp_app_ctrl_msg_alloc(app, size, flag);
	if (!skb)
		return NULL;

	ch = nfp_flower_cmsg_get_hdr(skb);
	ch->pad = 0;
	ch->version = NFP_FLOWER_CMSG_VER1;
	ch->type = type;
	skb_put(skb, size);

	return skb;
}

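/* Allocate a MAC_REPR control message with room for @num_ports port entries
 * and initialise its fixed fields.  The per-port entries are filled in
 * afterwards by nfp_flower_cmsg_mac_repr_add().
 */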
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
{
	struct nfp_flower_cmsg_mac_repr *msg;
	struct sk_buff *skb;
	unsigned int size;

	size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
	skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
				    GFP_KERNEL);
	if (!skb)
		return NULL;

	msg = nfp_flower_cmsg_get_data(skb);
	memset(msg->reserved, 0, sizeof(msg->reserved));
	msg->num_ports = num_ports;

	return skb;
}

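/* Fill in port entry @idx of a MAC_REPR message started with
 * nfp_flower_cmsg_mac_repr_start(), recording the NBI, NBI port and
 * physical port numbers for that MAC representor.
 */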
void
nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
			     unsigned int nbi, unsigned int nbi_port,
			     unsigned int phys_port)
{
	struct nfp_flower_cmsg_mac_repr *msg;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->ports[idx].idx = idx;
	msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI;
	msg->ports[idx].nbi_port = nbi_port;
	msg->ports[idx].phys_port = phys_port;
}

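/* Send a PORT_MOD control message reporting the representor's carrier state
 * and MTU to the firmware.  When @mtu_only is set the message is flagged as
 * an MTU change only.  Returns 0 on success or -ENOMEM if the message could
 * not be allocated.
 */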
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok,
			    unsigned int mtu, bool mtu_only)
{
	struct nfp_flower_cmsg_portmod *msg;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
				    NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	msg->reserved = 0;
	msg->info = carrier_ok;

	if (mtu_only)
		msg->info |= NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY;

	msg->mtu = cpu_to_be16(mtu);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}

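/* Send a PORT_REIFY control message telling the firmware whether the
 * representor for this port now @exists.  Returns 0 on success or -ENOMEM.
 */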
int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
{
	struct nfp_flower_cmsg_portreify *msg;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
				    NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
				    GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	msg->reserved = 0;
	msg->info = cpu_to_be16(exists);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}

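/* Check whether a received PORT_MOD message acknowledges a pending MTU
 * change request.  If it does, record the ack, clear the pending request
 * and wake up the waiter.  Returns true if the message was consumed as an
 * MTU ack.
 */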
static bool
nfp_flower_process_mtu_ack(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_flower_cmsg_portmod *msg;

	msg = nfp_flower_cmsg_get_data(skb);

	if (!(msg->info & NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY))
		return false;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	if (!app_priv->mtu_conf.requested_val ||
	    app_priv->mtu_conf.portnum != be32_to_cpu(msg->portnum) ||
	    be16_to_cpu(msg->mtu) != app_priv->mtu_conf.requested_val) {
		/* Not an ack for requested MTU change. */
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return false;
	}

	app_priv->mtu_conf.ack = true;
	app_priv->mtu_conf.requested_val = 0;
	wake_up(&app_priv->mtu_conf.wait_q);
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return true;
}

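/* Handle a PORT_MOD notification from the firmware: update the carrier
 * state of the corresponding representor and, if the reported MTU is
 * non-zero, apply it.  RTNL is held around the netdev updates.
 */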
static void
nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_portmod *msg;
	struct net_device *netdev;
	bool link;

	msg = nfp_flower_cmsg_get_data(skb);
	link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK;

	rtnl_lock();
	rcu_read_lock();
	netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
	rcu_read_unlock();
	if (!netdev) {
		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
				     be32_to_cpu(msg->portnum));
		rtnl_unlock();
		return;
	}

	if (link) {
		u16 mtu = be16_to_cpu(msg->mtu);

		netif_carrier_on(netdev);

		/* An MTU of 0 from the firmware should be ignored */
		if (mtu)
			dev_set_mtu(netdev, mtu);
	} else {
		netif_carrier_off(netdev);
	}
	rtnl_unlock();
}

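/* Handle a PORT_REIFY reply from the firmware: count the reply and wake
 * any thread waiting on the reify wait queue.
 */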
static void
nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_cmsg_portreify *msg;
	bool exists;

	msg = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
	rcu_read_unlock();
	if (!exists) {
		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
				     be32_to_cpu(msg->portnum));
		return;
	}

	atomic_inc(&priv->reify_replies);
	wake_up_interruptible(&priv->reify_wait_queue);
}

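/* Dispatch a single queued control message according to its type.  The skb
 * is freed here unless a handler stored it for deferred processing, as the
 * LAG handler may do.
 */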
static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_flower_cmsg_hdr *cmsg_hdr;
	enum nfp_flower_cmsg_type_port type;
	bool skb_stored = false;

	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

	type = cmsg_hdr->type;
	switch (type) {
	case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
		nfp_flower_cmsg_portreify_rx(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
		nfp_flower_cmsg_portmod_rx(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
		nfp_tunnel_request_route(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
		nfp_tunnel_keep_alive(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
		if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
			skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
			break;
		}
		/* fall through */
	default:
		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
				     type);
		goto out;
	}

	if (!skb_stored)
		dev_consume_skb_any(skb);
	return;
out:
	dev_kfree_skb_any(skb);
}

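/* Work queue handler for deferred control messages.  Splice the high and
 * low priority queues into a local list, high priority first, and process
 * each message in turn.
 */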
void nfp_flower_cmsg_process_rx(struct work_struct *work)
{
	struct sk_buff_head cmsg_joined;
	struct nfp_flower_priv *priv;
	struct sk_buff *skb;

	priv = container_of(work, struct nfp_flower_priv, cmsg_work);
	skb_queue_head_init(&cmsg_joined);

	spin_lock_bh(&priv->cmsg_skbs_high.lock);
	skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
	spin_unlock_bh(&priv->cmsg_skbs_high.lock);

	spin_lock_bh(&priv->cmsg_skbs_low.lock);
	skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
	spin_unlock_bh(&priv->cmsg_skbs_low.lock);

	while ((skb = __skb_dequeue(&cmsg_joined)))
		nfp_flower_cmsg_process_one_rx(priv->app, skb);
}

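/* Queue a control message for the work queue.  PORT_REIFY and PORT_MOD
 * messages go on the high priority queue, everything else on the low
 * priority queue.  The message is dropped if the chosen queue is already
 * full.
 */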
static void
nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
{
	struct nfp_flower_priv *priv = app->priv;
	struct sk_buff_head *skb_head;

	if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
	    type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
		skb_head = &priv->cmsg_skbs_high;
	else
		skb_head = &priv->cmsg_skbs_low;

	if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
		nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
		dev_kfree_skb_any(skb);
		return;
	}

	skb_queue_tail(skb_head, skb);
	schedule_work(&priv->cmsg_work);
}

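/* Entry point for control messages received from the firmware.  Messages
 * with an unexpected header version are dropped; flow stats, MTU change
 * acks and tunnel neighbour acks are handled (or discarded) inline, and
 * everything else is queued for the work queue.
 */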
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_hdr *cmsg_hdr;

	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

	if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
		nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
				     cmsg_hdr->version);
		dev_kfree_skb_any(skb);
		return;
	}

	if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
		/* We need to deal with stats updates from HW asap */
		nfp_flower_rx_flow_stats(app, skb);
		dev_consume_skb_any(skb);
	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_MOD &&
		   nfp_flower_process_mtu_ack(app, skb)) {
		/* Handle MTU acks outside wq to prevent RTNL conflict. */
		dev_consume_skb_any(skb);
	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
		/* Acks from the NFP that the route is added - ignore. */
		dev_consume_skb_any(skb);
	} else {
		nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
	}
}