/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)

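/* The tag counters rely on u16 arithmetic wrapping around: even after
 * tag_alloc_next wraps past tag_alloc_last, the subtraction below still
 * yields the number of tags currently in flight.
 */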
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
	u16 used_tags;

	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;

	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}

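/* Allocate the next request tag.  Caller must hold the control message
 * lock.  Returns the tag on success or -EAGAIN when too many requests
 * are already in flight.
 */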
static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
	/* All FW communication for BPF is request-reply.  To make sure we
	 * don't reuse the message ID too early after timeout - limit the
	 * number of requests in flight.
	 */
	if (nfp_bpf_all_tags_busy(bpf)) {
		cmsg_warn(bpf, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
	return bpf->tag_alloc_next++;
}

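/* Release a tag and advance tag_alloc_last past any contiguous run of
 * already-freed tags, so the busy check above sees an accurate span.
 * Caller must hold the control message lock.
 */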
static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));

	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
	       bpf->tag_alloc_last != bpf->tag_alloc_next)
		bpf->tag_alloc_last++;
}

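/* Allocate a control message skb and reserve @size bytes of payload.
 * Returns NULL on allocation failure.
 */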
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
	struct sk_buff *skb;

	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_put(skb, size);

	return skb;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_req_map_op);
	size += sizeof(struct cmsg_key_value_pair) * n;

	return nfp_bpf_cmsg_alloc(bpf, size);
}

static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return hdr->type;
}

static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return be16_to_cpu(hdr->tag);
}

static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&bpf->cmsg_replies, skb) {
		msg_tag = nfp_bpf_cmsg_get_tag(skb);
		if (msg_tag == tag) {
			nfp_bpf_free_tag(bpf, tag);
			__skb_unlink(skb, &bpf->cmsg_replies);
			return skb;
		}
	}

	return NULL;
}

static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	if (!skb)
		nfp_bpf_free_tag(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
			int tag)
{
	struct sk_buff *skb;
	int i, err;

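	/* FW replies typically arrive within microseconds - poll briefly
	 * first so the common case does not have to sleep on the waitqueue.
	 */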
	for (i = 0; i < 50; i++) {
		udelay(4);
		skb = nfp_bpf_reply(bpf, tag);
		if (skb)
			return skb;
	}

	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
					       skb = nfp_bpf_reply(bpf, tag),
					       msecs_to_jiffies(5000));
	/* We didn't get a response - try one last time and atomically drop
	 * the tag even if no response is matched.
	 */
	if (!skb)
		skb = nfp_bpf_reply_drop_tag(bpf, tag);
	if (err < 0) {
		/* Don't leak a reply which raced in just before the error. */
		if (skb)
			dev_kfree_skb_any(skb);
		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
			  err == -ERESTARTSYS ? "interrupted" : "error",
			  type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
			  type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}

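/* Send a request and wait for the matching reply.  Takes ownership of
 * @skb and returns the reply skb or an ERR_PTR().  Tag allocation and
 * TX happen under the control message lock, so a reply cannot be
 * processed before its tag is marked busy.
 */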
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
	struct cmsg_hdr *hdr;
	int tag;

	nfp_ctrl_lock(bpf->app->ctrl);
	tag = nfp_bpf_alloc_tag(bpf);
	if (tag < 0) {
		nfp_ctrl_unlock(bpf->app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = CMSG_MAP_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(bpf->app, skb);

	nfp_ctrl_unlock(bpf->app->ctrl);

	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
	if (IS_ERR(skb))
		return skb;

	hdr = (struct cmsg_hdr *)skb->data;
	if (hdr->type != __CMSG_REPLY(type)) {
		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			  hdr->type, __CMSG_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			  type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}

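/* Translate the FW return code in a simple-reply header into a negative
 * errno (0 on success).
 */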
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
			 struct cmsg_reply_map_simple *reply)
{
	static const int res_table[] = {
		[CMSG_RC_SUCCESS]	= 0,
		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
	};
	u32 rc;

	rc = be32_to_cpu(reply->rc);
	if (rc >= ARRAY_SIZE(res_table)) {
		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
		return -EIO;
	}

	return res_table[rc];
}

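/* Ask the FW to allocate a map table.  Returns the FW-assigned table id
 * (tid) on success or a negative errno.
 */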
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
	struct cmsg_reply_map_alloc_tbl *reply;
	struct cmsg_req_map_alloc_tbl *req;
	struct sk_buff *skb;
	u32 tid;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->key_size = cpu_to_be32(map->key_size);
	req->value_size = cpu_to_be32(map->value_size);
	req->max_entries = cpu_to_be32(map->max_entries);
	req->map_type = cpu_to_be32(map->map_type);
	req->map_flags = 0;

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
				       sizeof(*reply));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	tid = be32_to_cpu(reply->tid);
	dev_consume_skb_any(skb);

	return tid;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
	struct cmsg_reply_map_free_tbl *reply;
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb) {
		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
		return;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
				       sizeof(*reply));
	if (IS_ERR(skb)) {
		cmsg_warn(bpf, "leaking map - I/O error\n");
		return;
	}

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

	dev_consume_skb_any(skb);
}

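/* Common helper for all single-element map operations - lookup, update,
 * delete and the two iteration requests share the same request and
 * reply layout; only the inputs and outputs copied differ.
 */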
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
		      enum nfp_bpf_cmsg_type op,
		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	struct bpf_map *map = &offmap->map;
	struct cmsg_reply_map_op *reply;
	struct cmsg_req_map_op *req;
	struct sk_buff *skb;
	int err;

	/* FW messages have no space for more than 32 bits of flags */
	if (flags >> 32)
		return -EOPNOTSUPP;

	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);
	req->count = cpu_to_be32(1);
	req->flags = cpu_to_be32(flags);

	/* Copy inputs */
	if (key)
		memcpy(&req->elem[0].key, key, map->key_size);
	if (value)
		memcpy(&req->elem[0].value, value, map->value_size);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
				       sizeof(*reply) + sizeof(*reply->elem));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	/* Copy outputs */
	if (out_key)
		memcpy(out_key, &reply->elem[0].key, map->key_size);
	if (out_value)
		memcpy(out_value, &reply->elem[0].value, map->value_size);

	dev_consume_skb_any(skb);

	return 0;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
				     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
				     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
				     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
				     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
				     key, NULL, 0, next_key, NULL);
}

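/* RX handler for control messages.  Async BPF_EVENT notifications are
 * dispatched directly; everything else is treated as a reply and queued
 * for the waiter matching its tag.
 */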
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
		if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
			dev_consume_skb_any(skb);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	nfp_ctrl_lock(bpf->app->ctrl);

	tag = nfp_bpf_cmsg_get_tag(skb);
	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
			  tag);
		goto err_unlock;
	}

	__skb_queue_tail(&bpf->cmsg_replies, skb);
	wake_up_interruptible_all(&bpf->cmsg_wq);

	nfp_ctrl_unlock(bpf->app->ctrl);

	return;
err_unlock:
	nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}

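/* RX handler for messages delivered as a raw buffer rather than an skb.
 * Only async events are expected here; a tagged reply arriving in a raw
 * buffer is dropped with a warning.
 */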
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
	struct nfp_app_bpf *bpf = app->priv;
	const struct cmsg_hdr *hdr = data;

	if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
		return;
	}

	if (hdr->type == CMSG_TYPE_BPF_EVENT)
		nfp_bpf_event_output(bpf, data, len);
	else
		cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
			  hdr->type);
}