/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

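/* op_mod values for the MANAGE_PAGES command */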
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

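/* Deferred page request, allocated in the event path and executed
 * from the page-allocator workqueue.
 */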
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	s32	npages;
	struct work_struct work;
};

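/* Tracks one system page handed to firmware. A system page may carry
 * several 4K firmware pages: 'bitmask' marks which 4K chunks are still
 * free and 'free_count' caches the number of set bits.
 */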
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page		*page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

enum {
	MAX_RECLAIM_TIME_MSECS			= 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS	= 2 * 1000 * 60,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

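/* Track a freshly mapped system page in the per-device rbtree, keyed by
 * its DMA address. Returns -EEXIST if the address is already tracked.
 */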
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_right;
		else if (tfp->addr > addr)
			new = &parent->rb_left;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

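/* Find the fw_page whose system page starts at 'addr' */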
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct fw_page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_right;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_left;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

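/* Query how many pages firmware wants (boot or init stage) and for
 * which function id.
 */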
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

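/* Take one 4K firmware page from the first system page on the free
 * list; the caller falls back to alloc_system_page() on -ENOMEM.
 */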
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK		((~(u64)0U) << PAGE_SHIFT)

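/* Return a 4K chunk to its owning system page. Once all chunks are free
 * again, the system page is unmapped and released.
 */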
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		list_add(&fwp->list, &dev->priv.free_list);
	}
}

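/* Allocate and DMA-map one system page, preferably on the device's
 * NUMA node, and add it to the page tracking structures.
 */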
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;
	int nid = dev_to_node(&dev->pdev->dev);

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(&dev->pdev->dev, page, 0,
			    PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

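/* Notify firmware that the requested pages cannot be supplied */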
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

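/* Give 'npages' 4K pages to firmware for 'func_id' via MANAGE_PAGES.
 * On failure all pages are returned to the pool and, if requested,
 * firmware is notified that the pages cannot be supplied.
 */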
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;

	mlx5_core_dbg(dev, "err %d\n", err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id);
	return err;
}

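/* Issue a MANAGE_PAGES reclaim command. If the device is in internal
 * error state firmware cannot respond, so the reply is synthesized from
 * the driver's own page tree instead.
 */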
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct fw_page *fwp;
	struct rb_node *p;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);

	p = rb_first(&dev->priv.page_root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);
		if (fwp->func_id != func_id)
			continue;

		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
		i++;
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

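/* Reclaim up to 'npages' pages from firmware for the given function id
 * and return them to the pool.
 */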
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

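/* Execute a queued page request: positive npages gives pages to
 * firmware, negative npages reclaims them.
 */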
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

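/* Entry point for firmware page-request events. Allocation is atomic
 * because this may run from the EQ interrupt path; the actual transfer
 * is deferred to the page-allocator workqueue.
 */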
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
}

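/* Satisfy the firmware's boot- or init-stage page requirements during
 * device bringup.
 */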
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

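/* Number of page addresses that fit in a reclaim reply built from
 * MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks, i.e. the largest batch
 * worth requesting per reclaim command.
 */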
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

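/* Reclaim all firmware pages on teardown. The deadline is extended
 * whenever a round makes progress; otherwise we give up after
 * MAX_RECLAIM_TIME_MSECS.
 */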
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err = 0;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(),
					    &nclaimed);

			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.vfs_pages,
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.vfs_pages);

	return 0;
}

void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}

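/* Wait for VFs to return their pages after SR-IOV teardown. The
 * deadline is extended while progress is being made; otherwise time out
 * after MAX_RECLAIM_VFS_PAGES_TIME_MSECS.
 */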
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
	int prev_vfs_pages = dev->priv.vfs_pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
		      dev->priv.name);
	while (dev->priv.vfs_pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
			return -ETIMEDOUT;
		}
		if (dev->priv.vfs_pages < prev_vfs_pages) {
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
			prev_vfs_pages = dev->priv.vfs_pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
	return 0;
}