/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"

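/* MANAGE_PAGES op_mod values: the direction of a page transfer between
 * the driver and firmware.
 */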
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
};

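/* One host page given to firmware, tracked in the priv.page_root rb-tree
 * keyed by DMA address. The page is carved into MLX5_NUM_4K_IN_PAGE 4K
 * chunks; set bits in @bitmask mark chunks that are still free, and pages
 * with at least one free chunk sit on priv.free_list.
 */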
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

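/* Add a freshly DMA-mapped host page to the rb-tree, with all of its 4K
 * chunks marked free and the page linked onto the free list.
 */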
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

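/* Look up the fw_page that owns a given page-aligned DMA address. */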
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct fw_page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

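/* Ask firmware how many pages it needs for this stage (boot or init) and
 * which function the request is for.
 */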
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)]   = {0};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

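/* Take one free 4K chunk from the first page on the free list; remove the
 * page from the list once its last chunk is handed out.
 */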
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

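/* Return a 4K chunk to its owning page. A page whose first chunk just came
 * back goes (back) onto the free list; once all of its chunks are free, the
 * page is removed from the tree, DMA-unmapped and freed.
 */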
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		list_add(&fwp->list, &dev->priv.free_list);
	}
}

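/* Allocate one host page near the device's NUMA node, DMA-map it and add it
 * to the tracking tree. The page is remapped once if it happens to land at
 * DMA address 0, which firmware cannot use.
 */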
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct device *device = dev->device;
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

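/* Tell firmware we could not satisfy its page request, so it does not keep
 * waiting for pages that will never arrive.
 */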
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)]   = {0};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

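/* Allocate npages 4K chunks and post them to firmware in one MANAGE_PAGES
 * (give) command. On failure everything allocated so far is rolled back and,
 * if requested, firmware is notified that no pages are coming.
 */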
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail, bool ec_function)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}

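/* Wrapper around the reclaim command. If the device hit an internal error
 * the firmware cannot answer, so fake its reply: walk the tracking tree and
 * report the pages owned by this function as reclaimed.
 */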
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct fw_page *fwp;
	struct rb_node *p;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);

	p = rb_first(&dev->priv.page_root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);
		if (fwp->func_id != func_id)
			continue;

		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
		i++;
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

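/* Ask firmware to hand back up to npages pages for a function, then free
 * each returned 4K chunk and update the per-owner page counters.
 */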
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed, bool ec_function)
{
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

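/* Work item that services one firmware page request: negative npages means
 * reclaim, positive means give.
 */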
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

enum {
	EC_FUNCTION_MASK = 0x8000,
};

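/* PAGE_REQUEST event handler. Runs in notifier (atomic) context, so it only
 * decodes the EQE and queues the actual work onto the page workqueue.
 */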
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev  = container_of(priv, struct mlx5_core_dev, priv);
	eqe  = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
		      func_id, npages);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

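/* Give firmware the pages it asks for at boot/init time, before any page
 * request events can arrive.
 */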
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

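/* The most page addresses that fit in the command output, given the inline
 * output area plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks, i.e. the
 * largest batch worth requesting per reclaim command.
 */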
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

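/* Pull back every page firmware holds, in optimally sized batches. The
 * timeout is restarted whenever progress is made; the per-owner counters
 * are expected to be zero when we are done.
 */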
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err = 0;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(),
					    &nclaimed, mlx5_core_is_ecpf(dev));

			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.vfs_pages,
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.vfs_pages);
	WARN(dev->priv.peer_pf_pages,
	     "Peer PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.peer_pf_pages);

	return 0;
}

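/* Set up the empty tracking tree, the free list and the single-threaded
 * workqueue that services firmware page requests.
 */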
int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

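/* Block until the given pages counter drops to zero, i.e. until firmware has
 * returned all pages it was given, restarting the timeout while progress is
 * being made.
 */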
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage\n");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}