/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"

enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};

struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

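/*
 * Look up the rb-tree of firmware pages for @func_id, lazily allocating an
 * empty tree and registering it in the page_root_xa xarray on first use.
 * Returns an ERR_PTR() on allocation failure.
 */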
static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
{
	struct rb_root *root;
	int err;

	root = xa_load(&dev->priv.page_root_xa, func_id);
	if (root)
		return root;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	*root = RB_ROOT;

	return root;
}

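/*
 * Track a newly allocated host page in the per-function rb-tree, keyed by
 * DMA address.  All MLX5_NUM_4K_IN_PAGE 4K chunks start out free (all bits
 * set in ->bitmask) and the page is added to the driver-wide free list.
 */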
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_node *parent = NULL;
	struct rb_root *root;
	struct rb_node **new;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	root = page_root_per_func_id(dev, func_id);
	if (IS_ERR(root))
		return PTR_ERR(root);

	new = &root->rb_node;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_right;
		else if (tfp->addr > addr)
			new = &parent->rb_left;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

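/*
 * Find the fw_page whose base DMA address is @addr in the rb-tree of
 * @func_id.  Returns NULL (and warns once) if the function has no tree or
 * the address was never inserted.
 */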
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
				    u32 func_id)
{
	struct fw_page *result = NULL;
	struct rb_root *root;
	struct rb_node *tmp;
	struct fw_page *tfp;

	root = xa_load(&dev->priv.page_root_xa, func_id);
	if (WARN_ON_ONCE(!root))
		return NULL;

	tmp = root->rb_node;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_right;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_left;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

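/*
 * Ask the firmware how many pages it needs at this stage (boot or init
 * pages) and which function id the request is accounted to.
 */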
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

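/*
 * Carve one free 4K chunk out of an already-tracked host page belonging to
 * @func_id.  ->bitmask has one bit per MLX5_ADAPTER_PAGE_SIZE chunk (e.g.
 * 16 chunks on a 64K-page kernel, 1 on a 4K-page kernel); the first set
 * bit is claimed and cleared.  Returns -ENOMEM when no tracked page of
 * this function has a free chunk, letting the caller fall back to
 * alloc_system_page().
 */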
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
{
	struct fw_page *fp = NULL;
	struct fw_page *iter;
	unsigned n;

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->func_id != func_id)
			continue;
		fp = iter;
	}

	if (list_empty(&dev->priv.free_list) || !fp)
		return -ENOMEM;

	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

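/*
 * Drop all tracking for @fwp: unlink it from the rb-tree (and, if
 * @in_free_list, from the free list), undo the DMA mapping of the backing
 * page and release it.
 */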
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	struct rb_root *root;

	root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
	if (WARN_ON_ONCE(!root))
		return;

	rb_erase(&fwp->rb_node, root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}

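/*
 * Return one 4K chunk to its tracking page.  When the last chunk of a page
 * comes back, the whole page is freed; the page is already on the free
 * list unless MLX5_NUM_4K_IN_PAGE == 1 (free_count went straight from 0 to
 * full), which is what the "free_count != 1" argument encodes.  A page
 * that just gained its first free chunk is added back to the free list.
 */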
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}
	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}

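/*
 * Allocate a fresh host page near the device's NUMA node, DMA-map it and
 * start tracking it for @func_id.  If the mapping happens to land at DMA
 * address 0 (which the firmware cannot use), that mapping is kept aside
 * and the page is mapped again; the zero mapping is released on exit.
 */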
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct device *device = mlx5_core_dma_dev(dev);
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

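/*
 * Tell the firmware we cannot satisfy its page request (MANAGE_PAGES with
 * op_mod MLX5_PAGES_CANT_GIVE and no page list).
 */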
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

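/*
 * Hand @npages 4K chunks to the firmware for @func_id via MANAGE_PAGES
 * (MLX5_PAGES_GIVE).  Chunks come from partially used tracked pages first,
 * falling back to freshly allocated system pages.  On any failure every
 * chunk gathered so far is returned to the pool and, if @notify_fail is
 * set, the firmware is told with MLX5_PAGES_CANT_GIVE.  On success the
 * fw_pages counters are bumped per page owner (VF or peer PF).
 */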
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail, bool ec_function)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr, func_id);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}

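/*
 * Free every page tracked for @func_id without talking to the firmware,
 * used when the firmware has signalled (via the release_all EQE flag) that
 * it already dropped all pages of that function.  Counters are adjusted by
 * the number of chunks the firmware still held.
 */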
static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
			      bool ec_function)
{
	struct rb_root *root;
	struct rb_node *p;
	int npages = 0;

	root = xa_load(&dev->priv.page_root_xa, func_id);
	if (WARN_ON_ONCE(!root))
		return;

	p = rb_first(root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	dev->priv.fw_pages -= npages;
	if (func_id)
		dev->priv.vfs_pages -= npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}

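/*
 * Write the addresses of up to @npages chunks still held by the firmware
 * (clear bits in @fwp->bitmask) into the pas[] array of a MANAGE_PAGES
 * output mailbox, starting at @index.  Returns how many entries were set.
 */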
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
				     u32 npages)
{
	u32 pages_set = 0;
	unsigned int n;

	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
		pages_set++;

		if (!--npages)
			break;
	}

	return pages_set;
}

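/*
 * Execute a MANAGE_PAGES(take) command, or emulate it when the command
 * interface is down (e.g. during teardown after an error): in that case
 * the expected output is synthesized from the driver's own tracking, so
 * the pages can still be reclaimed without firmware involvement.
 */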
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct rb_root *root;
	struct fw_page *fwp;
	struct rb_node *p;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (!mlx5_cmd_is_down(dev))
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);

	root = xa_load(&dev->priv.page_root_xa, func_id);
	if (WARN_ON_ONCE(!root))
		return -EEXIST;

	p = rb_first(root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);

		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

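/*
 * Ask the firmware to return up to @npages chunks owned by @func_id.  The
 * firmware reports how many it actually released in output_num_entries;
 * each returned address is handed back to the pool with free_4k() and the
 * counters are decremented accordingly.
 */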
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed, bool ec_function)
{
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
		      func_id, npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

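/*
 * Deferred handler for a page request: release everything, reclaim
 * (negative npages) or give (positive npages) as encoded in the request.
 */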
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};

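/*
 * PAGE_REQUEST EQE handler.  The 16-bit ec_function field of the event
 * carries two flag bits: bit 15 (EC_FUNCTION_MASK) marks a request from
 * the embedded CPU function and bit 14 (RELEASE_ALL_PAGES_MASK) asks the
 * driver to drop all pages of the function at once.  npages is signed:
 * positive means "give pages", negative means "reclaim pages".  This runs
 * in atomic context, so the real work is deferred to the pg_wq workqueue.
 */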
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev  = container_of(priv, struct mlx5_core_dev, priv);
	eqe  = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 func_id;
	s32 npages;
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

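/*
 * Number of page entries that fit in one reclaim command if we budget
 * MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks for the output: the immediate
 * output bytes of the command layout plus the block payloads, minus the
 * fixed MANAGE_PAGES output header, divided by 8 bytes per address.  With
 * the usual 512-byte block payload and 16-byte immediate/header sizes this
 * works out to (16 + 12 * 512 - 16) / 8 = 768 pages per command.
 */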
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

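/*
 * Reclaim everything in one function's tree, batch by batch.  The timeout
 * is restarted whenever the firmware makes progress, so we only give up
 * after MAX_RECLAIM_TIME_MSECS with no pages returned at all.
 */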
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
				   struct rb_root *root, u16 func_id)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);

	while (!RB_EMPTY_ROOT(root)) {
		int nclaimed;
		int err;

		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
				    &nclaimed, mlx5_core_is_ecpf(dev));
		if (err) {
			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
				       err, func_id);
			return err;
		}

		if (nclaimed)
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);

		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	}

	return 0;
}

int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	struct rb_root *root;
	unsigned long id;
	void *entry;

	xa_for_each(&dev->priv.page_root_xa, id, entry) {
		root = entry;
		mlx5_reclaim_root_pages(dev, root, id);
		xa_erase(&dev->priv.page_root_xa, id);
		kfree(root);
	}

	WARN_ON(!xa_empty(&dev->priv.page_root_xa));

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.vfs_pages,
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.vfs_pages);
	WARN(dev->priv.peer_pf_pages,
	     "Peer PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.peer_pf_pages);

	return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	xa_init(&dev->priv.page_root_xa);

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	xa_destroy(&dev->priv.page_root_xa);
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

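/*
 * Poll (every 50ms) until the given page counter drops to zero, e.g. while
 * VFs return their pages on teardown.  The timeout is restarted whenever
 * the counter makes progress, and the wait is skipped entirely in
 * internal-error state, where the pages are freed manually later.
 */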
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}