/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"

struct mlx5_db_pgdir {
	struct list_head	list;
	unsigned long	       *bitmap;
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.
 */

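/* Allocate zeroed DMA-coherent memory with a NUMA node preference.
 * dma_zalloc_coherent() takes no node argument, so the device's node
 * is temporarily overridden (serialized by alloc_mutex) and restored
 * once the allocation has been made.
 */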
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
					   size_t size, dma_addr_t *dma_handle,
					   int node)
{
	struct mlx5_priv *priv = &dev->priv;
	int original_node;
	void *cpu_handle;

	mutex_lock(&priv->alloc_mutex);
	original_node = dev_to_node(&dev->pdev->dev);
	set_dev_node(&dev->pdev->dev, node);
	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
					 dma_handle, GFP_KERNEL);
	set_dev_node(&dev->pdev->dev, original_node);
	mutex_unlock(&priv->alloc_mutex);
	return cpu_handle;
}

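/* Allocate a queue buffer as one physically contiguous coherent chunk
 * on the given node.  page_shift starts at the allocation order and is
 * lowered (doubling npages each step) until the DMA address is aligned
 * to the reported page size, so the buffer can always be described as
 * npages aligned pages of 1 << page_shift bytes.
 */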
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_frag_buf *buf, int node)
{
	dma_addr_t t;

	buf->size = size;
	buf->npages       = 1;
	buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;

	buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
	if (!buf->frags)
		return -ENOMEM;

	buf->frags->buf   = mlx5_dma_zalloc_coherent_node(dev, size,
							  &t, node);
	if (!buf->frags->buf)
		goto err_out;

	buf->frags->map = t;

	while (t & ((1 << buf->page_shift) - 1)) {
		--buf->page_shift;
		buf->npages *= 2;
	}

	return 0;
err_out:
	kfree(buf->frags);
	return -ENOMEM;
}

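/* NUMA-local convenience wrapper around mlx5_buf_alloc_node().
 *
 * Illustrative caller sketch (not from this file; "eq_size" is a
 * hypothetical byte count), pairing the allocation with mlx5_buf_free():
 *
 *	struct mlx5_frag_buf buf;
 *	int err;
 *
 *	err = mlx5_buf_alloc(dev, eq_size, &buf);
 *	if (err)
 *		return err;
 *	... program the HCA with the buffer's pages, use the queue ...
 *	mlx5_buf_free(dev, &buf);
 */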
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf)
{
	return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
}
EXPORT_SYMBOL(mlx5_buf_alloc);

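/* Free a buffer obtained from mlx5_buf_alloc{,_node}().  The hardware
 * must no longer reference it by the time this is called.
 */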
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
	dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
			  buf->frags->map);

	kfree(buf->frags);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);

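/* Allocate a queue buffer as a list of page-sized fragments rather than
 * one contiguous chunk: each of the DIV_ROUND_UP(size, PAGE_SIZE)
 * fragments is a separate coherent allocation, which avoids high-order
 * allocations for large queues.  A fragment whose DMA address is not
 * page aligned is treated as an error and everything is unwound.
 */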
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node)
{
	int i;

	buf->size = size;
	buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf->page_shift = PAGE_SHIFT;
	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
			     GFP_KERNEL);
	if (!buf->frags)
		goto err_out;

	for (i = 0; i < buf->npages; i++) {
		struct mlx5_buf_list *frag = &buf->frags[i];
		int frag_sz = min_t(int, size, PAGE_SIZE);

		frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
							  &frag->map, node);
		if (!frag->buf)
			goto err_free_buf;
		if (frag->map & ((1 << buf->page_shift) - 1)) {
			dma_free_coherent(&dev->pdev->dev, frag_sz,
					  buf->frags[i].buf, buf->frags[i].map);
			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
				       &frag->map, buf->page_shift);
			goto err_free_buf;
		}
		size -= frag_sz;
	}

	return 0;

err_free_buf:
	while (i--)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
				  buf->frags[i].map);
	kfree(buf->frags);
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);

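/* Free all fragments of a buffer obtained from
 * mlx5_frag_buf_alloc_node(); the running "size" counter recovers the
 * length of the final, possibly partial, fragment.
 */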
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
	int size = buf->size;
	int i;

	for (i = 0; i < buf->npages; i++) {
		int frag_sz = min_t(int, size, PAGE_SIZE);

		dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
				  buf->frags[i].map);
		size -= frag_sz;
	}
	kfree(buf->frags);
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_free);

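/* Allocate a fresh doorbell page directory: one coherent page carved
 * into cache-line-sized doorbell records, plus a bitmap in which a set
 * bit marks a free record.
 */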
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
						 int node)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();
	struct mlx5_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
	if (!pgdir)
		return NULL;

	pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
				sizeof(unsigned long),
				GFP_KERNEL);

	if (!pgdir->bitmap) {
		kfree(pgdir);
		return NULL;
	}

	bitmap_fill(pgdir->bitmap, db_per_page);

	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
						       &pgdir->db_dma, node);
	if (!pgdir->db_page) {
		kfree(pgdir->bitmap);
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

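/* Try to carve one doorbell record out of @pgdir.  On success the
 * record's CPU pointer and bus address are filled into @db and its two
 * 32-bit doorbell words are cleared; returns -ENOMEM if the page is
 * full.
 */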
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
				    struct mlx5_db *db)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();
	int offset;
	int i;

	i = find_first_bit(pgdir->bitmap, db_per_page);
	if (i >= db_per_page)
		return -ENOMEM;

	__clear_bit(i, pgdir->bitmap);

	db->u.pgdir = pgdir;
	db->index   = i;
	offset = db->index * cache_line_size();
	db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	db->dma     = pgdir->db_dma  + offset;

	db->db[0] = 0;
	db->db[1] = 0;

	return 0;
}

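/* Allocate a doorbell record, preferring memory on @node.  Existing
 * page directories are scanned for a free slot first; only when all of
 * them are full is a new page allocated.  pgdir_mutex serializes the
 * list walk against mlx5_db_free().
 */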
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->priv.pgdir_mutex);

	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(dev, node);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);

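/* NUMA-local convenience wrapper around mlx5_db_alloc_node().
 *
 * Illustrative caller sketch (not from this file), pairing the
 * allocation with mlx5_db_free():
 *
 *	struct mlx5_db db;
 *	int err;
 *
 *	err = mlx5_db_alloc(dev, &db);
 *	if (err)
 *		return err;
 *	... point the queue context at db.dma, ring doorbells via db.db ...
 *	mlx5_db_free(dev, &db);
 */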
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

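/* Return a doorbell record to its page directory.  Once every record
 * in the page is free again, the page and its directory are released.
 */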
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();

	mutex_lock(&dev->priv.pgdir_mutex);

	__set_bit(db->index, db->u.pgdir->bitmap);

	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir->bitmap);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

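/* Translate a contiguous buffer (mlx5_buf_alloc()) into the big-endian
 * physical address array the HCA expects: page i lives at
 * map + (i << page_shift).
 */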
void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
	u64 addr;
	int i;

	for (i = 0; i < buf->npages; i++) {
		addr = buf->frags->map + (i << buf->page_shift);

		pas[i] = cpu_to_be64(addr);
	}
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);

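/* Translate a fragmented buffer (mlx5_frag_buf_alloc_node()) into the
 * big-endian physical address array the HCA expects; each fragment
 * contributes its own mapping.
 */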
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
	int i;

	for (i = 0; i < buf->npages; i++)
		pas[i] = cpu_to_be64(buf->frags[i].map);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);