/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk. Note that the chunks are not necessarily in contiguous
 * physical memory.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18,
};

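/*
 * Free the pages backing one non-coherent chunk: undo the streaming
 * DMA mapping (if the chunk was mapped) and release each page
 * allocation at its original order.
 */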
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

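/*
 * Free one coherent chunk: every scatterlist entry came from
 * dma_alloc_coherent(), so hand each buffer back with
 * dma_free_coherent().
 */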
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev,
				  chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}

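/*
 * Free a whole ICM area: release every chunk according to how it was
 * allocated, then free the chunk and ICM bookkeeping structures.
 */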
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

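/*
 * Allocate one high-order page block for a non-coherent chunk,
 * preferring the requested NUMA node but falling back to any node
 * before giving up.
 */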
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
				gfp_t gfp_mask, int node)
{
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, order);
	if (!page) {
		page = alloc_pages(gfp_mask, order);
		if (!page)
			return -ENOMEM;
	}

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

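/*
 * Allocate one coherent block and record both its CPU address and DMA
 * address in the scatterlist entry.  ICM chunks must start on a page
 * boundary, so a buffer that comes back misaligned is freed and the
 * allocation treated as a failure.
 */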
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	if (offset_in_page(buf)) {
		dma_free_coherent(dev, PAGE_SIZE << order,
				  buf, sg_dma_address(mem));
		return -ENOMEM;
	}

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

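/*
 * Allocate an ICM area of @npages pages as a chunk list, each chunk
 * holding up to MLX4_ICM_CHUNK_LEN scatterlist entries.  Allocation
 * starts at get_order(MLX4_ICM_ALLOC_SIZE) and falls back to smaller
 * orders when a request fails; for orders above zero, direct reclaim
 * is disabled so that failed large allocations fail fast rather than
 * stall.  Non-coherent chunks are DMA-mapped as they fill up, with a
 * final mapping pass for a trailing partial chunk.
 */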
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	gfp_t mask;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc_node(sizeof(*icm),
			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
			   dev->numa_node);
	if (!icm) {
		icm = kmalloc(sizeof(*icm),
			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
		if (!icm)
			return NULL;
	}

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc_node(sizeof(*chunk),
					     gfp_mask & ~(__GFP_HIGHMEM |
							  __GFP_NOWARN),
					     dev->numa_node);
			if (!chunk) {
				chunk = kmalloc(sizeof(*chunk),
						gfp_mask & ~(__GFP_HIGHMEM |
							     __GFP_NOWARN));
				if (!chunk)
					goto fail;
			}

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		mask = gfp_mask;
		if (cur_order)
			mask &= ~__GFP_DIRECT_RECLAIM;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, mask,
						   dev->numa_node);

		if (ret) {
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}

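/*
 * Thin wrappers around the MAP_ICM/UNMAP_ICM firmware commands (and
 * their _AUX variants for the ICM auxiliary area, which takes no
 * virtual address from the caller).
 */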
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

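/*
 * Take a reference on the ICM chunk backing object @obj, allocating
 * and mapping that chunk on first use.  Callers pair this with
 * mlx4_table_put(); a minimal usage sketch (hypothetical caller):
 *
 *	err = mlx4_table_get(dev, table, obj);
 *	if (err)
 *		return err;
 *	... use the object's ICM-backed context ...
 *	mlx4_table_put(dev, table, obj);
 */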
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i = (obj & (table->num_obj - 1)) /
		(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

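/*
 * Drop a reference on the chunk backing @obj; the last put unmaps the
 * chunk from the device and frees it.
 */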
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i;
	u64 offset;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
		mlx4_UNMAP_ICM(dev, table->virt + offset,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

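/*
 * Look up the kernel virtual address of object @obj and, if
 * @dma_handle is non-NULL, its DMA address as well.  Only lowmem
 * tables can be searched this way, since highmem pages have no
 * permanent kernel mapping.
 */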
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
		      dma_addr_t *dma_handle)
{
	int offset, dma_offset, i;
	u64 idx;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

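/*
 * Take references on every chunk covering objects @start through
 * @end, unwinding the references already taken if any chunk cannot be
 * allocated.
 */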
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 u32 start, u32 end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int err;
	u32 i;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

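/*
 * Release the per-chunk references taken by mlx4_table_get_range().
 */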
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  u32 start, u32 end)
{
	u32 i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

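/*
 * Initialize a table of @nobj objects of @obj_size bytes mapped at
 * device virtual address @virt.  Chunks covering the first @reserved
 * objects are allocated and mapped immediately and pinned with an
 * extra reference so they are never freed; everything else is
 * populated on demand by mlx4_table_get().
 */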
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, u32 nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;
	u64 size;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	if (WARN_ON(!obj_per_chunk))
		return -EINVAL;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm      = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	size = (u64) nobj * obj_size;
	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
			chunk_size = PAGE_ALIGN(size -
						i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	kvfree(table->icm);

	return -ENOMEM;
}

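/*
 * Tear down a table: unmap and free every remaining chunk, including
 * the pinned reserved ones, and free the chunk pointer array.
 */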
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kvfree(table->icm);
}