/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include <rdma/rdma_vt.h>
#include "vt.h"
#include "mr.h"
#include "trace.h"

/**
 * rvt_driver_mr_init - Init MR resources per driver
 * @rdi: rvt dev struct
 *
 * Do any initialization needed when a driver registers with rdmavt.
 *
 * Return: 0 on success or errno on failure
 */
int rvt_driver_mr_init(struct rvt_dev_info *rdi)
{
	unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
	unsigned lk_tab_size;
	int i;

	/*
	 * The top lkey_table_size bits are used to index the
	 * table. The lower 8 bits can be owned by the user (copied from
	 * the LKEY). The remaining bits act as a generation number or tag.
	 */
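	/*
	 * For example, with lkey_table_size == 16 an allocated lkey looks
	 * like: bits 31..16 = table index, bits 15..8 = generation, and
	 * bits 7..0 = user-owned (see rvt_alloc_lkey() and
	 * rvt_fast_reg_mr()).
	 */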
	if (!lkey_table_size)
		return -EINVAL;

	spin_lock_init(&rdi->lkey_table.lock);

	/* ensure generation is at least 4 bits */
	if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
		rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
			    lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
		rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
		lkey_table_size = rdi->dparms.lkey_table_size;
	}
	rdi->lkey_table.max = 1 << lkey_table_size;
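	/* shift converts an lkey/rkey into an index into table[] */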
	rdi->lkey_table.shift = 32 - lkey_table_size;
	lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
	rdi->lkey_table.table = (struct rvt_mregion __rcu **)
			       vmalloc_node(lk_tab_size, rdi->dparms.node);
	if (!rdi->lkey_table.table)
		return -ENOMEM;

	RCU_INIT_POINTER(rdi->dma_mr, NULL);
	for (i = 0; i < rdi->lkey_table.max; i++)
		RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);

	rdi->dparms.props.max_mr = rdi->lkey_table.max;
	return 0;
}

/**
 * rvt_mr_exit - clean up MR
 * @rdi: rvt dev structure
 *
 * called when drivers have unregistered or perhaps failed to register with us
 */
void rvt_mr_exit(struct rvt_dev_info *rdi)
{
	if (rdi->dma_mr)
		rvt_pr_err(rdi, "DMA MR not null!\n");

	vfree(rdi->lkey_table.table);
}

static void rvt_deinit_mregion(struct rvt_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
	percpu_ref_exit(&mr->refcount);
}

static void __rvt_mregion_complete(struct percpu_ref *ref)
{
	struct rvt_mregion *mr = container_of(ref, struct rvt_mregion,
					      refcount);

	complete(&mr->comp);
}

static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
			    int count, unsigned int percpu_flags)
{
	int m, i = 0;
	struct rvt_dev_info *dev = ib_to_rvt(pd->device);

	mr->mapsz = 0;
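	/* each map[] entry holds RVT_SEGSZ segments; round up to cover count */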
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
					  dev->dparms.node);
		if (!mr->map[i])
			goto bail;
		mr->mapsz++;
	}
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete,
			    percpu_flags, GFP_KERNEL))
		goto bail;

	atomic_set(&mr->lkey_invalid, 0);
	mr->pd = pd;
	mr->max_segs = count;
	return 0;
bail:
	rvt_deinit_mregion(mr);
	return -ENOMEM;
}

/**
 * rvt_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments mr reference count as required.
 *
 * Sets the lkey field of mr for non-dma regions.
 */
static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;

	rvt_get_mr(mr);
	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct rvt_mregion *tmr;

		tmr = rcu_access_pointer(dev->dma_mr);
		if (!tmr) {
			mr->lkey_published = 1;
			/* Ensure published is written first */
			rcu_assign_pointer(dev->dma_mr, mr);
			rvt_get_mr(mr);
		}
		goto success;
	}

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (!rcu_access_pointer(rkt->table[r]))
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	/*
	 * bits are capped to ensure enough bits for generation number
	 */
	mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
		((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	mr->lkey_published = 1;
	/* Ensure published is written first */
	rcu_assign_pointer(rkt->table[r], mr);
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	rvt_put_mr(mr);
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}

/**
 * rvt_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
static void rvt_free_lkey(struct rvt_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	int freed = 0;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!lkey) {
		if (mr->lkey_published) {
			mr->lkey_published = 0;
			/* ensure published is written before pointer */
			rcu_assign_pointer(dev->dma_mr, NULL);
			rvt_put_mr(mr);
		}
	} else {
		if (!mr->lkey_published)
			goto out;
		r = lkey >> (32 - dev->dparms.lkey_table_size);
		mr->lkey_published = 0;
		/* ensure published is written before pointer */
		rcu_assign_pointer(rkt->table[r], NULL);
	}
	freed++;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
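	/*
	 * Drop the initial percpu reference now that the lkey is
	 * unpublished; once the remaining holders drop theirs,
	 * __rvt_mregion_complete() signals mr->comp.
	 */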
	if (freed)
		percpu_ref_kill(&mr->refcount);
}

static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
{
	struct rvt_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
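	/* struct_size() covers the rvt_mr plus m trailing mr.map[] pointers */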
	mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = rvt_init_mregion(&mr->mr, pd, count, 0);
	if (rval)
		goto bail;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = rvt_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}

static void __rvt_free_mr(struct rvt_mr *mr)
{
	rvt_free_lkey(&mr->mr);
	rvt_deinit_mregion(&mr->mr);
	kfree(mr);
}

/**
 * rvt_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Return: the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the functions in
 * struct dma_virt_ops.
 */
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct rvt_mr *mr;
	struct ib_mr *ret;
	int rval;

	if (ibpd_to_rvtpd(pd)->user)
		return ERR_PTR(-EPERM);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

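	/* count == 0: the DMA MR covers kernel memory, so no segment maps */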
	rval = rvt_init_mregion(&mr->mr, pd, 0, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = rvt_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}

/**
 * rvt_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: associated virtual address (iova) for the region
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct rvt_mr *mr;
	struct ib_umem *umem;
	struct sg_page_iter sg_iter;
	int n, m;
	struct ib_mr *ret;

	if (length == 0)
		return ERR_PTR(-EINVAL);

	umem = ib_umem_get(pd->device, start, length, mr_access_flags);
	if (IS_ERR(umem))
		return (void *)umem;

	n = ib_umem_num_pages(umem);

	mr = __rvt_alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail_umem;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	mr->mr.page_shift = PAGE_SHIFT;
	m = 0;
	n = 0;
	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		void *vaddr;

		vaddr = page_address(sg_page_iter_page(&sg_iter));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail_inval;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = PAGE_SIZE;
		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
		if (++n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	return &mr->ibmr;

bail_inval:
	__rvt_free_mr(mr);

bail_umem:
	ib_umem_release(umem);

	return ret;
}

/**
 * rvt_dereg_clean_qp_cb - callback from iterator
 * @qp: the qp
 * @v: the mregion (as u64)
 *
 * This routine fields the callback for all QPs and, for QPs in
 * the same PD as the MR, calls rvt_qp_mr_clean() to potentially
 * clean up references.
 */
static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v)
{
	struct rvt_mregion *mr = (struct rvt_mregion *)v;

	/* skip PDs that are not ours */
	if (mr->pd != qp->ibqp.pd)
		return;
	rvt_qp_mr_clean(qp, mr->lkey);
}

/**
 * rvt_dereg_clean_qps - find QPs for reference cleanup
 * @mr: the MR that is being deregistered
 *
 * This routine iterates RC QPs looking for references
 * to the lkey noted in mr.
 */
static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
{
	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);

	rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb);
}

/**
 * rvt_check_refs - check references
 * @mr: the mregion
 * @t: the caller identification
 *
 * This routine checks whether an MR still holds references
 * while it is being de-registered.
 *
 * If the count is non-zero, the code calls a clean routine then
 * waits for the timeout for the count to zero.
 */
static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
{
	unsigned long timeout;
	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);

	if (mr->lkey) {
		/* avoid dma mr */
		rvt_dereg_clean_qps(mr);
		/* @mr was indexed on rcu protected @lkey_table */
		synchronize_rcu();
	}

	timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
	if (!timeout) {
		rvt_pr_err(rdi,
			   "%s timeout mr %p pd %p lkey %x refcount %ld\n",
			   t, mr, mr->pd, mr->lkey,
			   atomic_long_read(&mr->refcount.data->count));
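		/* re-take the reference the caller dropped so the MR stays valid */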
		rvt_get_mr(mr);
		return -EBUSY;
	}
	return 0;
}

/**
 * rvt_mr_has_lkey - does the MR match the lkey
 * @mr: the mregion
 * @lkey: the lkey
 */
bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
{
	return mr && lkey == mr->lkey;
}

/**
 * rvt_ss_has_lkey - is the lkey referenced by the sge state
 * @ss: the sge state
 * @lkey: the lkey to look for
 *
 * This code tests for an MR in the indicated
 * sge state.
 */
bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
{
	int i;
	bool rval = false;

	if (!ss->num_sge)
		return rval;
	/* first one */
	rval = rvt_mr_has_lkey(ss->sge.mr, lkey);
	/* any others */
	for (i = 0; !rval && i < ss->num_sge - 1; i++)
		rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey);
	return rval;
}

/**
 * rvt_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 * @udata: unused by the driver
 *
 * Note that this is called to free MRs created by rvt_get_dma_mr()
 * or rvt_reg_user_mr().
 *
 * Returns 0 on success.
 */
int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rvt_mr *mr = to_imr(ibmr);
	int ret;

	rvt_free_lkey(&mr->mr);

	rvt_put_mr(&mr->mr); /* will set completion if last */
	ret = rvt_check_refs(&mr->mr, __func__);
	if (ret)
		goto out;
	rvt_deinit_mregion(&mr->mr);
	ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}

/**
 * rvt_alloc_mr - Allocate a memory region usable with the IB_WR_REG_MR
 *		  send work request
 * @pd: protection domain for this memory region
 * @mr_type: mem region type
 * @max_num_sg: Max number of segments allowed
 *
 * Return: the memory region on success, otherwise return an errno.
 */
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			   u32 max_num_sg)
{
	struct rvt_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = __rvt_alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}

/**
 * rvt_set_page - page assignment function called by ib_sg_to_pages
 * @ibmr: memory region
 * @addr: dma address of mapped page
 *
 * Return: 0 on success
 */
static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	u32 ps = 1 << mr->mr.page_shift;
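	/* mr.length grows by one page per call, so this counts mapped segments */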
	u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
	int m, n;

	if (unlikely(mapped_segs == mr->mr.max_segs))
		return -ENOMEM;

	m = mapped_segs / RVT_SEGSZ;
	n = mapped_segs % RVT_SEGSZ;
	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
	mr->mr.map[m]->segs[n].length = ps;
	mr->mr.length += ps;
	trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);

	return 0;
}

/**
 * rvt_map_mr_sg - map sg list and set it in the memory region
 * @ibmr: memory region
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 *
 * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
 *
 * Return: number of sg elements mapped to the memory region
 */
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		  int sg_nents, unsigned int *sg_offset)
{
	struct rvt_mr *mr = to_imr(ibmr);
	int ret;

	mr->mr.length = 0;
	mr->mr.page_shift = PAGE_SHIFT;
	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
	mr->mr.user_base = ibmr->iova;
	mr->mr.iova = ibmr->iova;
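	/* byte offset of the iova within the first mapped segment */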
	mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
	mr->mr.length = (size_t)ibmr->length;
	trace_rvt_map_mr_sg(ibmr, sg_nents, sg_offset);
	return ret;
}

/**
 * rvt_fast_reg_mr - fast register physical MR
 * @qp: the queue pair where the work request comes from
 * @ibmr: the memory region to be registered
 * @key: updated key for this memory region
 * @access: access flags for this memory region
 *
 * Returns 0 on success.
 */
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access)
{
	struct rvt_mr *mr = to_imr(ibmr);

	if (qp->ibqp.pd != mr->mr.pd)
		return -EACCES;

	/* not applicable to dma MR or user MR */
	if (!mr->mr.lkey || mr->umem)
		return -EINVAL;

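	/* only the low 8 user-owned key bits may change; index and generation must match */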
	if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
		return -EINVAL;

	ibmr->lkey = key;
	ibmr->rkey = key;
	mr->mr.lkey = key;
	mr->mr.access_flags = access;
	mr->mr.iova = ibmr->iova;
	atomic_set(&mr->mr.lkey_invalid, 0);

	return 0;
}
EXPORT_SYMBOL(rvt_fast_reg_mr);

/**
 * rvt_invalidate_rkey - invalidate an MR rkey
 * @qp: queue pair associated with the invalidate op
 * @rkey: rkey to invalidate
 *
 * Returns 0 on success.
 */
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;

	if (rkey == 0)
		return -EINVAL;

	rcu_read_lock();
	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	atomic_set(&mr->lkey_invalid, 1);
	rcu_read_unlock();
	return 0;

bail:
	rcu_read_unlock();
	return -EINVAL;
}
EXPORT_SYMBOL(rvt_invalidate_rkey);

/**
 * rvt_sge_adjacent - is isge compressible
 * @last_sge: last outgoing SGE written
 * @sge: SGE to check
 *
 * If adjacent will update last_sge to add length.
 *
 * Return: true if isge is adjacent to last sge
 */
static inline bool rvt_sge_adjacent(struct rvt_sge *last_sge,
				    struct ib_sge *sge)
{
	if (last_sge && sge->lkey == last_sge->mr->lkey &&
	    ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) {
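		/*
		 * For a user MR just check that the combined range still
		 * fits within the MR; for the DMA MR (lkey == 0) the
		 * current segment length can simply grow.
		 */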
		if (sge->lkey) {
			if (unlikely((sge->addr - last_sge->mr->user_base +
			      sge->length > last_sge->mr->length)))
				return false; /* overrun, caller will catch */
		} else {
			last_sge->length += sge->length;
		}
		last_sge->sge_length += sge->length;
		trace_rvt_sge_adjacent(last_sge, sge);
		return true;
	}
	return false;
}

/**
 * rvt_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @last_sge: last outgoing SGE written
 * @sge: SGE to check
 * @acc: access flags
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 *
 * Increments the reference count when a new sge is stored.
 *
 * Return: 0 if compressed, 1 if added, otherwise returns -errno.
 */
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct rvt_sge *last_sge,
		struct ib_sge *sge, int acc)
{
	struct rvt_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr() and dma_virt_ops).
	 */
	if (sge->lkey == 0) {
		struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);

		if (pd->user)
			return -EINVAL;
		if (rvt_sge_adjacent(last_sge, sge))
			return 0;
		rcu_read_lock();
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		rvt_get_mr(mr);
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *)sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	if (rvt_sge_adjacent(last_sge, sge))
		return 0;
	rcu_read_lock();
	mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
	if (!mr)
		goto bail;
	rvt_get_mr(mr);
	if (!READ_ONCE(mr->lkey_published))
		goto bail_unref;

	if (unlikely(atomic_read(&mr->lkey_invalid) ||
		     mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail_unref;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail_unref;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	trace_rvt_sge_new(isge, sge);
	return 1;
bail_unref:
	rvt_put_mr(mr);
bail:
	rcu_read_unlock();
	return -EINVAL;
}
EXPORT_SYMBOL(rvt_lkey_ok);

/**
 * rvt_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return: 1 if successful, otherwise 0.
 *
 * increments the reference count upon success
 */
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr() and dma_virt_ops).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
		struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(rdi->dma_mr);
		if (!mr)
			goto bail;
		rvt_get_mr(mr);
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *)vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
	if (!mr)
		goto bail;
	rvt_get_mr(mr);
	/* ensure mr read is before test */
	if (!READ_ONCE(mr->lkey_published))
		goto bail_unref;
	if (unlikely(atomic_read(&mr->lkey_invalid) ||
		     mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail_unref;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail_unref;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail_unref:
	rvt_put_mr(mr);
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_rkey_ok);