/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
	IB_FMR_MAX_REMAPS = 32,

	IB_FMR_HASH_BITS  = 8,
	IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
	IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */
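
/*
 * Illustrative lifecycle sketch (comments only, not driver code; the
 * consumer and values are hypothetical):
 *
 *	fmr = ib_fmr_pool_map_phys(pool, pages, n, iova);
 *		new mapping: ref_count 0 -> 1, unlinked from free_list,
 *		added to a cache bucket (if caching is enabled)
 *	ib_fmr_pool_unmap(fmr);
 *		ref_count 1 -> 0, appended to the tail of free_list,
 *		but still visible in the cache
 *	fmr = ib_fmr_pool_map_phys(pool, pages, n, iova);
 *		same page list and iova: cache hit, the FMR is "revived"
 *		without a new mapping operation
 *
 * Once remap_count reaches pool->max_remaps, the FMR is parked on
 * dirty_list instead and reclaimed by the cleanup worker.
 */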

struct ib_fmr_pool {
	spinlock_t                pool_lock;

	int                       pool_size;
	int                       max_pages;
	int                       max_remaps;
	int                       dirty_watermark;
	int                       dirty_len;
	struct list_head          free_list;
	struct list_head          dirty_list;
	struct hlist_head        *cache_bucket;

	void                     (*flush_function)(struct ib_fmr_pool *pool,
						   void               *arg);
	void                     *flush_arg;

	struct kthread_worker    *worker;
	struct kthread_work       work;

	atomic_t                  req_ser;
	atomic_t                  flush_ser;

	wait_queue_head_t         force_wait;
};

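/*
 * Hash only the first page address: FMRs whose page lists share a
 * first page land in the same bucket, and ib_fmr_cache_lookup()
 * compares the full page list to disambiguate.
 */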
static inline u32 ib_fmr_hash(u64 first_page)
{
	return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
		IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
						      u64 *page_list,
						      int  page_list_len,
						      u64  io_virtual_address)
{
	struct hlist_head *bucket;
	struct ib_pool_fmr *fmr;

	if (!pool->cache_bucket)
		return NULL;

	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

	hlist_for_each_entry(fmr, bucket, cache_node)
		if (io_virtual_address == fmr->io_virtual_address &&
		    page_list_len      == fmr->page_list_len      &&
		    !memcmp(page_list, fmr->page_list,
			    page_list_len * sizeof *page_list))
			return fmr;

	return NULL;
}

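/*
 * Unmap every FMR on the dirty list with a single ib_unmap_fmr() call
 * and move the now-clean FMRs (remap_count reset to 0) back to the
 * free list.  Called from the cleanup worker and from pool teardown.
 */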
static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
	int                 ret;
	struct ib_pool_fmr *fmr;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);

	spin_lock_irq(&pool->pool_lock);

	list_for_each_entry(fmr, &pool->dirty_list, list) {
		hlist_del_init(&fmr->cache_node);
		fmr->remap_count = 0;
		list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
		if (fmr->ref_count != 0) {
			pr_warn(PFX "Unmapping FMR %p with ref count %d\n",
				fmr, fmr->ref_count);
		}
#endif
	}

	list_splice_init(&pool->dirty_list, &unmap_list);
	pool->dirty_len = 0;

	spin_unlock_irq(&pool->pool_lock);

	if (list_empty(&unmap_list))
		return;

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);

	spin_lock_irq(&pool->pool_lock);
	list_splice(&unmap_list, &pool->free_list);
	spin_unlock_irq(&pool->pool_lock);
}

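/*
 * Cleanup work function: release the dirty list, advance flush_ser so
 * ib_flush_fmr_pool() waiters can complete, and requeue itself while
 * flush requests are still outstanding (flush_ser behind req_ser).
 */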
static void ib_fmr_cleanup_func(struct kthread_work *work)
{
	struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);

	ib_fmr_batch_release(pool);
	atomic_inc(&pool->flush_ser);
	wake_up_interruptible(&pool->force_wait);

	if (pool->flush_function)
		pool->flush_function(pool, pool->flush_arg);

	if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
		kthread_queue_work(pool->worker, &pool->work);
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
				       struct ib_fmr_pool_param *params)
{
	struct ib_device   *device;
	struct ib_fmr_pool *pool;
	int i;
	int ret;
	int max_remaps;

	if (!params)
		return ERR_PTR(-EINVAL);

	device = pd->device;
	if (!device->alloc_fmr    || !device->dealloc_fmr  ||
	    !device->map_phys_fmr || !device->unmap_fmr) {
		pr_info(PFX "Device %s does not support FMRs\n", device->name);
		return ERR_PTR(-ENOSYS);
	}

	if (!device->attrs.max_map_per_fmr)
		max_remaps = IB_FMR_MAX_REMAPS;
	else
		max_remaps = device->attrs.max_map_per_fmr;

	pool = kmalloc(sizeof *pool, GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->cache_bucket   = NULL;
	pool->flush_function = params->flush_function;
	pool->flush_arg      = params->flush_arg;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->dirty_list);

	if (params->cache) {
		pool->cache_bucket =
			kmalloc_array(IB_FMR_HASH_SIZE,
				      sizeof(*pool->cache_bucket),
				      GFP_KERNEL);
		if (!pool->cache_bucket) {
			ret = -ENOMEM;
			goto out_free_pool;
		}

		for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
			INIT_HLIST_HEAD(pool->cache_bucket + i);
	}

	pool->pool_size       = 0;
	pool->max_pages       = params->max_pages_per_fmr;
	pool->max_remaps      = max_remaps;
	pool->dirty_watermark = params->dirty_watermark;
	pool->dirty_len       = 0;
	spin_lock_init(&pool->pool_lock);
	atomic_set(&pool->req_ser,   0);
	atomic_set(&pool->flush_ser, 0);
	init_waitqueue_head(&pool->force_wait);

	pool->worker = kthread_create_worker(0, "ib_fmr(%s)", device->name);
	if (IS_ERR(pool->worker)) {
		pr_warn(PFX "couldn't start cleanup kthread worker\n");
		ret = PTR_ERR(pool->worker);
		goto out_free_pool;
	}
	kthread_init_work(&pool->work, ib_fmr_cleanup_func);

	{
		struct ib_pool_fmr *fmr;
		struct ib_fmr_attr fmr_attr = {
			.max_pages  = params->max_pages_per_fmr,
			.max_maps   = pool->max_remaps,
			.page_shift = params->page_shift
		};
		int bytes_per_fmr = sizeof *fmr;

		if (pool->cache_bucket)
			bytes_per_fmr += params->max_pages_per_fmr * sizeof(u64);

		for (i = 0; i < params->pool_size; ++i) {
			fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
			if (!fmr)
				goto out_fail;

			fmr->pool             = pool;
			fmr->remap_count      = 0;
			fmr->ref_count        = 0;
			INIT_HLIST_NODE(&fmr->cache_node);

			fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
			if (IS_ERR(fmr->fmr)) {
				pr_warn(PFX "fmr_create failed for FMR %d\n",
					i);
				kfree(fmr);
				goto out_fail;
			}

			list_add_tail(&fmr->list, &pool->free_list);
			++pool->pool_size;
		}
	}

	return pool;

 out_free_pool:
	kfree(pool->cache_bucket);
	kfree(pool);

	return ERR_PTR(ret);

 out_fail:
	ib_destroy_fmr_pool(pool);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
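
/*
 * Usage sketch (hypothetical consumer code, not part of this file;
 * all parameter values below are illustrative only):
 *
 *	struct ib_fmr_pool_param params = {
 *		.max_pages_per_fmr = 64,
 *		.page_shift        = PAGE_SHIFT,
 *		.access            = IB_ACCESS_LOCAL_WRITE |
 *				     IB_ACCESS_REMOTE_WRITE,
 *		.pool_size         = 32,
 *		.dirty_watermark   = 8,
 *		.cache             = 1,
 *	};
 *	struct ib_fmr_pool *fmr_pool = ib_create_fmr_pool(pd, &params);
 *
 *	if (IS_ERR(fmr_pool))
 *		return PTR_ERR(fmr_pool);
 */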

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
	struct ib_pool_fmr *fmr;
	struct ib_pool_fmr *tmp;
	LIST_HEAD(fmr_list);
	int                 i;

	kthread_destroy_worker(pool->worker);
	ib_fmr_batch_release(pool);

	i = 0;
	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
		if (fmr->remap_count) {
			INIT_LIST_HEAD(&fmr_list);
			list_add_tail(&fmr->fmr->list, &fmr_list);
			ib_unmap_fmr(&fmr_list);
		}
		ib_dealloc_fmr(fmr->fmr);
		list_del(&fmr->list);
		kfree(fmr);
		++i;
	}

	if (i < pool->pool_size)
		pr_warn(PFX "pool still has %d regions registered\n",
			pool->pool_size - i);

	kfree(pool->cache_bucket);
	kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
	int serial;
	struct ib_pool_fmr *fmr, *next;

	/*
	 * The free_list holds FMRs that may have been used
	 * but have not been remapped enough times to be dirty.
	 * Put them on the dirty list now so that the cleanup
	 * thread will reap them too.
	 */
	spin_lock_irq(&pool->pool_lock);
	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
		if (fmr->remap_count > 0)
			list_move(&fmr->list, &pool->dirty_list);
	}
	spin_unlock_irq(&pool->pool_lock);

	serial = atomic_inc_return(&pool->req_ser);
	kthread_queue_work(pool->worker, &pool->work);

	if (wait_event_interruptible(pool->force_wait,
				     atomic_read(&pool->flush_ser) - serial >= 0))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
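
/*
 * Usage sketch (hypothetical, illustrative only): a consumer that must
 * guarantee no stale mappings remain, e.g. before retrying a failed
 * I/O on another path, forces a synchronous flush.  The call returns
 * -EINTR if the wait for the cleanup worker is interrupted.
 *
 *	int ret = ib_flush_fmr_pool(fmr_pool);
 *
 *	if (ret)
 *		return ret;
 */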

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool.
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
					 u64                *page_list,
					 int                 list_len,
					 u64                 io_virtual_address)
{
	struct ib_fmr_pool *pool = pool_handle;
	struct ib_pool_fmr *fmr;
	unsigned long       flags;
	int                 result;

	if (list_len < 1 || list_len > pool->max_pages)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&pool->pool_lock, flags);
	fmr = ib_fmr_cache_lookup(pool,
				  page_list,
				  list_len,
				  io_virtual_address);
	if (fmr) {
		/* found in cache */
		++fmr->ref_count;
		if (fmr->ref_count == 1)
			list_del(&fmr->list);

		spin_unlock_irqrestore(&pool->pool_lock, flags);

		return fmr;
	}

	if (list_empty(&pool->free_list)) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return ERR_PTR(-EAGAIN);
	}

	fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
	list_del(&fmr->list);
	hlist_del_init(&fmr->cache_node);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
				 io_virtual_address);

	if (result) {
		spin_lock_irqsave(&pool->pool_lock, flags);
		list_add(&fmr->list, &pool->free_list);
		spin_unlock_irqrestore(&pool->pool_lock, flags);

		pr_warn(PFX "fmr_map returns %d\n", result);

		return ERR_PTR(result);
	}

	++fmr->remap_count;
	fmr->ref_count = 1;

	if (pool->cache_bucket) {
		fmr->io_virtual_address = io_virtual_address;
		fmr->page_list_len      = list_len;
		memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

		spin_lock_irqsave(&pool->pool_lock, flags);
		hlist_add_head(&fmr->cache_node,
			       pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
		spin_unlock_irqrestore(&pool->pool_lock, flags);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
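
/*
 * Usage sketch (hypothetical caller, illustrative only): map a page
 * list and handle -EAGAIN, which means every FMR in the pool is
 * currently mapped or awaiting unmap.  The keys for the data transfer
 * come from the underlying ib_fmr.
 *
 *	struct ib_pool_fmr *pfmr;
 *
 *	pfmr = ib_fmr_pool_map_phys(fmr_pool, pages, npages, iova);
 *	if (IS_ERR(pfmr))
 *		return PTR_ERR(pfmr);	(retry later on -EAGAIN)
 *	post work requests using pfmr->fmr->lkey / pfmr->fmr->rkey
 */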

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
	struct ib_fmr_pool *pool;
	unsigned long flags;

	pool = fmr->pool;

	spin_lock_irqsave(&pool->pool_lock, flags);

	--fmr->ref_count;
	if (!fmr->ref_count) {
		if (fmr->remap_count < pool->max_remaps) {
			list_add_tail(&fmr->list, &pool->free_list);
		} else {
			list_add_tail(&fmr->list, &pool->dirty_list);
			if (++pool->dirty_len >= pool->dirty_watermark) {
				atomic_inc(&pool->req_ser);
				kthread_queue_work(pool->worker, &pool->work);
			}
		}
	}

#ifdef DEBUG
	if (fmr->ref_count < 0)
		pr_warn(PFX "FMR %p has ref count %d < 0\n",
			fmr, fmr->ref_count);
#endif

	spin_unlock_irqrestore(&pool->pool_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);
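
/*
 * Usage sketch (hypothetical, illustrative only): release the FMR once
 * the transfer completes.  The mapping may stay valid until the FMR is
 * reused or the pool is flushed, so callers needing a hard
 * invalidation barrier should follow up with ib_flush_fmr_pool().
 *
 *	ib_fmr_pool_unmap(pfmr);
 */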