// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 HGST, a Western Digital Company.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

#include "core_priv.h"

#include <trace/events/rdma_core.h>

/* Max size for shared CQ, may require tuning */
#define IB_MAX_SHARED_CQ_SZ		4096U

/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH			16
#define IB_POLL_BATCH_DIRECT		8

/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ		256
#define IB_POLL_BUDGET_WORKQUEUE	65536

#define IB_POLL_FLAGS \
	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)

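/*
 * CQ moderation profiles for the DIM algorithm below. Each initializer
 * fills struct dim_cq_moder as {usec, pkts, comps, cq_period_mode};
 * only the usec and comps fields are consumed by ib_cq_rdma_dim_work().
 */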
static const struct dim_cq_moder
rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = {
	{1,   0, 1,  0},
	{1,   0, 4,  0},
	{2,   0, 4,  0},
	{2,   0, 8,  0},
	{4,   0, 8,  0},
	{16,  0, 8,  0},
	{16,  0, 16, 0},
	{32,  0, 16, 0},
	{32,  0, 32, 0},
};

static void ib_cq_rdma_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct ib_cq *cq = dim->priv;

	u16 usec = rdma_dim_prof[dim->profile_ix].usec;
	u16 comps = rdma_dim_prof[dim->profile_ix].comps;

	dim->state = DIM_START_MEASURE;

	trace_cq_modify(cq, comps, usec);
	cq->device->ops.modify_cq(cq, comps, usec);
}

static void rdma_dim_init(struct ib_cq *cq)
{
	struct dim *dim;

	if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
	    cq->poll_ctx == IB_POLL_DIRECT)
		return;

	dim = kzalloc(sizeof(struct dim), GFP_KERNEL);
	if (!dim)
		return;

	dim->state = DIM_START_MEASURE;
	dim->tune_state = DIM_GOING_RIGHT;
	dim->profile_ix = RDMA_DIM_START_PROFILE;
	dim->priv = cq;
	cq->dim = dim;

	INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}

static void rdma_dim_destroy(struct ib_cq *cq)
{
	if (!cq->dim)
		return;

	cancel_work_sync(&cq->dim->work);
	kfree(cq->dim);
}

static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
	int rc;

	rc = ib_poll_cq(cq, num_entries, wc);
	trace_cq_poll(cq, num_entries, rc);
	return rc;
}

static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
			   int batch)
{
	int i, n, completed = 0;

	trace_cq_process(cq);

	/*
	 * budget might be (-1) if the caller does not
	 * want to bound this call, thus we need an unsigned
	 * minimum here.
	 */
	while ((n = __poll_cq(cq, min_t(u32, batch,
					budget - completed), wcs)) > 0) {
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = &wcs[i];

			if (wc->wr_cqe)
				wc->wr_cqe->done(cq, wc);
			else
				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
		}

		completed += n;

		if (n != batch || (budget != -1 && completed >= budget))
			break;
	}

	return completed;
}

/**
 * ib_process_cq_direct - process a CQ in caller context
 * @cq:		CQ to process
 * @budget:	number of CQEs to poll for
 *
 * This function is used to process all outstanding CQ entries.
 * It does not offload CQ processing to a different context and does
 * not ask for completion interrupts from the HCA.
 * Using direct processing on a CQ whose poll context is not IB_POLL_DIRECT
 * may trigger concurrent processing.
 *
 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 * of completions that will be processed is small.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];

	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
}
EXPORT_SYMBOL(ib_process_cq_direct);
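
/*
 * Illustrative sketch (not part of this file's API): a ULP that created
 * its CQ with IB_POLL_DIRECT can reap completions from its own context,
 * for example by opportunistically draining the CQ from its send path.
 * "example_drain" is a hypothetical name and the budget of 64 is an
 * arbitrary small bound.
 */
static void __maybe_unused example_drain(struct ib_cq *cq)
{
	/*
	 * Every polled WC is dispatched through wc->wr_cqe->done();
	 * keep calling until the CQ is empty.
	 */
	while (ib_process_cq_direct(cq, 64) > 0)
		;
}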

static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

static int ib_poll_handler(struct irq_poll *iop, int budget)
{
	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
	struct dim *dim = cq->dim;
	int completed;

	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
	if (completed < budget) {
		irq_poll_complete(&cq->iop);
		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
			trace_cq_reschedule(cq);
			irq_poll_sched(&cq->iop);
		}
	}

	if (dim)
		rdma_dim(dim, completed);

	return completed;
}

static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	trace_cq_schedule(cq);
	irq_poll_sched(&cq->iop);
}

static void ib_cq_poll_work(struct work_struct *work)
{
	struct ib_cq *cq = container_of(work, struct ib_cq, work);
	int completed;

	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
				    IB_POLL_BATCH);
	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
		queue_work(cq->comp_wq, &cq->work);
	else if (cq->dim)
		rdma_dim(cq->dim, completed);
}

static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	trace_cq_schedule(cq);
	queue_work(cq->comp_wq, &cq->work);
}

/**
 * __ib_alloc_cq - allocate a completion queue
 * @dev:		device to allocate the CQ for
 * @private:		driver private data, accessible from cq->cq_context
 * @nr_cqe:		number of CQEs to allocate
 * @comp_vector:	HCA completion vectors for this CQ
 * @poll_ctx:		context to poll the CQ from.
 * @caller:		module owner name.
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
			    int comp_vector, enum ib_poll_context poll_ctx,
			    const char *caller)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe		= nr_cqe,
		.comp_vector	= comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = rdma_zalloc_drv_obj(dev, ib_cq);
	if (!cq)
		return ERR_PTR(ret);

	cq->device = dev;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);
	cq->comp_vector = comp_vector;

	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_free_cq;

	rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
	rdma_restrack_set_name(&cq->res, caller);

	ret = dev->ops.create_cq(cq, &cq_attr, NULL);
	if (ret)
		goto out_free_wc;

	rdma_dim_init(cq);

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;

		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	case IB_POLL_WORKQUEUE:
	case IB_POLL_UNBOUND_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
				ib_comp_wq : ib_comp_unbound_wq;
		break;
	default:
		ret = -EINVAL;
		goto out_destroy_cq;
	}

	rdma_restrack_add(&cq->res);
	trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
	return cq;

out_destroy_cq:
	rdma_dim_destroy(cq);
	cq->device->ops.destroy_cq(cq, NULL);
out_free_wc:
	rdma_restrack_put(&cq->res);
	kfree(cq->wc);
out_free_cq:
	kfree(cq);
	trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq);
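
/*
 * Usage sketch (illustrative, hypothetical names): a ULP embeds struct
 * ib_cqe in its per-request state and points each work request's wr_cqe
 * at it, so the polling loops above dispatch completions directly to the
 * request without any wr_id lookup.
 */
struct example_req {
	struct ib_cqe	cqe;	/* wc->wr_cqe points here on completion */
};

static void example_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_req *req =
		container_of(wc->wr_cqe, struct example_req, cqe);

	/* check wc->status, complete the request, etc. */
	(void)req;
}

static __maybe_unused struct ib_cq *example_setup(struct ib_device *dev,
						  struct example_req *req)
{
	req->cqe.done = example_done;
	/* 128 CQEs on completion vector 0, polled from softirq context */
	return ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
}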

/**
 * __ib_alloc_cq_any - allocate a completion queue
 * @dev:		device to allocate the CQ for
 * @private:		driver private data, accessible from cq->cq_context
 * @nr_cqe:		number of CQEs to allocate
 * @poll_ctx:		context to poll the CQ from
 * @caller:		module owner name
 *
 * Attempt to spread ULP Completion Queues over each device's interrupt
 * vectors. A simple best-effort mechanism is used.
 */
struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
				int nr_cqe, enum ib_poll_context poll_ctx,
				const char *caller)
{
	static atomic_t counter;
	int comp_vector = 0;

	if (dev->num_comp_vectors > 1)
		comp_vector =
			atomic_inc_return(&counter) %
			min_t(int, dev->num_comp_vectors, num_online_cpus());

	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
			     caller);
}
EXPORT_SYMBOL(__ib_alloc_cq_any);
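
/*
 * Sketch (hypothetical names): a ULP that creates one CQ per connection
 * can let ib_alloc_cq_any() round-robin those CQs over the device's
 * completion vectors instead of choosing a vector itself.
 */
static __maybe_unused struct ib_cq *example_conn_cq(struct ib_device *dev,
						    void *conn)
{
	/* 256 CQEs; the completion vector is picked by the counter above */
	return ib_alloc_cq_any(dev, conn, 256, IB_POLL_WORKQUEUE);
}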

/**
 * ib_free_cq - free a completion queue
 * @cq:		completion queue to free.
 */
void ib_free_cq(struct ib_cq *cq)
{
	int ret;

	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;
	if (WARN_ON_ONCE(cq->cqe_used))
		return;

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
	case IB_POLL_UNBOUND_WORKQUEUE:
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	rdma_dim_destroy(cq);
	trace_cq_free(cq);
	ret = cq->device->ops.destroy_cq(cq, NULL);
	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
	rdma_restrack_del(&cq->res);
	kfree(cq->wc);
	kfree(cq);
}
EXPORT_SYMBOL(ib_free_cq);

void ib_cq_pool_init(struct ib_device *dev)
{
	unsigned int i;

	spin_lock_init(&dev->cq_pools_lock);
	for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++)
		INIT_LIST_HEAD(&dev->cq_pools[i]);
}

void ib_cq_pool_destroy(struct ib_device *dev)
{
	struct ib_cq *cq, *n;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) {
		list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
					 pool_entry) {
			WARN_ON(cq->cqe_used);
			cq->shared = false;
			ib_free_cq(cq);
		}
	}
}

static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
			enum ib_poll_context poll_ctx)
{
	LIST_HEAD(tmp_list);
	unsigned int nr_cqs, i;
	struct ib_cq *cq, *n;
	int ret;

	if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
		WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
		return -EINVAL;
	}

	/*
	 * Allocate at least as many CQEs as requested, and otherwise
	 * a reasonable batch size so that we can share CQs between
	 * multiple users instead of allocating a larger number of CQs.
	 */
	nr_cqes = min_t(unsigned int, dev->attrs.max_cqe,
			max(nr_cqes, IB_MAX_SHARED_CQ_SZ));
	nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
	for (i = 0; i < nr_cqs; i++) {
		cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
		if (IS_ERR(cq)) {
			ret = PTR_ERR(cq);
			goto out_free_cqs;
		}
		cq->shared = true;
		list_add_tail(&cq->pool_entry, &tmp_list);
	}

	spin_lock_irq(&dev->cq_pools_lock);
	list_splice(&tmp_list, &dev->cq_pools[poll_ctx]);
	spin_unlock_irq(&dev->cq_pools_lock);

	return 0;

out_free_cqs:
	list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
		cq->shared = false;
		ib_free_cq(cq);
	}
	return ret;
}

/**
 * ib_cq_pool_get() - Find the least used completion queue that matches
 *   a given cpu hint (or least used for wild card affinity) and fits
 *   nr_cqe.
 * @dev: rdma device
 * @nr_cqe: number of needed cqe entries
 * @comp_vector_hint: completion vector hint (-1) for the driver to assign
 *   a comp vector based on internal counter
 * @poll_ctx: cq polling context
 *
 * Finds a cq that satisfies @comp_vector_hint and @nr_cqe requirements and
 * claims entries in it for us.  If no suitable cq is available, a new cq
 * meeting the requirements is allocated and added to the device pool.
 * IB_POLL_DIRECT cannot be used for shared cqs so it is not a valid value
 * for @poll_ctx.
 */
struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
			     int comp_vector_hint,
			     enum ib_poll_context poll_ctx)
{
	static unsigned int default_comp_vector;
	unsigned int vector, num_comp_vectors;
	struct ib_cq *cq, *found = NULL;
	int ret;

	if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
		WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
		return ERR_PTR(-EINVAL);
	}

	num_comp_vectors =
		min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
	/* Project the affinity onto the device completion vector range */
	if (comp_vector_hint < 0) {
		comp_vector_hint =
			(READ_ONCE(default_comp_vector) + 1) % num_comp_vectors;
		WRITE_ONCE(default_comp_vector, comp_vector_hint);
	}
	vector = comp_vector_hint % num_comp_vectors;

	/*
	 * Find the least used CQ with correct affinity and
	 * enough free CQ entries
	 */
	while (!found) {
		spin_lock_irq(&dev->cq_pools_lock);
		list_for_each_entry(cq, &dev->cq_pools[poll_ctx],
				    pool_entry) {
			/*
			 * Check to see if we have found a CQ with the
			 * correct completion vector
			 */
			if (vector != cq->comp_vector)
				continue;
			if (cq->cqe_used + nr_cqe > cq->cqe)
				continue;
			found = cq;
			break;
		}

		if (found) {
			found->cqe_used += nr_cqe;
			spin_unlock_irq(&dev->cq_pools_lock);

			return found;
		}
		spin_unlock_irq(&dev->cq_pools_lock);

		/*
		 * Didn't find a match or ran out of CQs in the device
		 * pool, allocate a new array of CQs.
		 */
		ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx);
		if (ret)
			return ERR_PTR(ret);
	}

	return found;
}
EXPORT_SYMBOL(ib_cq_pool_get);

/**
 * ib_cq_pool_put - Return a CQ taken from a shared pool.
 * @cq: The CQ to return.
 * @nr_cqe: The max number of cqes that the user had requested.
 */
void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
{
	if (WARN_ON_ONCE(nr_cqe > cq->cqe_used))
		return;

	spin_lock_irq(&cq->device->cq_pools_lock);
	cq->cqe_used -= nr_cqe;
	spin_unlock_irq(&cq->device->cq_pools_lock);
}
EXPORT_SYMBOL(ib_cq_pool_put);
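
/*
 * Pairing sketch (illustrative, hypothetical names and sizes): a ULP
 * sizes its per-QP CQE needs, borrows a shared CQ from the pool, and
 * returns the claimed entries once the QP is gone.
 */
static __maybe_unused int example_use_pool(struct ib_device *dev)
{
	unsigned int nr_cqe = 128;
	struct ib_cq *cq;

	/* -1: let the core round-robin the completion vector for us */
	cq = ib_cq_pool_get(dev, nr_cqe, -1, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... create a QP that uses @cq, run traffic, destroy the QP ... */

	ib_cq_pool_put(cq, nr_cqe);	/* release the claimed CQEs */
	return 0;
}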