/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

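/*
 * Walk the process queue list and return the node whose user queue (q) or
 * kernel queue (kq) carries the given queue ID, or NULL if no queue with
 * that ID exists for this process.
 */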
static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}

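/*
 * Allocate the lowest free slot in the per-process queue ID bitmap and
 * return it through *qid. Returns -ENOMEM once all
 * KFD_MAX_NUM_OF_QUEUES_PER_PROCESS slots are in use.
 */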
static int find_available_queue_slot(struct process_queue_manager *pqm,
					unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pasid 0x%x\n",
				pqm->process->pasid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}

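/*
 * Tear down all queues that this process owns on one device by asking the
 * device queue manager to terminate them. The already_dequeued flag makes
 * repeated calls harmless no-ops.
 */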
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;

	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
	pdd->already_dequeued = true;
}

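/*
 * Attach GWS (global wave sync) resources to a user queue, or detach them
 * when gws is NULL. Only one queue per process may have GWS assigned at a
 * time; on success the queue is updated through the device queue manager so
 * the change takes effect.
 */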
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws)
{
	struct kfd_dev *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only allow one queue per process to have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if (gws)
		ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
			gws, &mem);
	else
		ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
			pqn->q->gws);
	if (unlikely(ret))
		return ret;

	pqn->q->gws = mem;
	pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q);
}

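/*
 * Dequeue the process's queues from every device it has opened, one
 * kfd_process_device at a time.
 */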
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		kfd_process_dequeue_from_device(pdd);
}

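/*
 * Initialize a per-process queue manager: an empty queue list plus a bitmap
 * that tracks which of the KFD_MAX_NUM_OF_QUEUES_PER_PROCESS queue IDs are
 * in use.
 */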
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap =
			kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					BITS_PER_BYTE), GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}

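/*
 * Tear down the process queue manager: release any GWS still attached to a
 * queue, free every queue node, and free the queue ID bitmap.
 */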
void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q && pqn->q->gws)
			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
				pqn->q->gws);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	kfree(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}

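/*
 * Allocate and fill in a queue structure for a user-mode queue. The
 * doorbell is mapped by user space and the VMID is assigned later by the
 * device queue manager, so both are left unset here.
 */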
static int init_user_queue(struct process_queue_manager *pqm,
				struct kfd_dev *dev, struct queue **q,
				struct queue_properties *q_properties,
				struct file *f, unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;

	/* let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	pr_debug("PQM After init queue\n");

	return retval;
}

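/*
 * Create a queue (compute, SDMA or DIQ) for the process on the given
 * device: allocate a queue ID, register the process with the device queue
 * manager if this is its first queue, and hand the queue to the DQM. On
 * success, *qid holds the new queue ID and, for user queues,
 * *p_doorbell_offset_in_process holds the doorbell offset (in bytes) within
 * the process doorbell pages.
 */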
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_dev *dev,
			    struct file *f,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	q = NULL;
	kq = NULL;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	/*
	 * For a debug process, verify that it is within the static queues
	 * limit; currently the limit is set to half of the total available
	 * HQD slots. If we are just about to create the DIQ, the is_debug
	 * flag is not set yet, so check the queue type as well.
	 */
	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
		max_queues = dev->device_info->max_no_of_hqd/2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;

	retval = find_available_queue_slot(pqm, qid);
	if (retval != 0)
		return retval;

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}

	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether an SDMA queue can be allocated here, because
		 * allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
		print_queue(q);
		break;

	case KFD_QUEUE_TYPE_COMPUTE:
		/* check if there is over-subscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_DIQ:
		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
		if (!kq) {
			retval = -ENOMEM;
			goto err_create_queue;
		}
		kq->queue->properties.queue_id = *qid;
		pqn->kq = kq;
		pqn->q = NULL;
		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
							kq, &pdd->qpd);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}

	if (retval != 0) {
		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
			pqm->process->pasid, type, retval);
		goto err_create_queue;
	}

	if (q && p_doorbell_offset_in_process)
		/* Return the doorbell offset within the doorbell page
		 * to the caller so it can be passed up to user mode
		 * (in bytes).
		 * There are always 1024 doorbells per process, so in case
		 * of 8-byte doorbells, there are two doorbell pages per
		 * process.
		 */
		*p_doorbell_offset_in_process =
			(q->properties.doorbell_off * sizeof(uint32_t)) &
			(kfd_doorbell_process_slice(dev) - 1);

	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;

err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq, false);
	kfree(pqn);
err_allocate_pqn:
	/* If the queues list is empty, unregister the process from the device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
	return retval;
}

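/*
 * Destroy the queue identified by qid: tell the device queue manager to
 * remove it, release any GWS and CU mask attached to it, free the queue
 * node, and return the queue ID to the bitmap. If this was the process's
 * last queue on the device, the process is unregistered from the DQM.
 */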
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_dev *dev;
	int retval;

	dqm = NULL;

	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq, false);
	}

	if (pqn->q) {
		kfd_procfs_del_queue(pqn->q);
		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
				pqm->process->pasid,
				pqn->q->properties.queue_id, retval);
			if (retval != -ETIME)
				goto err_destroy_queue;
		}

		if (pqn->q->gws) {
			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
				pqn->q->gws);
			pdd->qpd.num_gws = 0;
		}

		kfree(pqn->q->properties.cu_mask);
		pqn->q->properties.cu_mask = NULL;
		uninit_queue(pqn->q);
	}

	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}

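/*
 * Update the ring buffer address, size, percentage and priority of an
 * existing user queue, then push the new properties to the device queue
 * manager.
 */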
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q);
	if (retval != 0)
		return retval;

	return 0;
}

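/*
 * Replace the CU (compute unit) mask of an existing user queue with the
 * mask supplied in p, then update the queue through the device queue
 * manager so the new mask takes effect.
 */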
int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/* Free the old CU mask memory if it is already allocated, then
	 * take over the new CU mask buffer allocated by the caller.
	 */
	kfree(pqn->q->properties.cu_mask);

	pqn->q->properties.cu_mask_count = p->cu_mask_count;
	pqn->q->properties.cu_mask = p->cu_mask;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q);
	if (retval != 0)
		return retval;

	return 0;
}

struct kernel_queue *pqm_get_kernel_queue(
					struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (pqn && pqn->kq)
		return pqn->kq;

	return NULL;
}

struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	return pqn ? pqn->q : NULL;
}

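/*
 * Copy the control stack of a user queue to a user-space buffer via the
 * device queue manager's get_wave_state hook, reporting how much of the
 * control stack and wave save area is in use.
 */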
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}

#if defined(CONFIG_DEBUG_FS)

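/*
 * debugfs helper: dump the MQD of every queue owned by this process,
 * labelling each with its type and device ID.
 */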
int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			q = pqn->q;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_SDMA:
			case KFD_QUEUE_TYPE_SDMA_XGMI:
				seq_printf(m, "  SDMA queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_SDMA;
				break;
			case KFD_QUEUE_TYPE_COMPUTE:
				seq_printf(m, "  Compute queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_CP;
				break;
			default:
				seq_printf(m,
				"  Bad user queue type %d on device %x\n",
					   q->properties.type, q->device->id);
				continue;
			}
			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
		} else if (pqn->kq) {
			q = pqn->kq->queue;
			mqd_mgr = pqn->kq->mqd_mgr;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_DIQ:
				seq_printf(m, "  DIQ on device %x\n",
					   pqn->kq->dev->id);
				break;
			default:
				seq_printf(m,
				"  Bad kernel queue type %d on device %x\n",
					   q->properties.type,
					   pqn->kq->dev->id);
				continue;
			}
		} else {
			seq_printf(m,
		"  Weird: Queue node with neither kernel nor user queue\n");
			continue;
		}

		r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
		if (r != 0)
			break;
	}

	return r;
}

#endif