/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kthread.h>

#include "dpu_core_irq.h"
#include "dpu_trace.h"

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @arg:		private data of callback handler
 * @irq_idx:		interrupt index
 */
static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
{
	struct dpu_kms *dpu_kms = arg;
	struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
	struct dpu_irq_callback *cb;
	unsigned long irq_flags;

	pr_debug("irq_idx=%d\n", irq_idx);

	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
		DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
	}

	atomic_inc(&irq_obj->irq_counts[irq_idx]);

	/*
	 * Perform registered function callback
	 */
	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
		if (cb->func)
			cb->func(cb->arg, irq_idx);
	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

	/*
	 * Clear pending interrupt status in HW.
	 * NOTE: dpu_core_irq_callback_handler is protected by top-level
	 *       spinlock, so it is safe to clear any interrupt status here.
	 */
	dpu_kms->hw_intr->ops.clear_intr_status_nolock(
			dpu_kms->hw_intr,
			irq_idx);
}

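/**
 * dpu_core_irq_idx_lookup - look up the irq index for an interrupt type and
 *			instance index
 * @dpu_kms:		Pointer to dpu kms context
 * @intr_type:		interrupt type to look up
 * @instance_idx:	instance index of the given interrupt type
 *
 * Return: irq index on success, or -EINVAL on invalid parameters
 */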
int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
		enum dpu_intr_type intr_type, u32 instance_idx)
{
	if (!dpu_kms || !dpu_kms->hw_intr ||
			!dpu_kms->hw_intr->ops.irq_idx_lookup)
		return -EINVAL;

	return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
			instance_idx);
}

/**
 * _dpu_core_irq_enable - enable core interrupt given by the index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 */
static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
{
	unsigned long irq_flags;
	int ret = 0, enable_count;

	if (!dpu_kms || !dpu_kms->hw_intr ||
			!dpu_kms->irq_obj.enable_counts ||
			!dpu_kms->irq_obj.irq_counts) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
	trace_dpu_core_irq_enable_idx(irq_idx, enable_count);

	if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
		ret = dpu_kms->hw_intr->ops.enable_irq(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
					irq_idx);

		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);

		spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
		/* empty callback list but interrupt is enabled */
		if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
			DPU_ERROR("irq_idx=%d enabled with no callback\n",
					irq_idx);
		spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
	}

	return ret;
}

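/**
 * dpu_core_irq_enable - enable the given array of interrupt indices
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idxs:		Array of irq indices to enable
 * @irq_count:		Number of entries in @irq_idxs
 *
 * Return: 0 on success, or the error code of the first failed enable
 */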
int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
	int i, ret = 0, counts;

	if (!dpu_kms || !irq_idxs || !irq_count) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
	if (counts)
		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);

	for (i = 0; (i < irq_count) && !ret; i++)
		ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);

	return ret;
}

/**
 * _dpu_core_irq_disable - disable core interrupt given by the index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 */
static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
{
	int ret = 0, enable_count;

	if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
	trace_dpu_core_irq_disable_idx(irq_idx, enable_count);

	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
		ret = dpu_kms->hw_intr->ops.disable_irq(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
					irq_idx);
		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
	}

	return ret;
}

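/**
 * dpu_core_irq_disable - disable the given array of interrupt indices
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idxs:		Array of irq indices to disable
 * @irq_count:		Number of entries in @irq_idxs
 *
 * Return: 0 on success, or the error code of the first failed disable
 */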
int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
	int i, ret = 0, counts;

	if (!dpu_kms || !irq_idxs || !irq_count) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
	if (counts == 2)
		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);

	for (i = 0; (i < irq_count) && !ret; i++)
		ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);

	return ret;
}

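/**
 * dpu_core_irq_read - read interrupt status for the given irq index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 * @clear:		true to clear the interrupt status after reading
 *
 * Return: interrupt status, or 0 on invalid parameters
 */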
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
	if (!dpu_kms || !dpu_kms->hw_intr ||
			!dpu_kms->hw_intr->ops.get_interrupt_status)
		return 0;

	if (irq_idx < 0) {
		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
				__builtin_return_address(0), irq_idx);
		return 0;
	}

	return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
			irq_idx, clear);
}

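/**
 * dpu_core_irq_register_callback - register a callback for the given irq index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 * @register_irq_cb:	callback to invoke when the interrupt fires
 *
 * Re-registering an already registered callback moves it to the tail of
 * the callback list for that index.
 *
 * Return: 0 on success, or -EINVAL on invalid parameters
 */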
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
	trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	list_add_tail(&register_irq_cb->list,
			&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

	return 0;
}

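/**
 * dpu_core_irq_unregister_callback - remove a callback from the given irq index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 * @register_irq_cb:	callback to remove
 *
 * Return: 0 on success, or -EINVAL on invalid parameters
 */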
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	/* empty callback list but interrupt is still enabled */
	if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
		DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

	return 0;
}

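/* clear the pending status of every interrupt in HW */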
static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
{
	if (!dpu_kms || !dpu_kms->hw_intr ||
			!dpu_kms->hw_intr->ops.clear_all_irqs)
		return;

	dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
}

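/* mask every interrupt source in HW */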
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	if (!dpu_kms || !dpu_kms->hw_intr ||
			!dpu_kms->hw_intr->ops.disable_all_irqs)
		return;

	dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
}

#ifdef CONFIG_DEBUG_FS
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

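/* print irq count, enable count and callback count for each active index */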
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_irq *irq_obj = s->private;
	struct dpu_irq_callback *cb;
	unsigned long irq_flags;
	int i, irq_count, enable_count, cb_count;

	if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
		DPU_ERROR("invalid parameters\n");
		return 0;
	}

	for (i = 0; i < irq_obj->total_irqs; i++) {
		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
		cb_count = 0;
		irq_count = atomic_read(&irq_obj->irq_counts[i]);
		enable_count = atomic_read(&irq_obj->enable_counts[i]);
		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
			cb_count++;
		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);

		if (irq_count || enable_count || cb_count)
			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
					i, irq_count, enable_count, cb_count);
	}

	return 0;
}

DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);

int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
			parent, &dpu_kms->irq_obj,
			&dpu_debugfs_core_irq_fops);

	return 0;
}

void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
{
	debugfs_remove(dpu_kms->irq_obj.debugfs_file);
	dpu_kms->irq_obj.debugfs_file = NULL;
}

#else
int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	return 0;
}

void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
{
}
#endif

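/**
 * dpu_core_irq_preinstall - clear and disable all interrupts in HW, then
 *			allocate and initialize the per-index callback tables
 * @dpu_kms:		Pointer to dpu kms context
 */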
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!dpu_kms) {
		DPU_ERROR("invalid dpu_kms\n");
		return;
	} else if (!dpu_kms->dev) {
		DPU_ERROR("invalid drm device\n");
		return;
	} else if (!dpu_kms->dev->dev_private) {
		DPU_ERROR("invalid device private\n");
		return;
	}
	priv = dpu_kms->dev->dev_private;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_all_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	spin_lock_init(&dpu_kms->irq_obj.cb_lock);

	/* Create irq callbacks for all possible irq_idx */
	dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
	dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
			sizeof(struct list_head), GFP_KERNEL);
	dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
			sizeof(atomic_t), GFP_KERNEL);
	dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
			sizeof(atomic_t), GFP_KERNEL);
	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
		INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
		atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
		atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
	}
}

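/* no post-installation work is currently required */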
int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
{
	return 0;
}

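/**
 * dpu_core_irq_uninstall - clear and disable all interrupts in HW, then free
 *			the callback tables; warns about any irq_idx that is
 *			still enabled or still has callbacks registered
 * @dpu_kms:		Pointer to dpu kms context
 */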
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!dpu_kms) {
		DPU_ERROR("invalid dpu_kms\n");
		return;
	} else if (!dpu_kms->dev) {
		DPU_ERROR("invalid drm device\n");
		return;
	} else if (!dpu_kms->dev->dev_private) {
		DPU_ERROR("invalid device private\n");
		return;
	}
	priv = dpu_kms->dev->dev_private;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
		if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
				!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

	dpu_clear_all_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	kfree(dpu_kms->irq_obj.irq_cb_tbl);
	kfree(dpu_kms->irq_obj.enable_counts);
	kfree(dpu_kms->irq_obj.irq_counts);
	dpu_kms->irq_obj.irq_cb_tbl = NULL;
	dpu_kms->irq_obj.enable_counts = NULL;
	dpu_kms->irq_obj.irq_counts = NULL;
	dpu_kms->irq_obj.total_irqs = 0;
}

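/**
 * dpu_core_irq - top-level core interrupt handler: reads and clears the
 *			interrupt statuses, then dispatches the enabled
 *			interrupts to dpu_core_irq_callback_handler
 * @dpu_kms:		Pointer to dpu kms context
 *
 * Return: IRQ_HANDLED
 */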
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
	/*
	 * Read interrupt status from all sources. The statuses are
	 * stored within hw_intr and cleared in HW as they are read.
	 * An individual interrupt status bit is only stored if that
	 * interrupt is enabled.
	 */
	dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);

	/*
	 * Dispatch the fired interrupts to the HW driver for lookup. For
	 * each matching interrupt, the HW driver calls
	 * dpu_core_irq_callback_handler with the irq_idx from the lookup
	 * table; that handler runs the registered callbacks and then
	 * clears the interrupt status.
	 */
	dpu_kms->hw_intr->ops.dispatch_irqs(
			dpu_kms->hw_intr,
			dpu_core_irq_callback_handler,
			dpu_kms);

	return IRQ_HANDLED;
}