/*
 * Copyright (c) 2017 Jean-Paul Etienne <fractalclone@gmail.com>
 * Copyright (c) 2023 Meta
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT sifive_plic_1_0_0

/**
 * @brief Platform Level Interrupt Controller (PLIC) driver
 * for RISC-V processors
 */

#include <stdlib.h>

#include "sw_isr_common.h"

#include <zephyr/debug/symtab.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/device.h>
#include <zephyr/devicetree/interrupt_controller.h>
#include <zephyr/shell/shell.h>

#include <zephyr/sw_isr_table.h>
#include <zephyr/drivers/interrupt_controller/riscv_plic.h>
#include <zephyr/irq.h>

#define PLIC_BASE_ADDR(n) DT_INST_REG_ADDR(n)
/*
 * These register offsets are defined in the RISC-V PLIC spec, see:
 * https://github.com/riscv/riscv-plic-spec
 */
#define CONTEXT_BASE 0x200000
#define CONTEXT_SIZE 0x1000
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04
#define CONTEXT_ENABLE_BASE 0x2000
#define CONTEXT_ENABLE_SIZE 0x80
#define CONTEXT_PENDING_BASE 0x1000
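
/*
 * Illustrative address map implied by the offsets above (not used directly by
 * the driver): for a PLIC at base address B and hart context c,
 *
 *   enable word i of context c : B + CONTEXT_ENABLE_BASE + c * CONTEXT_ENABLE_SIZE + i * 4
 *   priority threshold of c    : B + CONTEXT_BASE + c * CONTEXT_SIZE + CONTEXT_THRESHOLD
 *   claim/complete of c        : B + CONTEXT_BASE + c * CONTEXT_SIZE + CONTEXT_CLAIM
 *
 * For example, with a hypothetical base B = 0x0c000000 and context c = 1, the
 * claim/complete register sits at 0x0c000000 + 0x200000 + 0x1000 + 0x04 = 0x0c201004.
 */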

/*
 * The trigger type is mentioned, but not defined, in the RISC-V PLIC spec.
 * However, it is defined and supported by at least the Andes and Telink
 * datasheets, and is supported by Linux's SiFive PLIC driver.
 */
#ifdef CONFIG_PLIC_SUPPORTS_TRIG_TYPE
#define PLIC_TRIG_LEVEL ((uint32_t)0)
#define PLIC_TRIG_EDGE ((uint32_t)1)
#endif /* CONFIG_PLIC_SUPPORTS_TRIG_TYPE */

/* PLIC registers are 32-bit memory-mapped */
#define PLIC_REG_SIZE 32
#define PLIC_REG_MASK BIT_MASK(LOG2(PLIC_REG_SIZE))
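/*
 * Illustrative example of the math the helpers below perform: local IRQ 37
 * lives in 32-bit register word 37 >> LOG2(PLIC_REG_SIZE) = 1, at bit position
 * 37 & PLIC_REG_MASK = 5.
 */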

#ifdef CONFIG_TEST_INTC_PLIC
#define INTC_PLIC_STATIC
#define INTC_PLIC_STATIC_INLINE
#else
#define INTC_PLIC_STATIC static
#define INTC_PLIC_STATIC_INLINE static inline
#endif /* CONFIG_TEST_INTC_PLIC */

#ifdef CONFIG_PLIC_IRQ_AFFINITY
#if CONFIG_MP_MAX_NUM_CPUS <= 8
typedef uint8_t plic_cpumask_t;
#elif CONFIG_MP_MAX_NUM_CPUS <= 16
typedef uint16_t plic_cpumask_t;
#elif CONFIG_MP_MAX_NUM_CPUS <= 32
typedef uint32_t plic_cpumask_t;
#else
#error "Currently only supports up to 32 cores"
#endif /* CONFIG_MP_MAX_NUM_CPUS */
#endif /* CONFIG_PLIC_IRQ_AFFINITY */

typedef void (*riscv_plic_irq_config_func_t)(void);
struct plic_config {
	mem_addr_t prio;
	mem_addr_t irq_en;
	mem_addr_t reg;
#ifdef CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT
	mem_addr_t pend;
#endif /* CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT */
#ifdef CONFIG_PLIC_SUPPORTS_TRIG_TYPE
	mem_addr_t trig;
#endif /* CONFIG_PLIC_SUPPORTS_TRIG_TYPE */
	uint32_t max_prio;
	/* Number of IRQs that the PLIC physically supports */
	uint32_t riscv_ndev;
	/* Number of IRQs supported in this driver */
	uint32_t nr_irqs;
	uint32_t irq;
	riscv_plic_irq_config_func_t irq_config_func;
	struct _isr_table_entry *isr_table;
	const uint32_t *const hart_context;
};

struct plic_stats {
	uint16_t *const irq_count;
	const int irq_count_len;
};

struct plic_data {
	struct k_spinlock lock;

#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
	struct plic_stats stats;
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */

#ifdef CONFIG_PLIC_IRQ_AFFINITY
	plic_cpumask_t *irq_cpumask;
#endif /* CONFIG_PLIC_IRQ_AFFINITY */

};

static uint32_t save_irq[CONFIG_MP_MAX_NUM_CPUS];
static const struct device *save_dev[CONFIG_MP_MAX_NUM_CPUS];

INTC_PLIC_STATIC_INLINE uint32_t local_irq_to_reg_index(uint32_t local_irq)
{
	return local_irq >> LOG2(PLIC_REG_SIZE);
}

INTC_PLIC_STATIC_INLINE uint32_t local_irq_to_reg_offset(uint32_t local_irq)
{
	return local_irq_to_reg_index(local_irq) * sizeof(uint32_t);
}

static inline uint32_t get_plic_enabled_size(const struct device *dev)
{
	const struct plic_config *config = dev->config;

	return local_irq_to_reg_index(config->nr_irqs) + 1;
}

static ALWAYS_INLINE uint32_t get_hart_context(const struct device *dev, uint32_t hartid)
{
	const struct plic_config *config = dev->config;

	return config->hart_context[hartid];
}

static ALWAYS_INLINE uint32_t get_irq_cpumask(const struct device *dev, uint32_t local_irq)
{
#ifdef CONFIG_PLIC_IRQ_AFFINITY
	const struct plic_data *data = dev->data;

	return data->irq_cpumask[local_irq];
#else
	ARG_UNUSED(dev);
	ARG_UNUSED(local_irq);

	return 0x1;
#endif /* CONFIG_PLIC_IRQ_AFFINITY */
}

static inline mem_addr_t get_context_en_addr(const struct device *dev, uint32_t cpu_num)
{
	const struct plic_config *config = dev->config;
	uint32_t hartid;
	/*
	 * We want to return the irq_en address for the context of the given hart.
	 */
#if CONFIG_MP_MAX_NUM_CPUS > 1
	hartid = _kernel.cpus[cpu_num].arch.hartid;
#else
	hartid = arch_proc_id();
#endif
	return config->irq_en + get_hart_context(dev, hartid) * CONTEXT_ENABLE_SIZE;
}

static inline mem_addr_t get_claim_complete_addr(const struct device *dev)
{
	const struct plic_config *config = dev->config;

	/*
	 * We want to return the claim complete addr for the hart's context.
	 */

	return config->reg + get_hart_context(dev, arch_proc_id()) * CONTEXT_SIZE + CONTEXT_CLAIM;
}

static inline mem_addr_t get_threshold_priority_addr(const struct device *dev, uint32_t cpu_num)
{
	const struct plic_config *config = dev->config;
	uint32_t hartid;

#if CONFIG_MP_MAX_NUM_CPUS > 1
	hartid = _kernel.cpus[cpu_num].arch.hartid;
#else
	hartid = arch_proc_id();
#endif

	return config->reg + (get_hart_context(dev, hartid) * CONTEXT_SIZE);
}

static ALWAYS_INLINE uint32_t local_irq_to_irq(const struct device *dev, uint32_t local_irq)
{
	const struct plic_config *config = dev->config;

	return irq_to_level_2(local_irq) | config->irq;
}

#ifdef CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT
static inline mem_addr_t get_pending_reg(const struct device *dev, uint32_t local_irq)
{
	const struct plic_config *config = dev->config;

	return config->pend + local_irq_to_reg_offset(local_irq);
}
#endif /* CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT */

/**
 * @brief Determine the PLIC device from the IRQ
 *
 * @param irq IRQ number
 *
 * @return PLIC device of that IRQ
 */
static inline const struct device *get_plic_dev_from_irq(uint32_t irq)
{
#ifdef CONFIG_DYNAMIC_INTERRUPTS
	return z_get_sw_isr_device_from_irq(irq);
#else
	return DEVICE_DT_INST_GET(0);
#endif
}

#ifdef CONFIG_PLIC_SUPPORTS_TRIG_TYPE
/**
 * @brief Return the value of the trigger type register for the IRQ
 *
 * If edge-triggered IRQs are supported, this returns the trigger type
 * of the IRQ. If edge-triggered IRQs are not supported, this routine
 * returns 0.
 *
 * @param dev PLIC-instance device
 * @param local_irq PLIC-instance IRQ number whose trigger type to read
 *
 * @return Trigger type register value if the PLIC supports trigger types, PLIC_TRIG_LEVEL otherwise
 */
static uint32_t riscv_plic_irq_trig_val(const struct device *dev, uint32_t local_irq)
{
	const struct plic_config *config = dev->config;
	mem_addr_t trig_addr = config->trig + local_irq_to_reg_offset(local_irq);
	uint32_t offset = local_irq * CONFIG_PLIC_TRIG_TYPE_BITWIDTH;

	return sys_read32(trig_addr) & GENMASK(offset + CONFIG_PLIC_TRIG_TYPE_BITWIDTH - 1, offset);
}
#endif /* CONFIG_PLIC_SUPPORTS_TRIG_TYPE */

static void plic_irq_enable_set_state(uint32_t irq, bool enable)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	const uint32_t local_irq = irq_from_level_2(irq);

	for (uint32_t cpu_num = 0; cpu_num < arch_num_cpus(); cpu_num++) {
		mem_addr_t en_addr =
			get_context_en_addr(dev, cpu_num) + local_irq_to_reg_offset(local_irq);
		uint32_t en_value;

		en_value = sys_read32(en_addr);
		WRITE_BIT(en_value, local_irq & PLIC_REG_MASK,
			  enable ? (get_irq_cpumask(dev, local_irq) & BIT(cpu_num)) != 0 : false);
		sys_write32(en_value, en_addr);
	}
}

/**
 * @brief Enable a riscv PLIC-specific interrupt line
 *
 * This routine enables a RISCV PLIC-specific interrupt line.
 * riscv_plic_irq_enable is called by the RISCV_PRIVILEGED
 * arch_irq_enable function to enable external interrupts at
 * IRQ level == 2, whenever CONFIG_RISCV_HAS_PLIC is set.
 *
 * @param irq IRQ number to enable
 */
void riscv_plic_irq_enable(uint32_t irq)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	struct plic_data *data = dev->data;
	k_spinlock_key_t key = k_spin_lock(&data->lock);

	plic_irq_enable_set_state(irq, true);

	k_spin_unlock(&data->lock, key);
}
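
/*
 * Illustrative usage (hypothetical numbers): with multi-level interrupts, a
 * PLIC source is encoded as a level 2 IRQ before being passed to this API, e.g.
 *
 *   unsigned int irq = irq_to_level_2(10) | plic_parent_irq;
 *   riscv_plic_irq_enable(irq);
 *
 * where 10 is a PLIC-local source number and plic_parent_irq stands for the
 * PLIC's own IRQ line (config->irq), mirroring local_irq_to_irq() above. In
 * practice this is normally done through arch_irq_enable()/IRQ_CONNECT()
 * rather than by calling this function directly.
 */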

/**
 * @brief Disable a riscv PLIC-specific interrupt line
 *
 * This routine disables a RISCV PLIC-specific interrupt line.
 * riscv_plic_irq_disable is called by the RISCV_PRIVILEGED
 * arch_irq_disable function to disable external interrupts at
 * IRQ level == 2, whenever CONFIG_RISCV_HAS_PLIC is set.
 *
 * @param irq IRQ number to disable
 */
void riscv_plic_irq_disable(uint32_t irq)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	struct plic_data *data = dev->data;
	k_spinlock_key_t key = k_spin_lock(&data->lock);

	plic_irq_enable_set_state(irq, false);

	k_spin_unlock(&data->lock, key);
}

/* Check if the local IRQ of a PLIC instance is enabled */
static int local_irq_is_enabled(const struct device *dev, uint32_t local_irq)
{
	uint32_t bit_position = local_irq & PLIC_REG_MASK;
	int is_enabled = IS_ENABLED(CONFIG_PLIC_IRQ_AFFINITY) ? 0 : 1;

	for (uint32_t cpu_num = 0; cpu_num < arch_num_cpus(); cpu_num++) {
		mem_addr_t en_addr =
			get_context_en_addr(dev, cpu_num) + local_irq_to_reg_offset(local_irq);
		uint32_t en_value = sys_read32(en_addr);

		if (IS_ENABLED(CONFIG_PLIC_IRQ_AFFINITY)) {
			is_enabled |= !!(en_value & BIT(bit_position));
		} else {
			is_enabled &= !!(en_value & BIT(bit_position));
		}
	}

	return is_enabled;
}

/**
 * @brief Check if a riscv PLIC-specific interrupt line is enabled
 *
 * This routine checks if a RISCV PLIC-specific interrupt line is enabled.
 * @param irq IRQ number to check
 *
 * @return 1 or 0
 */
int riscv_plic_irq_is_enabled(uint32_t irq)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	struct plic_data *data = dev->data;
	const uint32_t local_irq = irq_from_level_2(irq);
	int ret = 0;

	K_SPINLOCK(&data->lock) {
		ret = local_irq_is_enabled(dev, local_irq);
	}

	return ret;
}

/**
 * @brief Set the priority of a riscv PLIC-specific interrupt line
 *
 * This routine sets the priority of a RISCV PLIC-specific interrupt line.
 * riscv_plic_set_priority is called by the riscv arch_irq_priority_set
 * function to set the priority of an interrupt whenever CONFIG_RISCV_HAS_PLIC is set.
 *
 * @param irq IRQ number for which to set the priority
 * @param priority Priority to set the IRQ to
 */
void riscv_plic_set_priority(uint32_t irq, uint32_t priority)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	const struct plic_config *config = dev->config;
	const uint32_t local_irq = irq_from_level_2(irq);
	mem_addr_t prio_addr = config->prio + (local_irq * sizeof(uint32_t));

	if (priority > config->max_prio) {
		priority = config->max_prio;
	}

	sys_write32(priority, prio_addr);
}

#ifdef CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT
void riscv_plic_irq_set_pending(uint32_t irq)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	const uint32_t local_irq = irq_from_level_2(irq);
	mem_addr_t pend_addr = get_pending_reg(dev, local_irq);
	uint32_t pend_value = sys_read32(pend_addr);

	WRITE_BIT(pend_value, local_irq & PLIC_REG_MASK, true);
	sys_write32(pend_value, pend_addr);
}
#endif /* CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT */
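
/*
 * Illustrative usage (only meaningful when CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT
 * is set): software can latch a source as pending, e.g. to exercise an ISR in
 * a test, with something like
 *
 *   riscv_plic_irq_set_pending(irq);
 *
 * where irq is the same level 2 encoded number accepted by
 * riscv_plic_irq_enable(). The interrupt is then delivered through the normal
 * claim/complete flow once the line is enabled and its priority exceeds the
 * context's threshold.
 */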

/**
 * @brief Get the riscv PLIC-specific interrupt line causing an interrupt
 *
 * This routine returns the RISCV PLIC-specific interrupt line causing an
 * interrupt.
 *
 * @return PLIC-specific interrupt line causing an interrupt.
 */
unsigned int riscv_plic_get_irq(void)
{
	return save_irq[arch_curr_cpu()->id];
}

/**
 * @brief Get riscv PLIC causing an interrupt
 *
 * This routine returns the RISCV PLIC device causing an interrupt.
 *
 * @return PLIC device causing an interrupt.
 */
const struct device *riscv_plic_get_dev(void)
{
	return save_dev[arch_curr_cpu()->id];
}
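
/*
 * Illustrative pattern (hypothetical handler): an ISR shared between several
 * PLIC lines can ask which source fired and on which controller:
 *
 *   static void shared_isr(const void *arg)
 *   {
 *           const struct device *plic = riscv_plic_get_dev();
 *           uint32_t local_irq = riscv_plic_get_irq();
 *
 *           ARG_UNUSED(arg);
 *           ARG_UNUSED(plic);
 *           handle_source(local_irq);   <-- hypothetical dispatch helper
 *   }
 *
 * This works because plic_irq_handler() records both values per CPU before
 * invoking the handler registered in the software ISR table.
 */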

#ifdef CONFIG_PLIC_IRQ_AFFINITY
/**
 * @brief Set the CPU affinity of a riscv PLIC-specific interrupt
 *
 * @param irq IRQ number for which to set the affinity
 * @param cpumask Bitmask specifying which cores can handle the IRQ
 *
 * @return 0 on success, -EINVAL on invalid arguments
 */
int riscv_plic_irq_set_affinity(uint32_t irq, uint32_t cpumask)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	struct plic_data *data = dev->data;
	__maybe_unused const struct plic_config *config = dev->config;
	const uint32_t local_irq = irq_from_level_2(irq);
	k_spinlock_key_t key;

	if (local_irq >= config->nr_irqs) {
		__ASSERT(false, "overflow: irq %d, local_irq %d", irq, local_irq);
		return -EINVAL;
	}

	if ((cpumask & ~BIT_MASK(arch_num_cpus())) != 0) {
		__ASSERT(false, "cpumask: 0x%X", cpumask);
		return -EINVAL;
	}

	key = k_spin_lock(&data->lock);
	/* Update irq_cpumask for the next time the PLIC enable registers are set */
	data->irq_cpumask[local_irq] = (plic_cpumask_t)cpumask;

	/* If the IRQ is enabled, apply the new affinity immediately */
	if (local_irq_is_enabled(dev, local_irq)) {
		plic_irq_enable_set_state(irq, true);
	}
	k_spin_unlock(&data->lock, key);

	return 0;
}
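
/*
 * Illustrative usage (hypothetical IRQ number): restrict a line to CPUs 0 and 1:
 *
 *   riscv_plic_irq_set_affinity(irq, BIT(0) | BIT(1));
 *
 * The mask is stored in plic_data::irq_cpumask and is applied to the
 * per-context enable registers immediately if the line is already enabled,
 * otherwise the next time riscv_plic_irq_enable() is called for it.
 */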
#endif /* CONFIG_PLIC_IRQ_AFFINITY */

#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
/**
 * If there's more than one core, irq_count points to a 2D-array: irq_count[NUM_CPUs + 1][nr_irqs]
 *
 * i.e. NUM_CPUs == 2:
 * CPU 0 [0 ... nr_irqs - 1]
 * CPU 1 [0 ... nr_irqs - 1]
 * TOTAL [0 ... nr_irqs - 1]
 */
static ALWAYS_INLINE uint16_t *get_irq_hit_count_cpu(const struct device *dev, int cpu,
						     uint32_t local_irq)
{
	const struct plic_config *config = dev->config;
	const struct plic_data *data = dev->data;
	uint32_t offset = local_irq;

	if (CONFIG_MP_MAX_NUM_CPUS > 1) {
		offset = cpu * config->nr_irqs + local_irq;
	}

	return &data->stats.irq_count[offset];
}

static ALWAYS_INLINE uint16_t *get_irq_hit_count_total(const struct device *dev, uint32_t local_irq)
{
	const struct plic_config *config = dev->config;
	const struct plic_data *data = dev->data;
	uint32_t offset = local_irq;

	if (CONFIG_MP_MAX_NUM_CPUS > 1) {
		offset = arch_num_cpus() * config->nr_irqs + local_irq;
	}

	return &data->stats.irq_count[offset];
}
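
/*
 * Worked example for the layout above (hypothetical numbers): with
 * arch_num_cpus() == 2 and nr_irqs == 64, the hit counter of local IRQ 5 on
 * CPU 1 lives at irq_count[1 * 64 + 5] = irq_count[69], and its cross-CPU
 * total at irq_count[2 * 64 + 5] = irq_count[133], matching the helpers above.
 */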
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */

static void plic_irq_handler(const struct device *dev)
{
	const struct plic_config *config = dev->config;
	mem_addr_t claim_complete_addr = get_claim_complete_addr(dev);
	struct _isr_table_entry *ite;
	uint32_t cpu_id = arch_curr_cpu()->id;
	/* Get the IRQ number generating the interrupt */
	const uint32_t local_irq = sys_read32(claim_complete_addr);

#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
	uint16_t *cpu_count = get_irq_hit_count_cpu(dev, cpu_id, local_irq);
	uint16_t *total_count = get_irq_hit_count_total(dev, local_irq);

	/* Cap the count at __UINT16_MAX__ */
	if (*total_count < __UINT16_MAX__) {
		(*cpu_count)++;
		if (CONFIG_MP_MAX_NUM_CPUS > 1) {
			(*total_count)++;
		}
	}
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */

	/*
	 * Note: Because the PLIC only supports multicasting of interrupts, all
	 * enabled targets are notified. Only the fastest target claims the
	 * interrupt; the other targets see a claim of ID 0 if no other
	 * interrupt is currently pending.
	 *
	 * (per the RISC-V Privileged Architecture v1.10)
	 */
	if ((CONFIG_MP_MAX_NUM_CPUS > 1) && (local_irq == 0U)) {
		return;
	}

	/*
	 * Save the IRQ in save_irq, to be used, if need be, by subsequent
	 * handlers registered in the _sw_isr_table, as the IRQ number held by
	 * the claim_complete register is cleared upon read.
	 */
	save_irq[cpu_id] = local_irq;
	save_dev[cpu_id] = dev;

	/*
	 * If the IRQ is out of range, call z_irq_spurious.
	 * A call to z_irq_spurious will not return.
	 */
	if ((local_irq == 0U) || (local_irq >= config->nr_irqs)) {
		z_irq_spurious(NULL);
	}

#ifdef CONFIG_PLIC_SUPPORTS_TRIG_EDGE
	uint32_t trig_val = riscv_plic_irq_trig_val(dev, local_irq);
	/*
	 * Edge-triggered interrupts have to be acknowledged before being
	 * handled so that we don't miss the next edge-triggered interrupt.
	 */
	if (trig_val == PLIC_TRIG_EDGE) {
		sys_write32(local_irq, claim_complete_addr);
	}
#endif /* CONFIG_PLIC_SUPPORTS_TRIG_EDGE */

	/* Call the corresponding IRQ handler in _sw_isr_table */
	ite = &config->isr_table[local_irq];
	ite->isr(ite->arg);

	/*
	 * Write to the claim_complete register to indicate to the PLIC
	 * controller that the IRQ has been handled, for level-triggered
	 * interrupts.
	 */
#ifdef CONFIG_PLIC_SUPPORTS_TRIG_EDGE
	/* Complete the IRQ here only if it is level-triggered */
	if (trig_val == PLIC_TRIG_LEVEL) {
		sys_write32(local_irq, claim_complete_addr);
	}
#else
	sys_write32(local_irq, claim_complete_addr);
#endif /* CONFIG_PLIC_SUPPORTS_TRIG_EDGE */
}

/**
 * @brief Initialize the Platform Level Interrupt Controller
 *
 * @param dev PLIC device struct
 *
 * @retval 0 on success.
 */
static int plic_init(const struct device *dev)
{
	const struct plic_config *config = dev->config;
	mem_addr_t en_addr, thres_prio_addr;
	mem_addr_t prio_addr = config->prio;

	/* Iterate through each of the contexts, HART + PRIV */
	for (uint32_t cpu_num = 0; cpu_num < arch_num_cpus(); cpu_num++) {
		en_addr = get_context_en_addr(dev, cpu_num);
		thres_prio_addr = get_threshold_priority_addr(dev, cpu_num);

		/* Ensure that all interrupts are disabled initially */
		for (uint32_t i = 0; i < get_plic_enabled_size(dev); i++) {
			sys_write32(0U, en_addr + (i * sizeof(uint32_t)));
		}

		/* Set threshold priority to 0 */
		sys_write32(0U, thres_prio_addr);
	}

	/* Set priority of each interrupt line to 0 initially */
	for (uint32_t i = 0; i < config->nr_irqs; i++) {
		sys_write32(0U, prio_addr + (i * sizeof(uint32_t)));
	}

	/* Configure IRQ for PLIC driver */
	config->irq_config_func();

	return 0;
}

#ifdef CONFIG_PLIC_SHELL
static inline int parse_device(const struct shell *sh, size_t argc, char *argv[],
			       const struct device **plic)
{
	ARG_UNUSED(argc);

	*plic = device_get_binding(argv[1]);
	if (*plic == NULL) {
		shell_error(sh, "PLIC device (%s) not found!\n", argv[1]);
		return -ENODEV;
	}

	return 0;
}

#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
static int cmd_stats_get(const struct shell *sh, size_t argc, char *argv[])
{
	const struct device *dev;
	int ret = parse_device(sh, argc, argv, &dev);
	uint16_t min_hit = 0;

	if (ret != 0) {
		return ret;
	}

	const struct plic_config *config = dev->config;

	if (argc > 2) {
		min_hit = (uint16_t)shell_strtoul(argv[2], 10, &ret);
		if (ret != 0) {
			shell_error(sh, "Failed to parse %s: %d", argv[2], ret);
			return ret;
		}
		shell_print(sh, "IRQ line with > %d hits:", min_hit);
	}

	shell_fprintf(sh, SHELL_NORMAL, " IRQ");
	for (int cpu_id = 0; cpu_id < arch_num_cpus(); cpu_id++) {
		shell_fprintf(sh, SHELL_NORMAL, " CPU%2d", cpu_id);
	}
	if (CONFIG_MP_MAX_NUM_CPUS > 1) {
		shell_fprintf(sh, SHELL_NORMAL, " Total");
	}
	shell_fprintf(sh, SHELL_NORMAL, "\tISR(ARG)\n");

	for (int i = 0; i < config->nr_irqs; i++) {
		uint16_t *total_count = get_irq_hit_count_total(dev, i);

		if (*total_count <= min_hit) {
			/* Skip printing if the total hit count does not exceed min_hit */
			continue;
		}

		shell_fprintf(sh, SHELL_NORMAL, " %4d", i); /* IRQ number */
		/* Print the IRQ hit counts on each CPU */
		for (int cpu_id = 0; cpu_id < arch_num_cpus(); cpu_id++) {
			uint16_t *cpu_count = get_irq_hit_count_cpu(dev, cpu_id, i);

			shell_fprintf(sh, SHELL_NORMAL, " %5d", *cpu_count);
		}
		if (CONFIG_MP_MAX_NUM_CPUS > 1) {
			/* If there's > 1 CPU, print the total hit count at the end */
			shell_fprintf(sh, SHELL_NORMAL, " %5d", *total_count);
		}
#ifdef CONFIG_SYMTAB
		const char *name =
			symtab_find_symbol_name((uintptr_t)config->isr_table[i].isr, NULL);

		shell_fprintf(sh, SHELL_NORMAL, "\t%s(%p)\n", name, config->isr_table[i].arg);
#else
		shell_fprintf(sh, SHELL_NORMAL, "\t%p(%p)\n", (void *)config->isr_table[i].isr,
			      config->isr_table[i].arg);
#endif /* CONFIG_SYMTAB */
	}
	shell_print(sh, "");

	return 0;
}

static int cmd_stats_clear(const struct shell *sh, size_t argc, char *argv[])
{
	const struct device *dev;
	int ret = parse_device(sh, argc, argv, &dev);

	if (ret != 0) {
		return ret;
	}

	const struct plic_data *data = dev->data;
	const struct plic_config *config = dev->config;
	struct plic_stats stat = data->stats;

	memset(stat.irq_count, 0,
	       config->nr_irqs *
		       COND_CODE_1(CONFIG_MP_MAX_NUM_CPUS, (1),
				   (UTIL_INC(CONFIG_MP_MAX_NUM_CPUS))) *
		       sizeof(uint16_t));

	shell_print(sh, "Cleared stats of %s.\n", dev->name);

	return 0;
}
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */

#ifdef CONFIG_PLIC_SHELL_IRQ_AFFINITY
static int cmd_affinity_set(const struct shell *sh, size_t argc, char **argv)
{
	ARG_UNUSED(argc);

	uint32_t local_irq, irq, mask;
	const struct device *dev;
	int rc = parse_device(sh, argc, argv, &dev);

	if (rc != 0) {
		return rc;
	}

	const struct plic_config *config = dev->config;

	local_irq = (uint32_t)shell_strtol(argv[2], 10, &rc);
	if (rc != 0) {
		shell_error(sh, "Failed to parse %s: %d", argv[2], rc);
	}

	if (local_irq >= config->nr_irqs) {
		shell_error(sh, "local_irq (%d) >= nr_irqs (%d)", local_irq, config->nr_irqs);
		return -EINVAL;
	}

	mask = (uint32_t)shell_strtol(argv[3], 16, &rc);
	if (rc != 0) {
		shell_error(sh, "Failed to parse %s: %d", argv[3], rc);
	}

	if ((mask & ~BIT_MASK(arch_num_cpus())) != 0) {
		shell_error(sh, "cpumask: 0x%X num_cpus: %d", mask, arch_num_cpus());
		return -EINVAL;
	}

	if (local_irq != 0) {
		irq = local_irq_to_irq(dev, local_irq);
		riscv_plic_irq_set_affinity(irq, mask);
		shell_print(sh, "IRQ %d affinity set to 0x%X", local_irq, mask);
	} else {
		for (local_irq = 1; local_irq < config->nr_irqs; local_irq++) {
			irq = local_irq_to_irq(dev, local_irq);
			riscv_plic_irq_set_affinity(irq, mask);
		}
		shell_print(sh, "All IRQ affinities set to 0x%X", mask);
	}

	return 0;
}

static int cmd_affinity_get(const struct shell *sh, size_t argc, char **argv)
{
	ARG_UNUSED(argc);

	const struct device *dev;
	int rc = parse_device(sh, argc, argv, &dev);

	if (rc != 0) {
		return rc;
	}

	const struct plic_config *config = dev->config;

	shell_print(sh, " IRQ MASK");
	if (argc == 2) {
		for (uint32_t local_irq = 0; local_irq < config->nr_irqs; local_irq++) {
			shell_print(sh, "%4d 0x%X", local_irq, get_irq_cpumask(dev, local_irq));
		}
	} else {
		uint32_t local_irq = (uint32_t)shell_strtol(argv[2], 10, &rc);

		if (rc != 0) {
			shell_error(sh, "Failed to parse %s: %d", argv[2], rc);
		}

		if (local_irq >= config->nr_irqs) {
			shell_error(sh, "local_irq (%d) >= nr_irqs (%d)", local_irq,
				    config->nr_irqs);
			return -EINVAL;
		}

		shell_print(sh, "%4d 0x%X", local_irq, get_irq_cpumask(dev, local_irq));
	}

	return 0;
}
#endif /* CONFIG_PLIC_SHELL_IRQ_AFFINITY */

/* Device name autocompletion support */
static void device_name_get(size_t idx, struct shell_static_entry *entry)
{
	const struct device *dev = shell_device_lookup(idx, "interrupt-controller");

	entry->syntax = (dev != NULL) ? dev->name : NULL;
	entry->handler = NULL;
	entry->help = NULL;
	entry->subcmd = NULL;
}

SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get);

#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
SHELL_STATIC_SUBCMD_SET_CREATE(plic_stats_cmds,
	SHELL_CMD_ARG(get, &dsub_device_name,
		      "Read PLIC's stats.\n"
		      "Usage: plic stats get <device> [minimum hits]",
		      cmd_stats_get, 2, 1),
	SHELL_CMD_ARG(clear, &dsub_device_name,
		      "Reset PLIC's stats.\n"
		      "Usage: plic stats clear <device>",
		      cmd_stats_clear, 2, 0),
	SHELL_SUBCMD_SET_END
);
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */

#ifdef CONFIG_PLIC_SHELL_IRQ_AFFINITY
SHELL_STATIC_SUBCMD_SET_CREATE(plic_affinity_cmds,
	SHELL_CMD_ARG(set, &dsub_device_name,
		      "Set IRQ affinity.\n"
		      "Usage: plic affinity set <device> <local_irq> <cpumask>",
		      cmd_affinity_set, 4, 0),
	SHELL_CMD_ARG(get, &dsub_device_name,
		      "Get IRQ affinity.\n"
		      "Usage: plic affinity get <device> <local_irq>",
		      cmd_affinity_get, 2, 1),
	SHELL_SUBCMD_SET_END);
#endif /* CONFIG_PLIC_SHELL_IRQ_AFFINITY */

SHELL_STATIC_SUBCMD_SET_CREATE(plic_cmds,
#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
	SHELL_CMD(stats, &plic_stats_cmds, "IRQ stats", NULL),
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */
#ifdef CONFIG_PLIC_SHELL_IRQ_AFFINITY
	SHELL_CMD(affinity, &plic_affinity_cmds, "IRQ affinity", NULL),
#endif /* CONFIG_PLIC_SHELL_IRQ_AFFINITY */
	SHELL_SUBCMD_SET_END
);

SHELL_CMD_REGISTER(plic, &plic_cmds, "PLIC shell commands", NULL);
#endif /* CONFIG_PLIC_SHELL */

#define PLIC_MIN_IRQ_NUM(n) MIN(DT_INST_PROP(n, riscv_ndev), CONFIG_MAX_IRQ_PER_AGGREGATOR)

#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
#define PLIC_INTC_IRQ_COUNT_BUF_DEFINE(n) \
	static uint16_t local_irq_count_##n[COND_CODE_1(CONFIG_MP_MAX_NUM_CPUS, (1), \
						(UTIL_INC(CONFIG_MP_MAX_NUM_CPUS)))] \
					[PLIC_MIN_IRQ_NUM(n)];
#define PLIC_INTC_IRQ_COUNT_INIT(n) \
	.stats = { \
		.irq_count = &local_irq_count_##n[0][0], \
	},

#else
#define PLIC_INTC_IRQ_COUNT_BUF_DEFINE(n)
#define PLIC_INTC_IRQ_COUNT_INIT(n)
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */

#ifdef CONFIG_PLIC_IRQ_AFFINITY
#define PLIC_IRQ_CPUMASK_BUF_DECLARE(n) \
	static plic_cpumask_t irq_cpumask_##n[PLIC_MIN_IRQ_NUM(n)] = { \
		[0 ...(PLIC_MIN_IRQ_NUM(n) - 1)] = CONFIG_PLIC_IRQ_AFFINITY_MASK, \
	}
#define PLIC_IRQ_CPUMASK_BUF_INIT(n) .irq_cpumask = &irq_cpumask_##n[0],
#else
#define PLIC_IRQ_CPUMASK_BUF_DECLARE(n)
#define PLIC_IRQ_CPUMASK_BUF_INIT(n)
#endif /* CONFIG_PLIC_IRQ_AFFINITY */

#define PLIC_INTC_DATA_INIT(n) \
	PLIC_INTC_IRQ_COUNT_BUF_DEFINE(n); \
	PLIC_IRQ_CPUMASK_BUF_DECLARE(n); \
	static struct plic_data plic_data_##n = { \
		PLIC_INTC_IRQ_COUNT_INIT(n) \
		PLIC_IRQ_CPUMASK_BUF_INIT(n) \
	};

#define PLIC_INTC_IRQ_FUNC_DECLARE(n) static void plic_irq_config_func_##n(void)

#define PLIC_INTC_IRQ_FUNC_DEFINE(n) \
	static void plic_irq_config_func_##n(void) \
	{ \
		IRQ_CONNECT(DT_INST_IRQN(n), 0, plic_irq_handler, DEVICE_DT_INST_GET(n), 0); \
		irq_enable(DT_INST_IRQN(n)); \
	}

#define HART_CONTEXTS(i, n) IF_ENABLED(IS_EQ(DT_INST_IRQN_BY_IDX(n, i), DT_INST_IRQN(n)), (i,))
#define PLIC_HART_CONTEXT_DECLARE(n) \
	INTC_PLIC_STATIC const uint32_t plic_hart_contexts_##n[DT_CHILD_NUM(DT_PATH(cpus))] = { \
		LISTIFY(DT_INST_NUM_IRQS(n), HART_CONTEXTS, (), n)}

#define PLIC_INTC_CONFIG_INIT(n) \
	PLIC_INTC_IRQ_FUNC_DECLARE(n); \
	PLIC_HART_CONTEXT_DECLARE(n); \
	static const struct plic_config plic_config_##n = { \
		.prio = PLIC_BASE_ADDR(n), \
		.irq_en = PLIC_BASE_ADDR(n) + CONTEXT_ENABLE_BASE, \
		.reg = PLIC_BASE_ADDR(n) + CONTEXT_BASE, \
		IF_ENABLED(CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT, \
			   (.pend = PLIC_BASE_ADDR(n) + CONTEXT_PENDING_BASE,)) \
		IF_ENABLED(CONFIG_PLIC_SUPPORTS_TRIG_TYPE, \
			   (.trig = PLIC_BASE_ADDR(n) + CONFIG_PLIC_TRIG_TYPE_REG_OFFSET,)) \
		.max_prio = DT_INST_PROP(n, riscv_max_priority), \
		.riscv_ndev = DT_INST_PROP(n, riscv_ndev), \
		.nr_irqs = PLIC_MIN_IRQ_NUM(n), \
		.irq = DT_INST_IRQN(n), \
		.irq_config_func = plic_irq_config_func_##n, \
		.isr_table = &_sw_isr_table[INTC_INST_ISR_TBL_OFFSET(n)], \
		.hart_context = plic_hart_contexts_##n, \
	}; \
	PLIC_INTC_IRQ_FUNC_DEFINE(n)

#define PLIC_INTC_DEVICE_INIT(n) \
	IRQ_PARENT_ENTRY_DEFINE( \
		plic##n, DEVICE_DT_INST_GET(n), DT_INST_IRQN(n), \
		INTC_INST_ISR_TBL_OFFSET(n), \
		DT_INST_INTC_GET_AGGREGATOR_LEVEL(n)); \
	PLIC_INTC_CONFIG_INIT(n) \
	PLIC_INTC_DATA_INIT(n) \
	DEVICE_DT_INST_DEFINE(n, &plic_init, NULL, \
			      &plic_data_##n, &plic_config_##n, \
			      PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, \
			      NULL);

DT_INST_FOREACH_STATUS_OKAY(PLIC_INTC_DEVICE_INIT)