1 /*
2 * Copyright (c) 2017 Jean-Paul Etienne <fractalclone@gmail.com>
3 * Copyright (c) 2023 Meta
4 * Contributors: 2018 Antmicro <www.antmicro.com>
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 #define DT_DRV_COMPAT sifive_plic_1_0_0
10
11 /**
12 * @brief Platform Level Interrupt Controller (PLIC) driver
13 * for RISC-V processors
14 */
15
16 #include <stdlib.h>
17
18 #include "sw_isr_common.h"
19
20 #include <zephyr/debug/symtab.h>
21 #include <zephyr/kernel.h>
22 #include <zephyr/arch/cpu.h>
23 #include <zephyr/device.h>
24 #include <zephyr/devicetree/interrupt_controller.h>
25 #include <zephyr/shell/shell.h>
26
27 #include <zephyr/sw_isr_table.h>
28 #include <zephyr/drivers/interrupt_controller/riscv_plic.h>
29 #include <zephyr/irq.h>
30
31 #define PLIC_BASE_ADDR(n) DT_INST_REG_ADDR(n)
32 /*
 * These registers' offsets are defined in the RISC-V PLIC specs, see:
34 * https://github.com/riscv/riscv-plic-spec
35 */
36 #define CONTEXT_BASE 0x200000
37 #define CONTEXT_SIZE 0x1000
38 #define CONTEXT_THRESHOLD 0x00
39 #define CONTEXT_CLAIM 0x04
40 #define CONTEXT_ENABLE_BASE 0x2000
41 #define CONTEXT_ENABLE_SIZE 0x80
42 #define CONTEXT_PENDING_BASE 0x1000
43
44 /*
45 * Trigger type is mentioned, but not defined in the RISCV PLIC specs.
 * However, it is defined and supported by at least the Andes & Telink datasheets, and is supported
47 * in Linux's SiFive PLIC driver
48 */
49 #ifdef CONFIG_PLIC_SUPPORTS_TRIG_TYPE
50 #define PLIC_TRIG_LEVEL ((uint32_t)0)
51 #define PLIC_TRIG_EDGE ((uint32_t)1)
52 #endif /* CONFIG_PLIC_SUPPORTS_TRIG_TYPE */
53
54 /* PLIC registers are 32-bit memory-mapped */
55 #define PLIC_REG_SIZE 32
56 #define PLIC_REG_MASK BIT_MASK(LOG2(PLIC_REG_SIZE))
57
58 #ifdef CONFIG_TEST_INTC_PLIC
59 #define INTC_PLIC_STATIC
60 #define INTC_PLIC_STATIC_INLINE
61 #else
62 #define INTC_PLIC_STATIC static
63 #define INTC_PLIC_STATIC_INLINE static inline
64 #endif /* CONFIG_TEST_INTC_PLIC */
65
66 #ifdef CONFIG_PLIC_IRQ_AFFINITY
67 #if CONFIG_MP_MAX_NUM_CPUS <= 8
68 typedef uint8_t plic_cpumask_t;
69 #elif CONFIG_MP_MAX_NUM_CPUS <= 16
70 typedef uint16_t plic_cpumask_t;
71 #elif CONFIG_MP_MAX_NUM_CPUS <= 32
72 typedef uint32_t plic_cpumask_t;
73 #else
74 #error "Currently only supports up to 32 cores"
75 #endif /* CONFIG_MP_MAX_NUM_CPUS */
76 #endif /* CONFIG_PLIC_IRQ_AFFINITY */
77
78 typedef void (*riscv_plic_irq_config_func_t)(void);
79 struct plic_config {
80 mem_addr_t prio;
81 mem_addr_t irq_en;
82 mem_addr_t reg;
83 #ifdef CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT
84 mem_addr_t pend;
85 #endif /* CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT */
86 #ifdef CONFIG_PLIC_SUPPORTS_TRIG_TYPE
87 mem_addr_t trig;
88 #endif /* CONFIG_PLIC_SUPPORTS_TRIG_TYPE */
89 uint32_t max_prio;
90 /* Number of IRQs that the PLIC physically supports */
91 uint32_t riscv_ndev;
92 /* Number of IRQs supported in this driver */
93 uint32_t nr_irqs;
94 uint32_t irq;
95 riscv_plic_irq_config_func_t irq_config_func;
96 const struct _isr_table_entry *isr_table;
97 const uint32_t *const hart_context;
98 };
99
100 struct plic_stats {
101 uint16_t *const irq_count;
102 const int irq_count_len;
103 };
104
105 struct plic_data {
106 struct k_spinlock lock;
107
108 #ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
109 struct plic_stats stats;
110 #endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */
111
112 #ifdef CONFIG_PLIC_IRQ_AFFINITY
113 plic_cpumask_t *irq_cpumask;
114 #endif /* CONFIG_PLIC_IRQ_AFFINITY */
115
116 };
117
/* Last claimed IRQ and its PLIC device per CPU, recorded by plic_irq_handler() */
static uint32_t save_irq[CONFIG_MP_MAX_NUM_CPUS];
static const struct device *save_dev[CONFIG_MP_MAX_NUM_CPUS];
120
local_irq_to_reg_index(uint32_t local_irq)121 INTC_PLIC_STATIC_INLINE uint32_t local_irq_to_reg_index(uint32_t local_irq)
122 {
123 return local_irq >> LOG2(PLIC_REG_SIZE);
124 }
125
local_irq_to_reg_offset(uint32_t local_irq)126 INTC_PLIC_STATIC_INLINE uint32_t local_irq_to_reg_offset(uint32_t local_irq)
127 {
128 return local_irq_to_reg_index(local_irq) * sizeof(uint32_t);
129 }
130
get_plic_enabled_size(const struct device * dev)131 static inline uint32_t get_plic_enabled_size(const struct device *dev)
132 {
133 const struct plic_config *config = dev->config;
134
135 return local_irq_to_reg_index(config->nr_irqs) + 1;
136 }
137
get_hart_context(const struct device * dev,uint32_t hartid)138 static ALWAYS_INLINE uint32_t get_hart_context(const struct device *dev, uint32_t hartid)
139 {
140 const struct plic_config *config = dev->config;
141
142 return config->hart_context[hartid];
143 }
144
/*
 * Return the bitmask of CPUs allowed to handle @p local_irq.
 * Without CONFIG_PLIC_IRQ_AFFINITY, all IRQs are routed to CPU 0 only.
 */
static ALWAYS_INLINE uint32_t get_irq_cpumask(const struct device *dev, uint32_t local_irq)
{
#ifdef CONFIG_PLIC_IRQ_AFFINITY
	const struct plic_data *data = dev->data;

	return data->irq_cpumask[local_irq];
#else
	ARG_UNUSED(dev);
	ARG_UNUSED(local_irq);

	/* Fixed mask: CPU 0 only */
	return 0x1;
#endif /* CONFIG_PLIC_IRQ_AFFINITY */
}
158
/* Enable-register base address for the context of the hart running CPU @p cpu_num. */
static inline mem_addr_t get_context_en_addr(const struct device *dev, uint32_t cpu_num)
{
	const struct plic_config *config = dev->config;
	uint32_t hartid;
	/*
	 * We want to return the irq_en address for the context of given hart.
	 */
#if CONFIG_MP_MAX_NUM_CPUS > 1
	hartid = _kernel.cpus[cpu_num].arch.hartid;
#else
	/* Single-core: cpu_num can only refer to the current hart */
	hartid = arch_proc_id();
#endif
	return config->irq_en + get_hart_context(dev, hartid) * CONTEXT_ENABLE_SIZE;
}
173
get_claim_complete_addr(const struct device * dev)174 static inline mem_addr_t get_claim_complete_addr(const struct device *dev)
175 {
176 const struct plic_config *config = dev->config;
177
178 /*
179 * We want to return the claim complete addr for the hart's context.
180 */
181
182 return config->reg + get_hart_context(dev, arch_proc_id()) * CONTEXT_SIZE + CONTEXT_CLAIM;
183 }
184
/* Threshold-priority register address for the context of the hart running CPU @p cpu_num. */
static inline mem_addr_t get_threshold_priority_addr(const struct device *dev, uint32_t cpu_num)
{
	const struct plic_config *config = dev->config;
	uint32_t hartid;

#if CONFIG_MP_MAX_NUM_CPUS > 1
	hartid = _kernel.cpus[cpu_num].arch.hartid;
#else
	/* Single-core: cpu_num can only refer to the current hart */
	hartid = arch_proc_id();
#endif

	/* Threshold register sits at offset 0 of the context block */
	return config->reg + (get_hart_context(dev, hartid) * CONTEXT_SIZE);
}
198
#ifdef CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT
/* Address of the pending word that contains @p local_irq. */
static inline mem_addr_t get_pending_reg(const struct device *dev, uint32_t local_irq)
{
	const struct plic_config *config = dev->config;
	mem_addr_t base = config->pend;

	return base + local_irq_to_reg_offset(local_irq);
}
#endif /* CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT */
207
208 /**
209 * @brief Determine the PLIC device from the IRQ
210 *
211 * @param irq IRQ number
212 *
213 * @return PLIC device of that IRQ
214 */
get_plic_dev_from_irq(uint32_t irq)215 static inline const struct device *get_plic_dev_from_irq(uint32_t irq)
216 {
217 #ifdef CONFIG_DYNAMIC_INTERRUPTS
218 return z_get_sw_isr_device_from_irq(irq);
219 #else
220 return DEVICE_DT_INST_GET(0);
221 #endif
222 }
223
#ifdef CONFIG_PLIC_SUPPORTS_TRIG_TYPE
/**
 * @brief Return the value of the trigger type register for the IRQ
 *
 * In the event edge irq is enabled this will return the trigger
 * value of the irq. In the event edge irq is not supported this
 * routine will return 0
 *
 * @param dev PLIC-instance device
 * @param local_irq PLIC-instance IRQ number to add to the trigger
 *
 * @return Trigger type register value if PLIC supports trigger type, PLIC_TRIG_LEVEL otherwise
 */
static uint32_t riscv_plic_irq_trig_val(const struct device *dev, uint32_t local_irq)
{
	const struct plic_config *config = dev->config;
	mem_addr_t trig_addr = config->trig + local_irq_to_reg_offset(local_irq);
	/*
	 * NOTE(review): the bit offset is not reduced modulo the register
	 * width, so for local_irq >= 32 it exceeds 31 while trig_addr has
	 * already advanced to the matching word — confirm intended layout
	 * when CONFIG_PLIC_TRIG_TYPE_BITWIDTH > 1 or local_irq >= 32.
	 */
	uint32_t offset = local_irq * CONFIG_PLIC_TRIG_TYPE_BITWIDTH;

	return sys_read32(trig_addr) & GENMASK(offset + CONFIG_PLIC_TRIG_TYPE_BITWIDTH - 1, offset);
}
#endif /* CONFIG_PLIC_SUPPORTS_TRIG_TYPE */
246
/*
 * Set or clear the enable bit of @p irq in every CPU's enable context.
 *
 * When enabling, the bit is set only for CPUs present in the IRQ's cpumask
 * (always CPU 0 without CONFIG_PLIC_IRQ_AFFINITY); when disabling, the bit
 * is cleared on all CPUs. Callers hold the instance spinlock around this
 * read-modify-write (see riscv_plic_irq_enable/disable/set_affinity).
 */
static void plic_irq_enable_set_state(uint32_t irq, bool enable)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	const uint32_t local_irq = irq_from_level_2(irq);

	for (uint32_t cpu_num = 0; cpu_num < arch_num_cpus(); cpu_num++) {
		mem_addr_t en_addr =
			get_context_en_addr(dev, cpu_num) + local_irq_to_reg_offset(local_irq);
		uint32_t en_value;

		/* Read-modify-write the 32-bit word holding this IRQ's enable bit */
		en_value = sys_read32(en_addr);
		WRITE_BIT(en_value, local_irq & PLIC_REG_MASK,
			  enable ? (get_irq_cpumask(dev, local_irq) & BIT(cpu_num)) != 0 : false);
		sys_write32(en_value, en_addr);
	}
}
263
264 /**
265 * @brief Enable a riscv PLIC-specific interrupt line
266 *
267 * This routine enables a RISCV PLIC-specific interrupt line.
268 * riscv_plic_irq_enable is called by RISCV_PRIVILEGED
269 * arch_irq_enable function to enable external interrupts for
270 * IRQS level == 2, whenever CONFIG_RISCV_HAS_PLIC variable is set.
271 *
272 * @param irq IRQ number to enable
273 */
riscv_plic_irq_enable(uint32_t irq)274 void riscv_plic_irq_enable(uint32_t irq)
275 {
276 const struct device *dev = get_plic_dev_from_irq(irq);
277 struct plic_data *data = dev->data;
278 k_spinlock_key_t key = k_spin_lock(&data->lock);
279
280 plic_irq_enable_set_state(irq, true);
281
282 k_spin_unlock(&data->lock, key);
283 }
284
285 /**
286 * @brief Disable a riscv PLIC-specific interrupt line
287 *
288 * This routine disables a RISCV PLIC-specific interrupt line.
289 * riscv_plic_irq_disable is called by RISCV_PRIVILEGED
290 * arch_irq_disable function to disable external interrupts, for
291 * IRQS level == 2, whenever CONFIG_RISCV_HAS_PLIC variable is set.
292 *
293 * @param irq IRQ number to disable
294 */
riscv_plic_irq_disable(uint32_t irq)295 void riscv_plic_irq_disable(uint32_t irq)
296 {
297 const struct device *dev = get_plic_dev_from_irq(irq);
298 struct plic_data *data = dev->data;
299 k_spinlock_key_t key = k_spin_lock(&data->lock);
300
301 plic_irq_enable_set_state(irq, false);
302
303 k_spin_unlock(&data->lock, key);
304 }
305
/* Check if the local IRQ of a PLIC instance is enabled */
static int local_irq_is_enabled(const struct device *dev, uint32_t local_irq)
{
	uint32_t bit_position = local_irq & PLIC_REG_MASK;
	/*
	 * With IRQ affinity the IRQ counts as enabled if ANY CPU has its bit
	 * set (accumulate with OR from 0); without affinity every CPU must
	 * have it set (accumulate with AND from 1).
	 */
	int is_enabled = IS_ENABLED(CONFIG_PLIC_IRQ_AFFINITY) ? 0 : 1;

	for (uint32_t cpu_num = 0; cpu_num < arch_num_cpus(); cpu_num++) {
		mem_addr_t en_addr =
			get_context_en_addr(dev, cpu_num) + local_irq_to_reg_offset(local_irq);
		uint32_t en_value = sys_read32(en_addr);

		if (IS_ENABLED(CONFIG_PLIC_IRQ_AFFINITY)) {
			is_enabled |= !!(en_value & BIT(bit_position));
		} else {
			is_enabled &= !!(en_value & BIT(bit_position));
		}
	}

	return is_enabled;
}
326
327 /**
328 * @brief Check if a riscv PLIC-specific interrupt line is enabled
329 *
330 * This routine checks if a RISCV PLIC-specific interrupt line is enabled.
331 * @param irq IRQ number to check
332 *
333 * @return 1 or 0
334 */
riscv_plic_irq_is_enabled(uint32_t irq)335 int riscv_plic_irq_is_enabled(uint32_t irq)
336 {
337 const struct device *dev = get_plic_dev_from_irq(irq);
338 struct plic_data *data = dev->data;
339 const uint32_t local_irq = irq_from_level_2(irq);
340 int ret = 0;
341
342 K_SPINLOCK(&data->lock) {
343 ret = local_irq_is_enabled(dev, local_irq);
344 }
345
346 return ret;
347 }
348
349 /**
350 * @brief Set priority of a riscv PLIC-specific interrupt line
351 *
352 * This routine set the priority of a RISCV PLIC-specific interrupt line.
353 * riscv_plic_irq_set_prio is called by riscv arch_irq_priority_set to set
354 * the priority of an interrupt whenever CONFIG_RISCV_HAS_PLIC variable is set.
355 *
356 * @param irq IRQ number for which to set priority
357 * @param priority Priority of IRQ to set to
358 */
riscv_plic_set_priority(uint32_t irq,uint32_t priority)359 void riscv_plic_set_priority(uint32_t irq, uint32_t priority)
360 {
361 const struct device *dev = get_plic_dev_from_irq(irq);
362 const struct plic_config *config = dev->config;
363 const uint32_t local_irq = irq_from_level_2(irq);
364 mem_addr_t prio_addr = config->prio + (local_irq * sizeof(uint32_t));
365
366 if (priority > config->max_prio) {
367 priority = config->max_prio;
368 }
369
370 sys_write32(priority, prio_addr);
371 }
372
#ifdef CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT
/*
 * Mark @p irq pending in software by setting its bit in the PLIC pending
 * register, triggering it as if asserted by hardware.
 *
 * NOTE(review): this is an unlocked read-modify-write of a shared pending
 * word — concurrent callers on different CPUs could race; confirm whether
 * callers serialize this.
 */
void riscv_plic_irq_set_pending(uint32_t irq)
{
	const struct device *dev = get_plic_dev_from_irq(irq);
	const uint32_t local_irq = irq_from_level_2(irq);
	mem_addr_t pend_addr = get_pending_reg(dev, local_irq);
	uint32_t pend_value = sys_read32(pend_addr);

	WRITE_BIT(pend_value, local_irq & PLIC_REG_MASK, true);
	sys_write32(pend_value, pend_addr);
}
#endif /* CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT */
385
386 /**
387 * @brief Get riscv PLIC-specific interrupt line causing an interrupt
388 *
389 * This routine returns the RISCV PLIC-specific interrupt line causing an
390 * interrupt.
391 *
392 * @param dev Optional device pointer to get the interrupt line's controller
393 *
394 * @return PLIC-specific interrupt line causing an interrupt.
395 */
riscv_plic_get_irq(void)396 unsigned int riscv_plic_get_irq(void)
397 {
398 return save_irq[arch_curr_cpu()->id];
399 }
400
401 /**
402 * @brief Get riscv PLIC causing an interrupt
403 *
404 * This routine returns the RISCV PLIC device causing an interrupt.
405 *
406 * @return PLIC device causing an interrupt.
407 */
riscv_plic_get_dev(void)408 const struct device *riscv_plic_get_dev(void)
409 {
410 return save_dev[arch_curr_cpu()->id];
411 }
412
413 #ifdef CONFIG_PLIC_IRQ_AFFINITY
414 /**
415 * @brief Set riscv PLIC-specific interrupt enable by cpu bitmask
416 *
417 * @param irq IRQ number for which to set smp irq affinity
418 * @param cpumask Bitmask to specific which cores can handle IRQ
419 */
riscv_plic_irq_set_affinity(uint32_t irq,uint32_t cpumask)420 int riscv_plic_irq_set_affinity(uint32_t irq, uint32_t cpumask)
421 {
422 const struct device *dev = get_plic_dev_from_irq(irq);
423 struct plic_data *data = dev->data;
424 __maybe_unused const struct plic_config *config = dev->config;
425 const uint32_t local_irq = irq_from_level_2(irq);
426 k_spinlock_key_t key;
427
428 if (local_irq >= config->nr_irqs) {
429 __ASSERT(false, "overflow: irq %d, local_irq %d", irq, local_irq);
430 return -EINVAL;
431 }
432
433 if ((cpumask & ~BIT_MASK(arch_num_cpus())) != 0) {
434 __ASSERT(false, "cpumask: 0x%X", cpumask);
435 return -EINVAL;
436 }
437
438 key = k_spin_lock(&data->lock);
439 /* Updated irq_cpumask for next time setting plic enable register */
440 data->irq_cpumask[local_irq] = (plic_cpumask_t)cpumask;
441
442 /* If irq is enabled, apply the new irq affinity */
443 if (local_irq_is_enabled(dev, local_irq)) {
444 plic_irq_enable_set_state(irq, true);
445 }
446 k_spin_unlock(&data->lock, key);
447
448 return 0;
449 }
450 #endif /* CONFIG_PLIC_IRQ_AFFINITY */
451
452 #ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
453 /**
454 * If there's more than one core, irq_count points to a 2D-array: irq_count[NUM_CPUs + 1][nr_irqs]
455 *
456 * i.e. NUM_CPUs == 2:
457 * CPU 0 [0 ... nr_irqs - 1]
458 * CPU 1 [0 ... nr_irqs - 1]
459 * TOTAL [0 ... nr_irqs - 1]
460 */
get_irq_hit_count_cpu(const struct device * dev,int cpu,uint32_t local_irq)461 static ALWAYS_INLINE uint16_t *get_irq_hit_count_cpu(const struct device *dev, int cpu,
462 uint32_t local_irq)
463 {
464 const struct plic_config *config = dev->config;
465 const struct plic_data *data = dev->data;
466 uint32_t offset = local_irq;
467
468 if (CONFIG_MP_MAX_NUM_CPUS > 1) {
469 offset = cpu * config->nr_irqs + local_irq;
470 }
471
472 return &data->stats.irq_count[offset];
473 }
474
get_irq_hit_count_total(const struct device * dev,uint32_t local_irq)475 static ALWAYS_INLINE uint16_t *get_irq_hit_count_total(const struct device *dev, uint32_t local_irq)
476 {
477 const struct plic_config *config = dev->config;
478 const struct plic_data *data = dev->data;
479 uint32_t offset = local_irq;
480
481 if (CONFIG_MP_MAX_NUM_CPUS > 1) {
482 offset = arch_num_cpus() * config->nr_irqs + local_irq;
483 }
484
485 return &data->stats.irq_count[offset];
486 }
487 #endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */
488
/*
 * Top-level ISR for a PLIC instance: claims the pending IRQ, records it in
 * save_irq/save_dev for later lookup, dispatches the registered handler and
 * then completes the claim. The claim/complete ordering differs for edge-
 * vs level-triggered interrupts (see below).
 */
static void plic_irq_handler(const struct device *dev)
{
	const struct plic_config *config = dev->config;
	mem_addr_t claim_complete_addr = get_claim_complete_addr(dev);
	const struct _isr_table_entry *ite;
	uint32_t cpu_id = arch_curr_cpu()->id;
	/* Get the IRQ number generating the interrupt */
	const uint32_t local_irq = sys_read32(claim_complete_addr);

#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
	uint16_t *cpu_count = get_irq_hit_count_cpu(dev, cpu_id, local_irq);
	uint16_t *total_count = get_irq_hit_count_total(dev, local_irq);

	/* Cap the count at __UINT16_MAX__ */
	if (*total_count < __UINT16_MAX__) {
		(*cpu_count)++;
		if (CONFIG_MP_MAX_NUM_CPUS > 1) {
			(*total_count)++;
		}
	}
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */

	/*
	 * Note: Because PLIC only supports multicast of interrupt, all enabled
	 * targets will receive interrupt notification. Only the fastest target
	 * will claim this interrupt, and other targets will claim ID 0 if
	 * no other pending interrupt now.
	 *
	 * (by RISC-V Privileged Architecture v1.10)
	 */
	if ((CONFIG_MP_MAX_NUM_CPUS > 1) && (local_irq == 0U)) {
		return;
	}

	/*
	 * Save IRQ in save_irq. To be used, if need be, by
	 * subsequent handlers registered in the _sw_isr_table table,
	 * as IRQ number held by the claim_complete register is
	 * cleared upon read.
	 */
	save_irq[cpu_id] = local_irq;
	save_dev[cpu_id] = dev;

	/*
	 * If the IRQ is out of range, call z_irq_spurious.
	 * A call to z_irq_spurious will not return.
	 */
	if ((local_irq == 0U) || (local_irq >= config->nr_irqs)) {
		z_irq_spurious(NULL);
	}

#ifdef CONFIG_PLIC_SUPPORTS_TRIG_EDGE
	uint32_t trig_val = riscv_plic_irq_trig_val(dev, local_irq);
	/*
	 * Edge-triggered interrupts have to be acknowledged first before
	 * getting handled so that we don't miss on the next edge-triggered interrupt.
	 */
	if (trig_val == PLIC_TRIG_EDGE) {
		sys_write32(local_irq, claim_complete_addr);
	}
#endif /* CONFIG_PLIC_SUPPORTS_TRIG_EDGE */

	/* Call the corresponding IRQ handler in _sw_isr_table */
	ite = &config->isr_table[local_irq];
	ite->isr(ite->arg);

	/*
	 * Write to claim_complete register to indicate to
	 * PLIC controller that the IRQ has been handled
	 * for level triggered interrupts.
	 */
#ifdef CONFIG_PLIC_SUPPORTS_TRIG_EDGE
	/* Handle only if level-triggered (edge IRQs were completed above) */
	if (trig_val == PLIC_TRIG_LEVEL) {
		sys_write32(local_irq, claim_complete_addr);
	}
#else
	sys_write32(local_irq, claim_complete_addr);
#endif /* #ifdef CONFIG_PLIC_SUPPORTS_TRIG_EDGE */
}
569
570 /**
571 * @brief Initialize the Platform Level Interrupt Controller
572 *
573 * @param dev PLIC device struct
574 *
575 * @retval 0 on success.
576 */
plic_init(const struct device * dev)577 static int plic_init(const struct device *dev)
578 {
579 const struct plic_config *config = dev->config;
580 mem_addr_t en_addr, thres_prio_addr;
581 mem_addr_t prio_addr = config->prio;
582
583 /* Iterate through each of the contexts, HART + PRIV */
584 for (uint32_t cpu_num = 0; cpu_num < arch_num_cpus(); cpu_num++) {
585 en_addr = get_context_en_addr(dev, cpu_num);
586 thres_prio_addr = get_threshold_priority_addr(dev, cpu_num);
587
588 /* Ensure that all interrupts are disabled initially */
589 for (uint32_t i = 0; i < get_plic_enabled_size(dev); i++) {
590 sys_write32(0U, en_addr + (i * sizeof(uint32_t)));
591 }
592
593 /* Set threshold priority to 0 */
594 sys_write32(0U, thres_prio_addr);
595 }
596
597 /* Set priority of each interrupt line to 0 initially */
598 for (uint32_t i = 0; i < config->nr_irqs; i++) {
599 sys_write32(0U, prio_addr + (i * sizeof(uint32_t)));
600 }
601
602 /* Configure IRQ for PLIC driver */
603 config->irq_config_func();
604
605 return 0;
606 }
607
608 #ifdef CONFIG_PLIC_SHELL
parse_device(const struct shell * sh,size_t argc,char * argv[],const struct device ** plic)609 static inline int parse_device(const struct shell *sh, size_t argc, char *argv[],
610 const struct device **plic)
611 {
612 ARG_UNUSED(argc);
613
614 *plic = device_get_binding(argv[1]);
615 if (*plic == NULL) {
616 shell_error(sh, "PLIC device (%s) not found!\n", argv[1]);
617 return -ENODEV;
618 }
619
620 return 0;
621 }
622
623 #ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
/*
 * Shell handler: "plic stats get <device> [minimum hits]".
 * Prints per-CPU (and, with >1 CPU, total) hit counts for each IRQ line,
 * optionally filtering out lines at or below [minimum hits].
 */
static int cmd_stats_get(const struct shell *sh, size_t argc, char *argv[])
{
	const struct device *dev;
	int ret = parse_device(sh, argc, argv, &dev);
	uint16_t min_hit = 0;

	if (ret != 0) {
		return ret;
	}

	const struct plic_config *config = dev->config;

	/* Optional third argument: minimum hit count filter */
	if (argc > 2) {
		min_hit = (uint16_t)shell_strtoul(argv[2], 10, &ret);
		if (ret != 0) {
			shell_error(sh, "Failed to parse %s: %d", argv[2], ret);
			return ret;
		}
		shell_print(sh, "IRQ line with > %d hits:", min_hit);
	}

	/* Header row: IRQ, one column per CPU, optional Total, ISR symbol */
	shell_fprintf(sh, SHELL_NORMAL, "   IRQ");
	for (int cpu_id = 0; cpu_id < arch_num_cpus(); cpu_id++) {
		shell_fprintf(sh, SHELL_NORMAL, "  CPU%2d", cpu_id);
	}
	if (CONFIG_MP_MAX_NUM_CPUS > 1) {
		shell_fprintf(sh, SHELL_NORMAL, "  Total");
	}
	shell_fprintf(sh, SHELL_NORMAL, "\tISR(ARG)\n");

	for (int i = 0; i < config->nr_irqs; i++) {
		uint16_t *total_count = get_irq_hit_count_total(dev, i);

		if (*total_count <= min_hit) {
			/* Skips printing if total_hit is lesser than min_hit */
			continue;
		}

		shell_fprintf(sh, SHELL_NORMAL, "  %4d", i); /* IRQ number */
		/* Print the IRQ hit counts on each CPU */
		for (int cpu_id = 0; cpu_id < arch_num_cpus(); cpu_id++) {
			uint16_t *cpu_count = get_irq_hit_count_cpu(dev, cpu_id, i);

			shell_fprintf(sh, SHELL_NORMAL, "  %5d", *cpu_count);
		}
		if (CONFIG_MP_MAX_NUM_CPUS > 1) {
			/* If there's > 1 CPU, print the total hit count at the end */
			shell_fprintf(sh, SHELL_NORMAL, "  %5d", *total_count);
		}
#ifdef CONFIG_SYMTAB
		/* With symtab: resolve the ISR address to a symbol name */
		const char *name =
			symtab_find_symbol_name((uintptr_t)config->isr_table[i].isr, NULL);

		shell_fprintf(sh, SHELL_NORMAL, "\t%s(%p)\n", name, config->isr_table[i].arg);
#else
		shell_fprintf(sh, SHELL_NORMAL, "\t%p(%p)\n", (void *)config->isr_table[i].isr,
			      config->isr_table[i].arg);
#endif /* CONFIG_SYMTAB */
	}
	shell_print(sh, "");

	return 0;
}
687
/* Shell handler: "plic stats clear <device>" — zero the whole hit-count matrix. */
static int cmd_stats_clear(const struct shell *sh, size_t argc, char *argv[])
{
	const struct device *dev;
	int ret = parse_device(sh, argc, argv, &dev);

	if (ret != 0) {
		return ret;
	}

	const struct plic_data *data = dev->data;
	const struct plic_config *config = dev->config;
	struct plic_stats stat = data->stats;

	/*
	 * Matrix size mirrors PLIC_INTC_IRQ_COUNT_BUF_DEFINE(): nr_irqs columns
	 * times 1 row on single-CPU builds, or CONFIG_MP_MAX_NUM_CPUS + 1 rows
	 * (per-CPU rows plus the total row) otherwise.
	 */
	memset(stat.irq_count, 0,
	       config->nr_irqs *
		       COND_CODE_1(CONFIG_MP_MAX_NUM_CPUS, (1),
				   (UTIL_INC(CONFIG_MP_MAX_NUM_CPUS))) *
		       sizeof(uint16_t));

	shell_print(sh, "Cleared stats of %s.\n", dev->name);

	return 0;
}
711 #endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */
712
713 #ifdef CONFIG_PLIC_SHELL_IRQ_AFFINITY
local_irq_to_irq(const struct device * dev,uint32_t local_irq)714 static ALWAYS_INLINE uint32_t local_irq_to_irq(const struct device *dev, uint32_t local_irq)
715 {
716 const struct plic_config *config = dev->config;
717
718 return irq_to_level_2(local_irq) | config->irq;
719 }
720
/*
 * Shell handler: "plic affinity set <device> <local_irq> <cpumask>".
 * local_irq 0 applies the mask to every valid IRQ line of the instance.
 */
static int cmd_affinity_set(const struct shell *sh, size_t argc, char **argv)
{
	ARG_UNUSED(argc);

	uint32_t local_irq, irq, mask;
	const struct device *dev;
	int rc = parse_device(sh, argc, argv, &dev);

	if (rc != 0) {
		return rc;
	}

	/* Only dereference the device after parse_device() succeeded */
	const struct plic_config *config = dev->config;

	local_irq = (uint32_t)shell_strtol(argv[2], 10, &rc);
	if (rc != 0) {
		shell_error(sh, "Failed to parse %s: %d", argv[2], rc);
		return rc;
	}

	if (local_irq >= config->nr_irqs) {
		shell_error(sh, "local_irq (%d) > nr_irqs (%d)", local_irq, config->nr_irqs);
		return -EINVAL;
	}

	mask = (uint32_t)shell_strtol(argv[3], 16, &rc);
	if (rc != 0) {
		shell_error(sh, "Failed to parse %s: %d", argv[3], rc);
		return rc;
	}

	if ((mask & ~BIT_MASK(arch_num_cpus())) != 0) {
		shell_error(sh, "cpumask: 0x%X num_cpus: %d", mask, arch_num_cpus());
		return -EINVAL;
	}

	if (local_irq != 0) {
		irq = local_irq_to_irq(dev, local_irq);
		riscv_plic_irq_set_affinity(irq, mask);
		shell_print(sh, "IRQ %d affinity set to 0x%X", local_irq, mask);
	} else {
		/* Valid local IRQs are 1 .. nr_irqs - 1 (0 is the "no IRQ" sentinel) */
		for (local_irq = 1; local_irq < config->nr_irqs; local_irq++) {
			irq = local_irq_to_irq(dev, local_irq);
			riscv_plic_irq_set_affinity(irq, mask);
		}
		shell_print(sh, "All IRQ affinity set to 0x%X", mask);
	}

	return 0;
}
768
/*
 * Shell handler: "plic affinity get <device> [local_irq]".
 * Without [local_irq], dumps the cpumask of every IRQ line.
 */
static int cmd_affinity_get(const struct shell *sh, size_t argc, char **argv)
{
	ARG_UNUSED(argc);

	const struct device *dev;
	int rc = parse_device(sh, argc, argv, &dev);

	if (rc != 0) {
		return rc;
	}

	/* Only dereference the device after parse_device() succeeded */
	const struct plic_config *config = dev->config;

	shell_print(sh, " IRQ MASK");
	if (argc == 2) {
		for (uint32_t local_irq = 0; local_irq < config->nr_irqs; local_irq++) {
			shell_print(sh, "%4d 0x%X", local_irq, get_irq_cpumask(dev, local_irq));
		}
	} else {
		uint32_t local_irq = (uint32_t)shell_strtol(argv[2], 10, &rc);

		if (rc != 0) {
			shell_error(sh, "Failed to parse %s: %d", argv[2], rc);
			return rc;
		}

		if (local_irq >= config->nr_irqs) {
			shell_error(sh, "local_irq (%d) > nr_irqs (%d)", local_irq,
				    config->nr_irqs);
			return -EINVAL;
		}

		shell_print(sh, "%4d 0x%X", local_irq, get_irq_cpumask(dev, local_irq));
	}

	return 0;
}
804 #endif /* CONFIG_PLIC_SHELL_IRQ_AFFINITY */
805
806 /* Device name autocompletion support */
device_name_get(size_t idx,struct shell_static_entry * entry)807 static void device_name_get(size_t idx, struct shell_static_entry *entry)
808 {
809 const struct device *dev = shell_device_lookup(idx, "interrupt-controller");
810
811 entry->syntax = (dev != NULL) ? dev->name : NULL;
812 entry->handler = NULL;
813 entry->help = NULL;
814 entry->subcmd = NULL;
815 }
816
/* Dynamic subcommand list of registered interrupt-controller devices */
SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get);

#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
/* "plic stats get|clear" subcommands */
SHELL_STATIC_SUBCMD_SET_CREATE(plic_stats_cmds,
	SHELL_CMD_ARG(get, &dsub_device_name,
		      "Read PLIC's stats.\n"
		      "Usage: plic stats get <device> [minimum hits]",
		      cmd_stats_get, 2, 1),
	SHELL_CMD_ARG(clear, &dsub_device_name,
		      "Reset PLIC's stats.\n"
		      "Usage: plic stats clear <device>",
		      cmd_stats_clear, 2, 0),
	SHELL_SUBCMD_SET_END
);
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */

#ifdef CONFIG_PLIC_SHELL_IRQ_AFFINITY
/* "plic affinity set|get" subcommands */
SHELL_STATIC_SUBCMD_SET_CREATE(plic_affinity_cmds,
	SHELL_CMD_ARG(set, &dsub_device_name,
		      "Set IRQ affinity.\n"
		      "Usage: plic affinity set <device> <local_irq> <cpumask>",
		      cmd_affinity_set, 4, 0),
	SHELL_CMD_ARG(get, &dsub_device_name,
		      "Get IRQ affinity.\n"
		      "Usage: plic affinity get <device> <local_irq>",
		      cmd_affinity_get, 2, 1),
	SHELL_SUBCMD_SET_END);
#endif /* CONFIG_PLIC_SHELL_IRQ_AFFINITY */

/* Top-level "plic" command tree */
SHELL_STATIC_SUBCMD_SET_CREATE(plic_cmds,
#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
	SHELL_CMD(stats, &plic_stats_cmds, "IRQ stats", NULL),
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */
#ifdef CONFIG_PLIC_SHELL_IRQ_AFFINITY
	SHELL_CMD(affinity, &plic_affinity_cmds, "IRQ affinity", NULL),
#endif /* CONFIG_PLIC_SHELL_IRQ_AFFINITY */
	SHELL_SUBCMD_SET_END
);

SHELL_CMD_REGISTER(plic, &plic_cmds, "PLIC shell commands", NULL);
857 #endif /* CONFIG_PLIC_SHELL */
858
/* Number of IRQs managed per instance: hardware count capped by the aggregator limit */
#define PLIC_MIN_IRQ_NUM(n) MIN(DT_INST_PROP(n, riscv_ndev), CONFIG_MAX_IRQ_PER_AGGREGATOR)

#ifdef CONFIG_PLIC_SHELL_IRQ_COUNT
/*
 * Hit-count matrix: 1 row on single-CPU builds; otherwise one row per CPU
 * plus a final "total" row (see get_irq_hit_count_cpu/total).
 */
#define PLIC_INTC_IRQ_COUNT_BUF_DEFINE(n)                                                          \
	static uint16_t local_irq_count_##n[COND_CODE_1(CONFIG_MP_MAX_NUM_CPUS, (1),               \
							(UTIL_INC(CONFIG_MP_MAX_NUM_CPUS)))]       \
					   [PLIC_MIN_IRQ_NUM(n)];
#define PLIC_INTC_IRQ_COUNT_INIT(n)                                                                \
	.stats = {                                                                                 \
		.irq_count = &local_irq_count_##n[0][0],                                           \
	},

#else
#define PLIC_INTC_IRQ_COUNT_BUF_DEFINE(n)
#define PLIC_INTC_IRQ_COUNT_INIT(n)
#endif /* CONFIG_PLIC_SHELL_IRQ_COUNT */

#ifdef CONFIG_PLIC_IRQ_AFFINITY
/* Per-IRQ cpumask buffer, every entry starting at the configured default mask */
#define PLIC_IRQ_CPUMASK_BUF_DECLARE(n)                                                            \
	static plic_cpumask_t irq_cpumask_##n[PLIC_MIN_IRQ_NUM(n)] = {                             \
		[0 ...(PLIC_MIN_IRQ_NUM(n) - 1)] = CONFIG_PLIC_IRQ_AFFINITY_MASK,                  \
	}
#define PLIC_IRQ_CPUMASK_BUF_INIT(n) .irq_cpumask = &irq_cpumask_##n[0],
#else
#define PLIC_IRQ_CPUMASK_BUF_DECLARE(n)
#define PLIC_IRQ_CPUMASK_BUF_INIT(n)
#endif /* CONFIG_PLIC_IRQ_AFFINITY */

/* Per-instance struct plic_data plus its optional backing buffers */
#define PLIC_INTC_DATA_INIT(n)                                                                     \
	PLIC_INTC_IRQ_COUNT_BUF_DEFINE(n);                                                         \
	PLIC_IRQ_CPUMASK_BUF_DECLARE(n);                                                           \
	static struct plic_data plic_data_##n = {                                                  \
		PLIC_INTC_IRQ_COUNT_INIT(n)                                                        \
		PLIC_IRQ_CPUMASK_BUF_INIT(n)                                                       \
	};

#define PLIC_INTC_IRQ_FUNC_DECLARE(n) static void plic_irq_config_func_##n(void)

/* Hook the instance's parent IRQ to plic_irq_handler() and enable it */
#define PLIC_INTC_IRQ_FUNC_DEFINE(n)                                                               \
	static void plic_irq_config_func_##n(void)                                                 \
	{                                                                                          \
		IRQ_CONNECT(DT_INST_IRQN(n), 0, plic_irq_handler, DEVICE_DT_INST_GET(n), 0);       \
		irq_enable(DT_INST_IRQN(n));                                                       \
	}

/*
 * Build the hartid -> context table: emits index i for each devicetree IRQ
 * entry whose IRQ number matches the instance's parent IRQ.
 * NOTE(review): remaining entries are implicitly zero — confirm intended for
 * harts without a matching devicetree interrupt entry.
 */
#define HART_CONTEXTS(i, n) IF_ENABLED(IS_EQ(DT_INST_IRQN_BY_IDX(n, i), DT_INST_IRQN(n)), (i,))
#define PLIC_HART_CONTEXT_DECLARE(n)                                                               \
	INTC_PLIC_STATIC const uint32_t plic_hart_contexts_##n[DT_CHILD_NUM(DT_PATH(cpus))] = {    \
		LISTIFY(DT_INST_NUM_IRQS(n), HART_CONTEXTS, (), n)}

/* Per-instance struct plic_config with register blocks at spec-defined offsets */
#define PLIC_INTC_CONFIG_INIT(n)                                                                   \
	PLIC_INTC_IRQ_FUNC_DECLARE(n);                                                             \
	PLIC_HART_CONTEXT_DECLARE(n);                                                              \
	static const struct plic_config plic_config_##n = {                                        \
		.prio = PLIC_BASE_ADDR(n),                                                         \
		.irq_en = PLIC_BASE_ADDR(n) + CONTEXT_ENABLE_BASE,                                 \
		.reg = PLIC_BASE_ADDR(n) + CONTEXT_BASE,                                           \
		IF_ENABLED(CONFIG_PLIC_SUPPORTS_SOFT_INTERRUPT,                                    \
			   (.pend = PLIC_BASE_ADDR(n) + CONTEXT_PENDING_BASE,))                    \
		IF_ENABLED(CONFIG_PLIC_SUPPORTS_TRIG_TYPE,                                         \
			   (.trig = PLIC_BASE_ADDR(n) + CONFIG_PLIC_TRIG_TYPE_REG_OFFSET,))        \
		.max_prio = DT_INST_PROP(n, riscv_max_priority),                                   \
		.riscv_ndev = DT_INST_PROP(n, riscv_ndev),                                         \
		.nr_irqs = PLIC_MIN_IRQ_NUM(n),                                                    \
		.irq = DT_INST_IRQN(n),                                                            \
		.irq_config_func = plic_irq_config_func_##n,                                       \
		.isr_table = &_sw_isr_table[INTC_INST_ISR_TBL_OFFSET(n)],                          \
		.hart_context = plic_hart_contexts_##n,                                            \
	};                                                                                         \
	PLIC_INTC_IRQ_FUNC_DEFINE(n)

/* Register the instance as a level-2 aggregator and define the device */
#define PLIC_INTC_DEVICE_INIT(n)                                                                   \
	IRQ_PARENT_ENTRY_DEFINE(                                                                   \
		plic##n, DEVICE_DT_INST_GET(n), DT_INST_IRQN(n),                                   \
		INTC_INST_ISR_TBL_OFFSET(n),                                                       \
		DT_INST_INTC_GET_AGGREGATOR_LEVEL(n));                                             \
	PLIC_INTC_CONFIG_INIT(n)                                                                   \
	PLIC_INTC_DATA_INIT(n)                                                                     \
	DEVICE_DT_INST_DEFINE(n, &plic_init, NULL,                                                 \
			      &plic_data_##n, &plic_config_##n,                                    \
			      PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY,                             \
			      NULL);

/* Instantiate one driver per enabled sifive,plic-1.0.0 devicetree node */
DT_INST_FOREACH_STATUS_OKAY(PLIC_INTC_DEVICE_INIT)
943