/*
 * Copyright 2022-2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_s32_siul2_eirq

#include <soc.h>
#include <zephyr/irq.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/interrupt_controller/intc_eirq_nxp_s32.h>

/* SIUL2 External Interrupt Controller registers (offsets from DISR0) */
/* SIUL2 DMA/Interrupt Status Flag */
#define SIUL2_DISR0 0x0
/* SIUL2 DMA/Interrupt Request Enable */
#define SIUL2_DIRER0 0x8
/* SIUL2 DMA/Interrupt Request Select */
#define SIUL2_DIRSR0 0x10
/* SIUL2 Interrupt Rising-Edge Event Enable */
#define SIUL2_IREER0 0x18
/* SIUL2 Interrupt Falling-Edge Event Enable */
#define SIUL2_IFEER0 0x20
/* SIUL2 Interrupt Filter Enable */
#define SIUL2_IFER0 0x28
/* SIUL2 Interrupt Filter Maximum Counter Register */
#define SIUL2_IFMCR(n) (0x30 + 0x4 * (n))
#define SIUL2_IFMCR_MAXCNT_MASK GENMASK(3, 0)
#define SIUL2_IFMCR_MAXCNT(v) FIELD_PREP(SIUL2_IFMCR_MAXCNT_MASK, (v))
/* SIUL2 Interrupt Filter Clock Prescaler Register */
#define SIUL2_IFCPR 0xb0
#define SIUL2_IFCPR_IFCP_MASK GENMASK(3, 0)
#define SIUL2_IFCPR_IFCP(v) FIELD_PREP(SIUL2_IFCPR_IFCP_MASK, (v))

/* Register accessors; they expect a local `config` pointing to the device config */
#define REG_READ(r) sys_read32(config->base + (r))
#define REG_WRITE(r, v) sys_write32((v), config->base + (r))

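/*
 * MAXCNT is a 4-bit field, so any value above SIUL2_IFMCR_MAXCNT_MASK cannot be
 * programmed; the first out-of-range value is used as a "filter disabled" sentinel.
 */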
#define GLITCH_FILTER_DISABLED (SIUL2_IFMCR_MAXCNT_MASK + 1)

struct eirq_nxp_s32_config {
	mem_addr_t base;
	const struct pinctrl_dev_config *pincfg;
	uint8_t filter_clock_prescaler;
	uint8_t max_filter_counter[CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX];
};

struct eirq_nxp_s32_cb {
	eirq_nxp_s32_callback_t cb;
	uint8_t pin;
	void *data;
};

struct eirq_nxp_s32_data {
	struct eirq_nxp_s32_cb *cb;
};

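/*
 * Shared ISR body: each parent interrupt line serves a group of
 * CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_GROUP external interrupts, so only the pending
 * flags belonging to group irq_idx are dispatched to the registered callbacks here.
 */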
static inline void eirq_nxp_s32_interrupt_handler(const struct device *dev, uint32_t irq_idx)
{
	const struct eirq_nxp_s32_config *config = dev->config;
	struct eirq_nxp_s32_data *data = dev->data;
	uint32_t mask = GENMASK(CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_GROUP - 1, 0);
	uint32_t pending;
	uint8_t irq;

	pending = eirq_nxp_s32_get_pending(dev);
	pending &= mask << (irq_idx * CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_GROUP);

	while (pending) {
		mask = LSB_GET(pending);
		irq = u64_count_trailing_zeros(mask);

		/* Clear status flag (DISR0 is W1C, so write only the handled bit) */
		REG_WRITE(SIUL2_DISR0, mask);

		if (data->cb[irq].cb != NULL) {
			data->cb[irq].cb(data->cb[irq].pin, data->cb[irq].data);
		}

		pending ^= mask;
	}
}

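/*
 * Register a callback for an external interrupt line. Re-registering the same
 * callback/argument pair is a no-op; if a different callback is already installed
 * for the line, -EBUSY is returned.
 */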
int eirq_nxp_s32_set_callback(const struct device *dev, uint8_t irq, uint8_t pin,
			      eirq_nxp_s32_callback_t cb, void *arg)
{
	struct eirq_nxp_s32_data *data = dev->data;

	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX);

	if ((data->cb[irq].cb == cb) && (data->cb[irq].data == arg)) {
		return 0;
	}

	if (data->cb[irq].cb) {
		return -EBUSY;
	}

	data->cb[irq].cb = cb;
	data->cb[irq].pin = pin;
	data->cb[irq].data = arg;

	return 0;
}

void eirq_nxp_s32_unset_callback(const struct device *dev, uint8_t irq)
{
	struct eirq_nxp_s32_data *data = dev->data;

	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX);

	data->cb[irq].cb = NULL;
	data->cb[irq].pin = 0;
	data->cb[irq].data = NULL;
}

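/*
 * Program the rising/falling edge-enable bits according to the requested trigger,
 * then clear any stale flag before unmasking the line.
 */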
void eirq_nxp_s32_enable_interrupt(const struct device *dev, uint8_t irq,
				   enum eirq_nxp_s32_trigger trigger)
{
	const struct eirq_nxp_s32_config *config = dev->config;
	uint32_t reg_val;

	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX);

	/* Configure trigger */
	reg_val = REG_READ(SIUL2_IREER0);
	if ((trigger == EIRQ_NXP_S32_RISING_EDGE) || (trigger == EIRQ_NXP_S32_BOTH_EDGES)) {
		reg_val |= BIT(irq);
	} else {
		reg_val &= ~BIT(irq);
	}
	REG_WRITE(SIUL2_IREER0, reg_val);

	reg_val = REG_READ(SIUL2_IFEER0);
	if ((trigger == EIRQ_NXP_S32_FALLING_EDGE) || (trigger == EIRQ_NXP_S32_BOTH_EDGES)) {
		reg_val |= BIT(irq);
	} else {
		reg_val &= ~BIT(irq);
	}
	REG_WRITE(SIUL2_IFEER0, reg_val);

	/* Clear status flag (DISR0 is W1C) and unmask interrupt */
	REG_WRITE(SIUL2_DISR0, BIT(irq));
	REG_WRITE(SIUL2_DIRER0, REG_READ(SIUL2_DIRER0) | BIT(irq));
}

void eirq_nxp_s32_disable_interrupt(const struct device *dev, uint8_t irq)
{
	const struct eirq_nxp_s32_config *config = dev->config;

	__ASSERT_NO_MSG(irq < CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX);

	/* Disable triggers */
	REG_WRITE(SIUL2_IREER0, REG_READ(SIUL2_IREER0) & ~BIT(irq));
	REG_WRITE(SIUL2_IFEER0, REG_READ(SIUL2_IFEER0) & ~BIT(irq));

	/* Clear status flag (DISR0 is W1C) and mask interrupt */
	REG_WRITE(SIUL2_DISR0, BIT(irq));
	REG_WRITE(SIUL2_DIRER0, REG_READ(SIUL2_DIRER0) & ~BIT(irq));
}

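/* Return the lines that are both flagged in DISR0 and enabled in DIRER0 */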
uint32_t eirq_nxp_s32_get_pending(const struct device *dev)
{
	const struct eirq_nxp_s32_config *config = dev->config;

	return REG_READ(SIUL2_DISR0) & REG_READ(SIUL2_DIRER0);
}
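
/*
 * Sketch of how a client (e.g. the SIUL2 GPIO driver) is expected to use this
 * API; "eirq_dev", "line", "pin" and "gpio_pin_isr" are illustrative names only:
 *
 *   static void gpio_pin_isr(uint8_t pin, void *arg) { ... }
 *
 *   eirq_nxp_s32_set_callback(eirq_dev, line, pin, gpio_pin_isr, dev);
 *   eirq_nxp_s32_enable_interrupt(eirq_dev, line, EIRQ_NXP_S32_RISING_EDGE);
 *   ...
 *   eirq_nxp_s32_disable_interrupt(eirq_dev, line);
 *   eirq_nxp_s32_unset_callback(eirq_dev, line);
 */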

static int eirq_nxp_s32_init(const struct device *dev)
{
	const struct eirq_nxp_s32_config *config = dev->config;
	uint8_t irq;
	int err;

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

	/* Disable triggers, clear status flags and mask all interrupts */
	REG_WRITE(SIUL2_IREER0, 0U);
	REG_WRITE(SIUL2_IFEER0, 0U);
	REG_WRITE(SIUL2_DISR0, 0xffffffff);
	REG_WRITE(SIUL2_DIRER0, 0U);

	/* Select the request type as interrupt */
	REG_WRITE(SIUL2_DIRSR0, 0U);

	/* Configure glitch filters */
	REG_WRITE(SIUL2_IFCPR, SIUL2_IFCPR_IFCP(config->filter_clock_prescaler));

	for (irq = 0; irq < CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX; irq++) {
		if (config->max_filter_counter[irq] < GLITCH_FILTER_DISABLED) {
			REG_WRITE(SIUL2_IFMCR(irq),
				  SIUL2_IFMCR_MAXCNT(config->max_filter_counter[irq]));
			REG_WRITE(SIUL2_IFER0, REG_READ(SIUL2_IFER0) | BIT(irq));
		} else {
			REG_WRITE(SIUL2_IFER0, REG_READ(SIUL2_IFER0) & ~BIT(irq));
		}
	}

	return 0;
}

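/*
 * One ISR is generated per parent interrupt listed in devicetree; each passes its
 * index so the common handler only scans its own group of external interrupt lines.
 */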
#define EIRQ_NXP_S32_ISR_DEFINE(idx, n) \
	static void eirq_nxp_s32_isr##idx##_##n(const struct device *dev) \
	{ \
		eirq_nxp_s32_interrupt_handler(dev, idx); \
	}

#define _EIRQ_NXP_S32_IRQ_CONFIG(idx, n) \
	do { \
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, idx, irq), DT_INST_IRQ_BY_IDX(n, idx, priority), \
			    eirq_nxp_s32_isr##idx##_##n, DEVICE_DT_INST_GET(n), \
			    COND_CODE_1(CONFIG_GIC, (DT_INST_IRQ_BY_IDX(n, idx, flags)), (0))); \
		irq_enable(DT_INST_IRQ_BY_IDX(n, idx, irq)); \
	} while (false);

#define EIRQ_NXP_S32_IRQ_CONFIG(n) \
	LISTIFY(DT_NUM_IRQS(DT_DRV_INST(n)), _EIRQ_NXP_S32_IRQ_CONFIG, (), n)

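/*
 * Per-line glitch filter setting, taken from the optional irq_<idx> child node;
 * lines without a "max-filter-counter" property get the filter disabled. A rough
 * sketch of the expected devicetree shape (names inferred from the macros below,
 * values purely illustrative):
 *
 *   compatible = "nxp,s32-siul2-eirq";
 *   filter-prescaler = <1>;
 *   irq_0 {
 *           max-filter-counter = <5>;
 *   };
 */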
#define EIRQ_NXP_S32_FILTER_CONFIG(idx, n) \
	COND_CODE_1(DT_NODE_EXISTS(DT_INST_CHILD(n, irq_##idx)), \
		    (DT_PROP_OR(DT_INST_CHILD(n, irq_##idx), max_filter_counter, \
				GLITCH_FILTER_DISABLED)), \
		    (GLITCH_FILTER_DISABLED))

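/*
 * Per-instance definition: generated ISRs, pinctrl and config/data storage, plus an
 * init function that runs the common init and then connects all parent interrupts.
 */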
#define EIRQ_NXP_S32_INIT_DEVICE(n) \
	LISTIFY(DT_NUM_IRQS(DT_DRV_INST(n)), EIRQ_NXP_S32_ISR_DEFINE, (), n) \
	PINCTRL_DT_INST_DEFINE(n); \
	static const struct eirq_nxp_s32_config eirq_nxp_s32_conf_##n = { \
		.base = DT_INST_REG_ADDR(n), \
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
		.filter_clock_prescaler = DT_INST_PROP_OR(n, filter_prescaler, 0), \
		.max_filter_counter = {LISTIFY(CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX, \
					       EIRQ_NXP_S32_FILTER_CONFIG, (,), n)}, \
	}; \
	static struct eirq_nxp_s32_cb eirq_nxp_s32_cb_##n[CONFIG_NXP_S32_EIRQ_EXT_INTERRUPTS_MAX]; \
	static struct eirq_nxp_s32_data eirq_nxp_s32_data_##n = { \
		.cb = eirq_nxp_s32_cb_##n, \
	}; \
	static int eirq_nxp_s32_init_##n(const struct device *dev) \
	{ \
		int err; \
		\
		err = eirq_nxp_s32_init(dev); \
		if (err) { \
			return err; \
		} \
		\
		EIRQ_NXP_S32_IRQ_CONFIG(n); \
		\
		return 0; \
	} \
	DEVICE_DT_INST_DEFINE(n, eirq_nxp_s32_init_##n, NULL, &eirq_nxp_s32_data_##n, \
			      &eirq_nxp_s32_conf_##n, PRE_KERNEL_2, CONFIG_INTC_INIT_PRIORITY, \
			      NULL);

DT_INST_FOREACH_STATUS_OKAY(EIRQ_NXP_S32_INIT_DEVICE)