1 /*
2 * Copyright (c) 2020 Nuvoton Technology Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT nuvoton_npcx_miwu
8
9 /**
10 * @file
11 * @brief Nuvoton NPCX MIWU driver
12 *
13 * The device Multi-Input Wake-Up Unit (MIWU) supports the Nuvoton embedded
 * controller (EC) to exit the 'Sleep' or 'Deep Sleep' power states, which
 * gives the chip better power consumption. Also, it provides signal conditioning such as
16 * 'Level' and 'Edge' trigger type and grouping of external interrupt sources
17 * of NVIC. The NPCX series has three identical MIWU modules: MIWU0, MIWU1,
18 * MIWU2. Together, they support a total of over 140 internal and/or external
19 * wake-up input (WUI) sources.
20 *
21 * This driver uses device tree files to present the relationship between
22 * MIWU and the other devices in different npcx series. For npcx7 series,
 * it includes:
24 * 1. npcxn-miwus-wui-map.dtsi: it presents relationship between wake-up inputs
25 * (WUI) and its source device such as gpio, timer, eSPI VWs and so on.
26 * 2. npcxn-miwus-int-map.dtsi: it presents relationship between MIWU group
 *    and NVIC interrupt in npcx series. Please note that it isn't a 1-to-1 mapping.
28 * For example, here is the mapping between miwu0's group a & d and IRQ7:
29 *
30 * map_miwu0_groups: {
31 * parent = <&miwu0>;
32 * group_ad0: group_ad0_map {
33 * irq = <7>;
34 * group_mask = <0x09>;
35 * };
36 * ...
37 * };
38 *
39 * It will connect IRQ 7 and intc_miwu_isr0() with the argument, group_mask,
40 * by IRQ_CONNECT() during driver initialization function. With group_mask,
41 * 0x09, the driver checks the pending bits of group a and group d in ISR.
42 * Then it will execute related callback functions if they have been
43 * registered properly.
44 *
45 * INCLUDE FILES: soc_miwu.h
46 *
47 */
48
49 #include <zephyr/device.h>
50 #include <zephyr/kernel.h>
51 #include <soc.h>
52 #include <zephyr/sys/__assert.h>
53 #include <zephyr/irq_nextlevel.h>
54 #include <zephyr/drivers/gpio.h>
55
56 #include "soc_miwu.h"
57 #include "soc_gpio.h"
58
59 #include <zephyr/logging/log.h>
60 #include <zephyr/irq.h>
61 LOG_MODULE_REGISTER(intc_miwu, LOG_LEVEL_ERR);
62
/* MIWU module instances */
#define NPCX_MIWU_DEV(inst) DEVICE_DT_INST_GET(inst),

/* Table of all MIWU devices, indexed by the 'table' field of struct npcx_wui */
static const struct device *const miwu_devs[] = {
	DT_INST_FOREACH_STATUS_OKAY(NPCX_MIWU_DEV)
};

/* Every WUI table index used by this driver must resolve to a device above */
BUILD_ASSERT(ARRAY_SIZE(miwu_devs) == NPCX_MIWU_TABLE_COUNT,
	     "Size of miwu_devs array must equal to NPCX_MIWU_TABLE_COUNT");
72
/* Driver config */
struct intc_miwu_config {
	/* miwu controller base address */
	uintptr_t base;
	/* index of miwu controller */
	uint8_t index;
};

/* Driver data */
struct intc_miwu_data {
	/* Callback functions list for each MIWU group */
	sys_slist_t cb_list_grp[8];
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	/* Per-group bitmask of pins emulating both-edge triggering in software */
	uint8_t both_edge_pins[8];
	/* Serializes updates of both_edge_pins and the WKEDG edge registers */
	struct k_spinlock lock;
#endif
};

/*
 * struct miwu_io_params is stored in the spare bits of a gpio_callback's
 * pin-mask field, so the two sizes must match exactly.
 */
BUILD_ASSERT(sizeof(struct miwu_io_params) == sizeof(gpio_port_pins_t),
	"Size of struct miwu_io_params must equal to struct gpio_port_pins_t");

/*
 * The io_cb view of miwu_callback must line up with struct gpio_callback so
 * a miwu_callback can be handed to GPIO handler code via a cast.
 */
BUILD_ASSERT(offsetof(struct miwu_callback, io_cb.params) +
	sizeof(struct miwu_io_params) == sizeof(struct gpio_callback),
	"Failed in size check of miwu_callback and gpio_callback structures!");

/* cb_type must sit at the same offset in both union views of the callback */
BUILD_ASSERT(offsetof(struct miwu_callback, io_cb.params.cb_type) ==
	offsetof(struct miwu_callback, dev_cb.params.cb_type),
	"Failed in offset check of cb_type field of miwu_callback structure");
101
102 /* MIWU local functions */
intc_miwu_dispatch_isr(sys_slist_t * cb_list,uint8_t mask)103 static void intc_miwu_dispatch_isr(sys_slist_t *cb_list, uint8_t mask)
104 {
105 struct miwu_callback *cb, *tmp;
106
107 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(cb_list, cb, tmp, node) {
108
109 if (cb->io_cb.params.cb_type == NPCX_MIWU_CALLBACK_GPIO) {
110 if (BIT(cb->io_cb.params.wui.bit) & mask) {
111 __ASSERT(cb->io_cb.handler, "No GPIO callback handler!");
112 cb->io_cb.handler(
113 npcx_get_gpio_dev(cb->io_cb.params.gpio_port),
114 (struct gpio_callback *)cb,
115 cb->io_cb.params.pin_mask);
116 }
117 } else {
118 if (BIT(cb->dev_cb.params.wui.bit) & mask) {
119 __ASSERT(cb->dev_cb.handler, "No device callback handler!");
120
121 cb->dev_cb.handler(cb->dev_cb.params.source,
122 &cb->dev_cb.params.wui);
123 }
124 }
125 }
126 }
127
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
/*
 * Emulate both-edge triggering for one wake-up input: sample the pin's
 * current level (WKST) and program the single-edge detector (WKEDG) to
 * fire on the opposite transition.
 */
static void npcx_miwu_set_pseudo_both_edge(uint8_t table, uint8_t group, uint8_t bit)
{
	const struct intc_miwu_config *cfg = miwu_devs[table]->config;
	const uint32_t reg_base = cfg->base;
	uint8_t edge_mask = BIT(bit);

	if (IS_BIT_SET(NPCX_WKST(reg_base, group), bit)) {
		/* Pin is currently high: arm the falling-edge trigger. */
		NPCX_WKEDG(reg_base, group) |= edge_mask;
	} else {
		/* Pin is currently low: arm the rising-edge trigger. */
		NPCX_WKEDG(reg_base, group) &= ~edge_mask;
	}
}
#endif
144
intc_miwu_isr_pri(int wui_table,int wui_group)145 static void intc_miwu_isr_pri(int wui_table, int wui_group)
146 {
147 const struct intc_miwu_config *config = miwu_devs[wui_table]->config;
148 struct intc_miwu_data *data = miwu_devs[wui_table]->data;
149 const uint32_t base = config->base;
150 uint8_t mask = NPCX_WKPND(base, wui_group) & NPCX_WKEN(base, wui_group);
151
152 #ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
153 uint8_t new_mask = mask;
154
155 while (new_mask != 0) {
156 uint8_t pending_bit = find_lsb_set(new_mask) - 1;
157 uint8_t pending_mask = BIT(pending_bit);
158
159 NPCX_WKPCL(base, wui_group) = pending_mask;
160 if ((data->both_edge_pins[wui_group] & pending_mask) != 0) {
161 npcx_miwu_set_pseudo_both_edge(wui_table, wui_group, pending_bit);
162 }
163
164 new_mask &= ~pending_mask;
165 };
166 #else
167 /* Clear pending bits before dispatch ISR */
168 if (mask) {
169 NPCX_WKPCL(base, wui_group) = mask;
170 }
171 #endif
172
173 /* Dispatch registered gpio isrs */
174 intc_miwu_dispatch_isr(&data->cb_list_grp[wui_group], mask);
175 }
176
/* Platform specific MIWU functions */

/*
 * Enable interrupt generation for the given wake-up input source.
 *
 * When CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND is on, pins emulating
 * both-edge triggering also get their edge polarity re-armed to the pin's
 * current level; the spinlock serializes this against other paths that
 * update WKEDG/both_edge_pins.
 */
void npcx_miwu_irq_enable(const struct npcx_wui *wui)
{
	const struct intc_miwu_config *config = miwu_devs[wui->table]->config;
	const uint32_t base = config->base;

#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	k_spinlock_key_t key;
	struct intc_miwu_data *data = miwu_devs[wui->table]->data;

	key = k_spin_lock(&data->lock);
#endif

	NPCX_WKEN(base, wui->group) |= BIT(wui->bit);

#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	/* Re-arm the pseudo both-edge polarity after enabling */
	if ((data->both_edge_pins[wui->group] & BIT(wui->bit)) != 0) {
		npcx_miwu_set_pseudo_both_edge(wui->table, wui->group, wui->bit);
	}
	k_spin_unlock(&data->lock, key);
#endif
}
199
npcx_miwu_irq_disable(const struct npcx_wui * wui)200 void npcx_miwu_irq_disable(const struct npcx_wui *wui)
201 {
202 const struct intc_miwu_config *config = miwu_devs[wui->table]->config;
203 const uint32_t base = config->base;
204
205 NPCX_WKEN(base, wui->group) &= ~BIT(wui->bit);
206 }
207
npcx_miwu_io_enable(const struct npcx_wui * wui)208 void npcx_miwu_io_enable(const struct npcx_wui *wui)
209 {
210 const struct intc_miwu_config *config = miwu_devs[wui->table]->config;
211 const uint32_t base = config->base;
212
213 NPCX_WKINEN(base, wui->group) |= BIT(wui->bit);
214 }
215
npcx_miwu_io_disable(const struct npcx_wui * wui)216 void npcx_miwu_io_disable(const struct npcx_wui *wui)
217 {
218 const struct intc_miwu_config *config = miwu_devs[wui->table]->config;
219 const uint32_t base = config->base;
220
221 NPCX_WKINEN(base, wui->group) &= ~BIT(wui->bit);
222 }
223
npcx_miwu_irq_get_state(const struct npcx_wui * wui)224 bool npcx_miwu_irq_get_state(const struct npcx_wui *wui)
225 {
226 const struct intc_miwu_config *config = miwu_devs[wui->table]->config;
227 const uint32_t base = config->base;
228
229 return IS_BIT_SET(NPCX_WKEN(base, wui->group), wui->bit);
230 }
231
/*
 * Read and clear the pending state of one wake-up input.
 *
 * Returns true when the WUI was pending; the pending bit is cleared as a
 * side effect. With the both-edge workaround enabled, the pseudo edge
 * polarity is re-armed under the spinlock right after clearing so the
 * next opposite edge is not lost.
 */
bool npcx_miwu_irq_get_and_clear_pending(const struct npcx_wui *wui)
{
	const struct intc_miwu_config *config = miwu_devs[wui->table]->config;
	const uint32_t base = config->base;
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	k_spinlock_key_t key;
	struct intc_miwu_data *data = miwu_devs[wui->table]->data;
#endif

	bool pending = IS_BIT_SET(NPCX_WKPND(base, wui->group), wui->bit);

	if (pending) {
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
		key = k_spin_lock(&data->lock);

		NPCX_WKPCL(base, wui->group) = BIT(wui->bit);

		/* Re-arm pseudo both-edge detection for this pin */
		if ((data->both_edge_pins[wui->group] & BIT(wui->bit)) != 0) {
			npcx_miwu_set_pseudo_both_edge(wui->table, wui->group, wui->bit);
		}
		k_spin_unlock(&data->lock, key);
#else
		NPCX_WKPCL(base, wui->group) = BIT(wui->bit);
#endif
	}

	return pending;
}
260
/*
 * Configure the detection mode (level/edge) and trigger condition of the
 * given wake-up input source.
 *
 * The WUI's interrupt is disabled while reprogramming and is NOT re-enabled
 * here; callers must invoke npcx_miwu_irq_enable() afterwards.
 *
 * Returns 0 on success, -EINVAL for an unsupported mode/trig combination.
 */
int npcx_miwu_interrupt_configure(const struct npcx_wui *wui,
				  enum miwu_int_mode mode, enum miwu_int_trig trig)
{
	const struct intc_miwu_config *config = miwu_devs[wui->table]->config;
	const uint32_t base = config->base;
	uint8_t pmask = BIT(wui->bit);
	int ret = 0;
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	struct intc_miwu_data *data = miwu_devs[wui->table]->data;
	k_spinlock_key_t key;
#endif

	/* Disable interrupt of wake-up input source before configuring it */
	npcx_miwu_irq_disable(wui);

#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	key = k_spin_lock(&data->lock);
	/* Assume no software both-edge emulation until configured below */
	data->both_edge_pins[wui->group] &= ~BIT(wui->bit);
#endif
	/* Handle interrupt for level trigger */
	if (mode == NPCX_MIWU_MODE_LEVEL) {
		/* Set detection mode to level */
		NPCX_WKMOD(base, wui->group) |= pmask;
		switch (trig) {
		/* Enable interrupting on level high */
		case NPCX_MIWU_TRIG_HIGH:
			NPCX_WKEDG(base, wui->group) &= ~pmask;
			break;
		/* Enable interrupting on level low */
		case NPCX_MIWU_TRIG_LOW:
			NPCX_WKEDG(base, wui->group) |= pmask;
			break;
		default:
			ret = -EINVAL;
			goto early_exit;
		}
	/* Handle interrupt for edge trigger */
	} else {
		/* Set detection mode to edge */
		NPCX_WKMOD(base, wui->group) &= ~pmask;
		switch (trig) {
		/* Handle interrupting on falling edge */
		case NPCX_MIWU_TRIG_LOW:
			NPCX_WKAEDG(base, wui->group) &= ~pmask;
			NPCX_WKEDG(base, wui->group) |= pmask;
			break;
		/* Handle interrupting on rising edge */
		case NPCX_MIWU_TRIG_HIGH:
			NPCX_WKAEDG(base, wui->group) &= ~pmask;
			NPCX_WKEDG(base, wui->group) &= ~pmask;
			break;
		/* Handle interrupting on both edges */
		case NPCX_MIWU_TRIG_BOTH:
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
			/* Emulate in software: single-edge mode, flipped per event */
			NPCX_WKAEDG(base, wui->group) &= ~pmask;
			data->both_edge_pins[wui->group] |= BIT(wui->bit);
#else
			/* Enable any edge */
			NPCX_WKAEDG(base, wui->group) |= pmask;
#endif
			break;
		default:
			ret = -EINVAL;
			goto early_exit;
		}
	}

	/* Enable wake-up input sources */
	NPCX_WKINEN(base, wui->group) |= pmask;

	/*
	 * Clear pending bit since it might be set if WKINEN bit is
	 * changed.
	 * NOTE(review): WKPCL appears to be write-1-to-clear; the
	 * read-modify-write '|=' here may also clear other pending bits in
	 * the group — confirm against the datasheet.
	 */
	NPCX_WKPCL(base, wui->group) |= pmask;

#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	/* Arm the initial pseudo both-edge polarity for this pin */
	if ((data->both_edge_pins[wui->group] & BIT(wui->bit)) != 0) {
		npcx_miwu_set_pseudo_both_edge(wui->table, wui->group, wui->bit);
	}
#endif

early_exit:
#ifdef CONFIG_NPCX_MIWU_BOTH_EDGE_TRIG_WORKAROUND
	k_spin_unlock(&data->lock, key);
#endif
	return ret;
}
349
npcx_miwu_init_gpio_callback(struct miwu_callback * callback,const struct npcx_wui * io_wui,int port)350 void npcx_miwu_init_gpio_callback(struct miwu_callback *callback,
351 const struct npcx_wui *io_wui, int port)
352 {
353 /* Initialize WUI and GPIO settings in unused bits field */
354 callback->io_cb.params.wui.table = io_wui->table;
355 callback->io_cb.params.wui.bit = io_wui->bit;
356 callback->io_cb.params.gpio_port = port;
357 callback->io_cb.params.cb_type = NPCX_MIWU_CALLBACK_GPIO;
358 callback->io_cb.params.wui.group = io_wui->group;
359 }
360
/*
 * Initialize a device-type callback: record the WUI source, the device that
 * generates it, and the handler to run when the event is dispatched.
 */
void npcx_miwu_init_dev_callback(struct miwu_callback *callback,
				 const struct npcx_wui *dev_wui,
				 miwu_dev_callback_handler_t handler,
				 const struct device *source)
{
	callback->dev_cb.params.cb_type = NPCX_MIWU_CALLBACK_DEV;
	callback->dev_cb.params.wui = *dev_wui;
	callback->dev_cb.params.source = source;
	callback->dev_cb.handler = handler;
}
374
npcx_miwu_manage_callback(struct miwu_callback * cb,bool set)375 int npcx_miwu_manage_callback(struct miwu_callback *cb, bool set)
376 {
377 struct npcx_wui *wui;
378 struct intc_miwu_data *data;
379 sys_slist_t *cb_list;
380
381 if (cb->io_cb.params.cb_type == NPCX_MIWU_CALLBACK_GPIO) {
382 wui = &cb->io_cb.params.wui;
383 } else {
384 wui = &cb->dev_cb.params.wui;
385 }
386
387 data = miwu_devs[wui->table]->data;
388 cb_list = &data->cb_list_grp[wui->group];
389 if (!sys_slist_is_empty(cb_list)) {
390 if (!sys_slist_find_and_remove(cb_list, &cb->node)) {
391 if (!set) {
392 return -EINVAL;
393 }
394 }
395 }
396
397 if (set) {
398 sys_slist_prepend(cb_list, &cb->node);
399 }
400
401 return 0;
402 }
403
/* MIWU driver registration */
/* Symbol-name helpers for the per-instance ISR and init functions */
#define NPCX_MIWU_ISR_FUNC(index) _CONCAT(intc_miwu_isr, index)
#define NPCX_MIWU_INIT_FUNC(inst) _CONCAT(intc_miwu_init, inst)
#define NPCX_MIWU_INIT_FUNC_DECL(inst) \
	static int intc_miwu_init##inst(const struct device *dev)

/* MIWU ISR implementation */
/*
 * One NVIC interrupt can serve several MIWU groups; 'arg' is the group
 * bitmask taken from the devicetree int-map (e.g. 0x09 selects groups a
 * and d). Each set bit dispatches the group's pending wake-up events.
 */
#define NPCX_MIWU_ISR_FUNC_IMPL(inst)                                          \
	static void intc_miwu_isr##inst(void *arg)                             \
	{                                                                      \
		uint8_t grp_mask = (uint32_t)arg;                              \
		int group = 0;                                                 \
									       \
		/* Check all MIWU groups belong to the same irq */             \
		do {                                                           \
			if (grp_mask & 0x01)                                   \
				intc_miwu_isr_pri(inst, group);                \
			group++;                                               \
			grp_mask = grp_mask >> 1;                              \
									       \
		} while (grp_mask != 0);                                       \
	}
426
/* MIWU init function implementation */
/*
 * Per-instance init: mask and clear every group's wake-up sources, then
 * connect each devicetree-mapped NVIC line to the instance ISR.
 */
#define NPCX_MIWU_INIT_FUNC_IMPL(inst)                                         \
	static int intc_miwu_init##inst(const struct device *dev)              \
	{                                                                      \
		int i;                                                         \
		const struct intc_miwu_config *config = dev->config;           \
		const uint32_t base = config->base;                            \
									       \
		/* Clear all MIWUs' pending and enable bits of MIWU device */  \
		for (i = 0; i < NPCX_MIWU_GROUP_COUNT; i++) {                  \
			NPCX_WKEN(base, i) = 0;                                \
			NPCX_WKPCL(base, i) = 0xFF;                            \
		}                                                              \
									       \
		/* Config IRQ and MIWU group directly */                       \
		DT_FOREACH_CHILD(NPCX_DT_NODE_FROM_MIWU_MAP(inst),             \
				 NPCX_DT_MIWU_IRQ_CONNECT_IMPL_CHILD_FUNC)     \
		return 0;                                                      \
	}                                                                      \
/*
 * Instantiate one MIWU controller: config/data structs, the device
 * definition at PRE_KERNEL_1, and its ISR/init function bodies.
 */
#define NPCX_MIWU_INIT(inst)                                                   \
	NPCX_MIWU_INIT_FUNC_DECL(inst);                                        \
									       \
	static const struct intc_miwu_config miwu_config_##inst = {            \
		.base = DT_REG_ADDR(DT_NODELABEL(miwu##inst)),                 \
		.index = DT_PROP(DT_NODELABEL(miwu##inst), index),             \
	};                                                                     \
	struct intc_miwu_data miwu_data_##inst;                                \
									       \
	DEVICE_DT_INST_DEFINE(inst,                                            \
			      NPCX_MIWU_INIT_FUNC(inst),                       \
			      NULL,                                            \
			      &miwu_data_##inst, &miwu_config_##inst,          \
			      PRE_KERNEL_1,                                    \
			      CONFIG_INTC_INIT_PRIORITY, NULL);                \
									       \
	NPCX_MIWU_ISR_FUNC_IMPL(inst)                                          \
									       \
	NPCX_MIWU_INIT_FUNC_IMPL(inst)

/* Expand the above for every enabled nuvoton,npcx-miwu devicetree node */
DT_INST_FOREACH_STATUS_OKAY(NPCX_MIWU_INIT)
468