1 /*
2 * Copyright 2024 NXP
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Wrapper of the i.MX Message Unit driver into Zephyr's MBOX model.
7 */
8
9 #include <zephyr/devicetree.h>
10 #include <zephyr/drivers/mbox.h>
11 #include <zephyr/irq.h>
12 #include <zephyr/sys/util_macro.h>
13 #include <fsl_mu.h>
14
15 #define LOG_LEVEL CONFIG_MBOX_LOG_LEVEL
16 #include <zephyr/logging/log.h>
17 LOG_MODULE_REGISTER(nxp_mbox_imx_mu);
18
19 #define DT_DRV_COMPAT nxp_mbox_imx_mu
20
21 #define MU_MAX_CHANNELS 4
22 #define MU_MBOX_SIZE sizeof(uint32_t)
23
/* Per-instance mutable driver state. */
struct nxp_imx_mu_data {
	/* Per-channel callback invoked from the ISR; NULL when unregistered. */
	mbox_callback_t cb[MU_MAX_CHANNELS];
	/* Opaque pointer handed back to the matching callback. */
	void *user_data[MU_MAX_CHANNELS];
	/* Last word read from an RX register. The mbox_msg passed to the
	 * callback points at this field, so its contents are only valid for
	 * the duration of the callback.
	 */
	uint32_t received_data;
};

/* Per-instance constant configuration (from devicetree). */
struct nxp_imx_mu_config {
	/* Base address of the Message Unit peripheral registers. */
	MU_Type *base;
};
33
/*
 * Send on @channel: signalling mode when @msg is NULL (raise the remote
 * general-purpose interrupt), otherwise transfer exactly one 32-bit word.
 *
 * Returns 0 on success, -EINVAL for a bad channel, -EMSGSIZE for a payload
 * that is not exactly MU_MBOX_SIZE bytes, -EBUSY if a previous signal has
 * not yet been taken by the remote side.
 */
static int nxp_imx_mu_send(const struct device *dev, uint32_t channel, const struct mbox_msg *msg)
{
	uint32_t __aligned(4) data32;
	const struct nxp_imx_mu_config *cfg = dev->config;

	if (channel >= MU_MAX_CHANNELS) {
		return -EINVAL;
	}

	/* Signalling mode. */
	if (msg == NULL) {
		/* Map the HAL status onto a Zephyr error code instead of
		 * leaking a positive kStatus_Fail value to the caller.
		 * The fsl_mu trigger flags are laid out MSB-first, hence
		 * the right shift by the channel index.
		 */
		if (MU_TriggerInterrupts(cfg->base, kMU_GenInt0InterruptTrigger >> channel) !=
		    kStatus_Success) {
			return -EBUSY;
		}
		return 0;
	}

	/* Data transfer mode. */
	if (msg->size != MU_MBOX_SIZE) {
		/* We can only send this many bytes at a time. */
		return -EMSGSIZE;
	}

	/* memcpy to avoid issues when msg->data is not word-aligned. */
	memcpy(&data32, msg->data, msg->size);
	MU_SendMsg(cfg->base, channel, data32);
	return 0;
}
59
/*
 * Register @cb (with its @user_data) for notifications on @channel.
 * Passing a NULL callback unregisters the channel.
 *
 * Returns 0 on success, -EINVAL for an out-of-range channel.
 */
static int nxp_imx_mu_register_callback(const struct device *dev, uint32_t channel,
					mbox_callback_t cb, void *user_data)
{
	struct nxp_imx_mu_data *mu_data = dev->data;

	/* Reject channel ids beyond the MU's four mailbox channels. */
	if (channel >= MU_MAX_CHANNELS) {
		return -EINVAL;
	}

	mu_data->user_data[channel] = user_data;
	mu_data->cb[channel] = cb;

	return 0;
}
74
nxp_imx_mu_mtu_get(const struct device * dev)75 static int nxp_imx_mu_mtu_get(const struct device *dev)
76 {
77 ARG_UNUSED(dev);
78 return MU_MBOX_SIZE;
79 }
80
nxp_imx_mu_max_channels_get(const struct device * dev)81 static uint32_t nxp_imx_mu_max_channels_get(const struct device *dev)
82 {
83 ARG_UNUSED(dev);
84 return MU_MAX_CHANNELS;
85 }
86
nxp_imx_mu_set_enabled(const struct device * dev,uint32_t channel,bool enable)87 static int nxp_imx_mu_set_enabled(const struct device *dev, uint32_t channel, bool enable)
88 {
89 struct nxp_imx_mu_data *data = dev->data;
90 const struct nxp_imx_mu_config *cfg = dev->config;
91
92 if (channel >= MU_MAX_CHANNELS) {
93 return -EINVAL;
94 }
95
96 if (enable) {
97 if (data->cb[channel] == NULL) {
98 LOG_WRN("Enabling channel without a registered callback");
99 }
100 MU_EnableInterrupts(
101 cfg->base, kMU_GenInt0InterruptEnable | kMU_GenInt1InterruptEnable |
102 kMU_GenInt2InterruptEnable | kMU_GenInt3InterruptEnable |
103 kMU_Rx0FullInterruptEnable | kMU_Rx1FullInterruptEnable |
104 kMU_Rx2FullInterruptEnable | kMU_Rx3FullInterruptEnable);
105 } else {
106 MU_DisableInterrupts(
107 cfg->base, kMU_GenInt0InterruptEnable | kMU_GenInt1InterruptEnable |
108 kMU_GenInt2InterruptEnable | kMU_GenInt3InterruptEnable |
109 kMU_Rx0FullInterruptEnable | kMU_Rx1FullInterruptEnable |
110 kMU_Rx2FullInterruptEnable | kMU_Rx3FullInterruptEnable);
111 }
112
113 return 0;
114 }
115
/* MBOX driver API vtable shared by every MU instance. */
static DEVICE_API(mbox, nxp_imx_mu_driver_api) = {
	.send = nxp_imx_mu_send,
	.register_callback = nxp_imx_mu_register_callback,
	.mtu_get = nxp_imx_mu_mtu_get,
	.max_channels_get = nxp_imx_mu_max_channels_get,
	.set_enabled = nxp_imx_mu_set_enabled,
};

/* Common ISR body; defined after the per-instance handlers are generated. */
static void handle_irq(const struct device *dev);
125
/*
 * Defines, for MU instance @idx: the per-instance data and config objects,
 * the init function (HAL init + IRQ hookup) and the Zephyr device object.
 * The MU_<idx>_IRQHandler referenced here is emitted by MU_IRQ_HANDLER().
 *
 * Fix: "const static" -> "static const" (the storage-class specifier must
 * come first per C convention and Zephyr's checkpatch rules).
 */
#define MU_INSTANCE_DEFINE(idx)                                                            \
	static struct nxp_imx_mu_data nxp_imx_mu_##idx##_data;                             \
	static const struct nxp_imx_mu_config nxp_imx_mu_##idx##_config = {                \
		.base = (MU_Type *)DT_INST_REG_ADDR(idx),                                  \
	};                                                                                 \
	void MU_##idx##_IRQHandler(void);                                                  \
	static int nxp_imx_mu_##idx##_init(const struct device *dev)                       \
	{                                                                                  \
		ARG_UNUSED(dev);                                                           \
		MU_Init(nxp_imx_mu_##idx##_config.base);                                   \
		IRQ_CONNECT(DT_INST_IRQN(idx), DT_INST_IRQ(idx, priority),                 \
			    MU_##idx##_IRQHandler, NULL, 0);                               \
		irq_enable(DT_INST_IRQN(idx));                                             \
		return 0;                                                                  \
	}                                                                                  \
	DEVICE_DT_INST_DEFINE(idx, nxp_imx_mu_##idx##_init, NULL,                          \
			      &nxp_imx_mu_##idx##_data, &nxp_imx_mu_##idx##_config,        \
			      PRE_KERNEL_1, CONFIG_MBOX_INIT_PRIORITY,                     \
			      &nxp_imx_mu_driver_api)
144
/* Emits the per-instance ISR, which delegates to the common handle_irq(). */
#define MU_IRQ_HANDLER(idx)                                                                \
	void MU_##idx##_IRQHandler(void)                                                   \
	{                                                                                  \
		const struct device *dev = DEVICE_DT_INST_GET(idx);                        \
		handle_irq(dev);                                                           \
	}
151
/* Instantiate device object + ISR for one devicetree MU node. */
#define MU_INST(idx)                                                                       \
	MU_INSTANCE_DEFINE(idx);                                                           \
	MU_IRQ_HANDLER(idx);

/* Expand MU_INST for every enabled nxp,mbox-imx-mu node. */
DT_INST_FOREACH_STATUS_OKAY(MU_INST)
157
158 static void handle_irq(const struct device *dev)
159 {
160 struct nxp_imx_mu_data *data = dev->data;
161 const struct nxp_imx_mu_config *config = dev->config;
162 const uint32_t flag = MU_GetStatusFlags(config->base);
163
164 for (int i_channel = 0; i_channel < MU_MAX_CHANNELS; i_channel++) {
165 if ((flag & (kMU_Rx0FullFlag >> i_channel)) == (kMU_Rx0FullFlag >> i_channel)) {
166 data->received_data = MU_ReceiveMsgNonBlocking(config->base, i_channel);
167 struct mbox_msg msg = {(const void *)&data->received_data, MU_MBOX_SIZE};
168
169 if (data->cb[i_channel]) {
170 data->cb[i_channel](dev, i_channel, data->user_data[i_channel],
171 &msg);
172 }
173 } else if ((flag & (kMU_GenInt0Flag >> i_channel)) ==
174 (kMU_GenInt0Flag >> i_channel)) {
175 MU_ClearStatusFlags(config->base, (kMU_GenInt0Flag >> i_channel));
176 if (data->cb[i_channel]) {
177 data->cb[i_channel](dev, i_channel, data->user_data[i_channel],
178 NULL);
179 }
180 }
181 }
182 }
183