/*
 * Copyright 2022-2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_s32_mru

#include <zephyr/drivers/mbox.h>
#include <zephyr/irq.h>
#include <zephyr/sys/util_macro.h>
#include <Mru_Ip.h>

#define LOG_LEVEL CONFIG_MBOX_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(nxp_s32_mru);

#define MRU_MAX_INT_GROUPS	2
#define MRU_MAX_CHANNELS	12
#define MRU_MAX_MBOX_PER_CHAN	1
#define MRU_MBOX_SIZE		4
#define MRU_CHANNEL_OFFSET	0x1000

/* Utility macros to convert a GIC interrupt number to the corresponding interrupt group index */
#define _MRU_IRQ_17		MRU_IP_INT_GROUP_0
#define _MRU_IRQ_18		MRU_IP_INT_GROUP_1
#define MRU_INT_GROUP(irq)	_CONCAT(_MRU_IRQ_, irq)
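/* For example, MRU_INT_GROUP(DT_INST_IRQN(n)) resolves to MRU_IP_INT_GROUP_0 when
 * the instance is wired to GIC interrupt 17.
 */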

struct nxp_s32_mru_data {
	mbox_callback_t cb[MRU_MAX_CHANNELS];
	void *user_data[MRU_MAX_CHANNELS];
};

struct nxp_s32_mru_config {
	RTU_MRU_Type *base;
	Mru_Ip_ConfigType hw_cfg;
	void (*config_irq)(void);
	uint8_t irq_group;
};

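/* An Rx channel is valid if it is below both the driver limit and the number of
 * Rx channels configured for this instance.
 */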
static inline bool is_rx_channel_valid(const struct device *dev, uint32_t ch)
{
	const struct nxp_s32_mru_config *cfg = dev->config;

	return ((ch < MRU_MAX_CHANNELS) && (ch < cfg->hw_cfg.NumChannel));
}

/* Get a channel's mailbox address, without bounds validation */
static inline uintptr_t get_mbox_addr(const struct device *dev, uint32_t channel,
				      uint32_t mbox)
{
	const struct nxp_s32_mru_config *cfg = dev->config;

	return ((uintptr_t)cfg->base + (channel + 1) * MRU_CHANNEL_OFFSET
		+ mbox * MRU_MBOX_SIZE);
}

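/* Transmit msg->data through the channel's Tx mailbox(es) using Mru_Ip_Transmit().
 * Returns -EBUSY if the IP reports that the transmission could not be started,
 * e.g. because the previous message has not been read out yet.
 */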
static int nxp_s32_mru_send(const struct device *dev, uint32_t channel,
			    const struct mbox_msg *msg)
{
	const struct nxp_s32_mru_config *cfg = dev->config;
	uint32_t *tx_mbox_addr[MRU_MAX_MBOX_PER_CHAN];
	Mru_Ip_TransmitChannelType tx_cfg;
	Mru_Ip_StatusType status;

	if (channel >= MRU_MAX_CHANNELS) {
		return -EINVAL;
	}

	if (msg == NULL) {
		return -EINVAL;
	} else if (msg->size > (MRU_MBOX_SIZE * MRU_MAX_MBOX_PER_CHAN)) {
		return -EMSGSIZE;
	}

	for (int i = 0; i < MRU_MAX_MBOX_PER_CHAN; i++) {
		tx_mbox_addr[i] = (uint32_t *)get_mbox_addr(dev, channel, i);
	}

	tx_cfg.NumTxMB = MRU_MAX_MBOX_PER_CHAN;
	tx_cfg.LastTxMBIndex = MRU_MAX_MBOX_PER_CHAN - 1;
	tx_cfg.MBAddList = (volatile uint32 * const *)tx_mbox_addr;
	tx_cfg.ChMBSTATAdd = &cfg->base->CHXCONFIG[channel].CH_MBSTAT;

	status = Mru_Ip_Transmit(&tx_cfg, (const uint32_t *)msg->data);

	return (status == MRU_IP_STATUS_SUCCESS ? 0 : -EBUSY);
}

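/* Register the user callback and argument for an Rx channel; they are invoked
 * from the Rx notification installed in the generated channel configuration.
 */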
static int nxp_s32_mru_register_callback(const struct device *dev, uint32_t channel,
					 mbox_callback_t cb, void *user_data)
{
	struct nxp_s32_mru_data *data = dev->data;

	if (!is_rx_channel_valid(dev, channel)) {
		return -EINVAL;
	}

	data->cb[channel] = cb;
	data->user_data[channel] = user_data;

	return 0;
}

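/* The MTU is the full payload of one channel: MRU_MBOX_SIZE bytes for each of the
 * MRU_MAX_MBOX_PER_CHAN mailboxes (a single 4-byte word in this configuration).
 */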
static int nxp_s32_mru_mtu_get(const struct device *dev)
{
	return (MRU_MBOX_SIZE * MRU_MAX_MBOX_PER_CHAN);
}

static uint32_t nxp_s32_mru_max_channels_get(const struct device *dev)
{
	return MRU_MAX_CHANNELS;
}

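/* Enable or disable reception on an Rx channel by toggling the channel enable,
 * interrupt enable and mailbox enable bits in CH_CFG0.
 */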
static int nxp_s32_mru_set_enabled(const struct device *dev, uint32_t channel,
				   bool enable)
{
	struct nxp_s32_mru_data *data = dev->data;
	const struct nxp_s32_mru_config *cfg = dev->config;

	const Mru_Ip_ChannelCfgType *ch_cfg = cfg->hw_cfg.ChannelCfg;

	if (!is_rx_channel_valid(dev, channel)) {
		return -EINVAL;
	}

	if (enable && (data->cb[channel] == NULL)) {
		LOG_WRN("Enabling channel without a registered callback");
	}

	if (enable) {
		/*
		 * First make the channel's registers writable by enabling the
		 * channel, then write again to enable interrupts and mailboxes
		 * so the remote can transmit
		 */
		*ch_cfg[channel].ChCFG0Add = RTU_MRU_CH_CFG0_CHE(1);
		*ch_cfg[channel].ChCFG0Add = RTU_MRU_CH_CFG0_IE(1)
						| RTU_MRU_CH_CFG0_MBE0(1)
						| RTU_MRU_CH_CFG0_CHE(1);
	} else {
		/*
		 * Disable interrupts and mailboxes on this channel, making
		 * the channel's registers no longer writable afterwards
		 */
		*ch_cfg[channel].ChCFG0Add = RTU_MRU_CH_CFG0_IE(0)
						| RTU_MRU_CH_CFG0_MBE0(0);
	}

	return 0;
}

static int nxp_s32_mru_init(const struct device *dev)
{
	const struct nxp_s32_mru_config *cfg = dev->config;

	if (cfg->hw_cfg.NumChannel == 0) {
		/* Nothing to do if no Rx channels are configured */
		return 0;
	}

	/* All configured Rx channels will be disabled after this call */
	Mru_Ip_Init(&cfg->hw_cfg);

	/*
	 * Configure and enable the interrupt group, but each channel's interrupts
	 * remain disabled until .set_enabled() is called
	 */
	cfg->config_irq();

	return 0;
}

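/* Forward the instance's interrupt-group events to the Mru_Ip handler, which reads
 * the received mailboxes and calls the registered receive notification.
 */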
void nxp_s32_mru_isr(const struct device *dev)
{
	const struct nxp_s32_mru_config *config = dev->config;

	Mru_Ip_IrqHandler(config->hw_cfg.InstanceId, config->irq_group);
}

static const struct mbox_driver_api nxp_s32_mru_driver_api = {
	.send = nxp_s32_mru_send,
	.register_callback = nxp_s32_mru_register_callback,
	.mtu_get = nxp_s32_mru_mtu_get,
	.max_channels_get = nxp_s32_mru_max_channels_get,
	.set_enabled = nxp_s32_mru_set_enabled,
};
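
/*
 * Illustrative sketch only (not part of this driver): one way an application could
 * drive channel 0 of an instance through Zephyr's channel-based mbox consumer API.
 * The "mru0" node label and the channel number are hypothetical and depend on the
 * board devicetree.
 *
 *	const struct device *mru = DEVICE_DT_GET(DT_NODELABEL(mru0));
 *	struct mbox_channel tx_chan;
 *	uint32_t word = 0x12345678;
 *	struct mbox_msg msg = { .data = &word, .size = sizeof(word) };
 *
 *	mbox_init_channel(&tx_chan, mru, 0);
 *	if (mbox_send(&tx_chan, &msg) == 0) {
 *		// one 32-bit word was written to channel 0's Tx mailbox
 *	}
 */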
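/*
 * Devicetree helpers: the optional "rx-channels" property selects how many Rx
 * channels are instantiated (0 means the instance is used for Tx only). Each
 * channel occupies a MRU_CHANNEL_OFFSET-sized region after the instance base,
 * with its mailboxes laid out at MRU_MBOX_SIZE strides.
 */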
#define MRU_BASE(n)		((RTU_MRU_Type *)DT_INST_REG_ADDR(n))
#define MRU_RX_CHANNELS(n)	DT_INST_PROP_OR(n, rx_channels, 0)
#define MRU_MBOX_ADDR(n, ch, mb)	\
	(DT_INST_REG_ADDR(n) + ((ch + 1) * MRU_CHANNEL_OFFSET) + (MRU_MBOX_SIZE * mb))

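/*
 * Resolve the Mru_Ip hardware instance index by matching this node's register
 * address against each IP_MRU_<i>_BASE; the LISTIFY OR-reduction yields the
 * index of the matching instance.
 */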
#define MRU_HW_INSTANCE_CHECK(i, n) \
	((DT_INST_REG_ADDR(n) == IP_MRU_##i##_BASE) ? i : 0)

#define MRU_HW_INSTANCE(n) \
	LISTIFY(__DEBRACKET RTU_MRU_INSTANCE_COUNT, MRU_HW_INSTANCE_CHECK, (|), n)

#define MRU_INIT_IRQ_FUNC(n)					\
	static void nxp_s32_mru_##n##_init_irq(void)		\
	{							\
		IRQ_CONNECT(DT_INST_IRQN(n),			\
			    DT_INST_IRQ(n, priority),		\
			    nxp_s32_mru_isr,			\
			    DEVICE_DT_INST_GET(n),		\
			    DT_INST_IRQ(n, flags));		\
		irq_enable(DT_INST_IRQN(n));			\
	}

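/*
 * Per-channel Rx resources: the list of mailbox addresses to read, a buffer the
 * received words are copied into, and the notification that forwards the data to
 * the Zephyr callback wrapper.
 */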
#define MRU_CH_RX_CFG(i, n)								\
	static volatile const uint32_t * const						\
	nxp_s32_mru_##n##_ch_##i##_rx_mbox_addr[MRU_MAX_MBOX_PER_CHAN] = {		\
		(uint32_t *const)MRU_MBOX_ADDR(n, i, 0),				\
	};										\
	static uint32_t nxp_s32_mru_##n##_ch_##i##_buf[MRU_MAX_MBOX_PER_CHAN];		\
	static const Mru_Ip_ReceiveChannelType nxp_s32_mru_##n##_ch_##i##_rx_cfg = {	\
		.ChannelId = i,								\
		.ChannelIndex = i,							\
		.NumRxMB = MRU_MAX_MBOX_PER_CHAN,					\
		.MBAddList = nxp_s32_mru_##n##_ch_##i##_rx_mbox_addr,			\
		.RxBuffer = nxp_s32_mru_##n##_ch_##i##_buf,				\
		.ReceiveNotification = nxp_s32_mru_##n##_cb				\
	}

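/*
 * Mailbox-to-interrupt-group routing: each mailbox of a channel is linked to its
 * Rx configuration under the interrupt group derived from this instance's IRQ,
 * so the group's handler can locate the channel on reception.
 */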
#define MRU_CH_RX_LINK_CFG_MBOX(i, n, chan, intgroup)					\
	{										\
		[intgroup] = { &nxp_s32_mru_##n##_ch_##chan##_rx_cfg }			\
	}

#define MRU_CH_RX_LINK_CFG(i, n)							\
	static const Mru_Ip_MBLinkReceiveChannelType					\
	nxp_s32_mru_##n##_ch_##i##_rx_link_cfg[MRU_MAX_MBOX_PER_CHAN][MRU_MAX_INT_GROUPS] = {\
		MRU_CH_RX_LINK_CFG_MBOX(0, n, i, MRU_INT_GROUP(DT_INST_IRQN(n)))	\
	}

#define MRU_CH_CFG(i, n)								\
	{										\
		.ChCFG0Add = &MRU_BASE(n)->CHXCONFIG[i].CH_CFG0,			\
		.ChCFG0 = RTU_MRU_CH_CFG0_IE(0) | RTU_MRU_CH_CFG0_MBE0(0),		\
		.ChCFG1Add = &MRU_BASE(n)->CHXCONFIG[i].CH_CFG1,			\
		.ChCFG1 = RTU_MRU_CH_CFG1_MBIC0(MRU_INT_GROUP(DT_INST_IRQN(n))),	\
		.ChMBSTATAdd = &MRU_BASE(n)->CHXCONFIG[i].CH_MBSTAT,			\
		.NumMailbox = MRU_MAX_MBOX_PER_CHAN,					\
		.MBLinkReceiveChCfg = nxp_s32_mru_##n##_ch_##i##_rx_link_cfg		\
	}

/* Callback wrapper to adapt MRU's baremetal driver callback to Zephyr's mbox driver callback */
#define MRU_CALLBACK_WRAPPER_FUNC(n)								\
	void nxp_s32_mru_##n##_cb(uint8_t channel, const uint32_t *buf, uint8_t mbox_count)	\
	{											\
		const struct device *dev = DEVICE_DT_INST_GET(n);				\
		struct nxp_s32_mru_data *data = dev->data;					\
												\
		if (is_rx_channel_valid(dev, channel)) {					\
			if (data->cb[channel] != NULL) {					\
				struct mbox_msg msg = {						\
					.data = (const void *)buf,				\
					.size = mbox_count * MRU_MBOX_SIZE			\
				};								\
				data->cb[channel](dev, channel, data->user_data[channel], &msg);\
			}									\
		}										\
	}

#define MRU_CH_RX_DEFINITIONS(n)						\
	MRU_CALLBACK_WRAPPER_FUNC(n)						\
	MRU_INIT_IRQ_FUNC(n)							\
	LISTIFY(MRU_RX_CHANNELS(n), MRU_CH_RX_CFG, (;), n);			\
	LISTIFY(MRU_RX_CHANNELS(n), MRU_CH_RX_LINK_CFG, (;), n);		\
	static const Mru_Ip_ChannelCfgType nxp_s32_mru_##n##_ch_cfg[] = {	\
		LISTIFY(MRU_RX_CHANNELS(n), MRU_CH_CFG, (,), n)			\
	}

#define MRU_INSTANCE_DEFINE(n)								\
	COND_CODE_0(MRU_RX_CHANNELS(n), (EMPTY), (MRU_CH_RX_DEFINITIONS(n)));		\
	static struct nxp_s32_mru_data nxp_s32_mru_##n##_data;				\
	static struct nxp_s32_mru_config nxp_s32_mru_##n##_config = {			\
		.base = MRU_BASE(n),							\
		.hw_cfg = {								\
			.InstanceId = MRU_HW_INSTANCE(n),				\
			.StateIndex = n,						\
			.NumChannel = MRU_RX_CHANNELS(n),				\
			.ChannelCfg = COND_CODE_0(MRU_RX_CHANNELS(n),			\
						  (NULL), (nxp_s32_mru_##n##_ch_cfg)),	\
			.NOTIFYAdd = {							\
				&MRU_BASE(n)->NOTIFY[0],				\
				&MRU_BASE(n)->NOTIFY[1]					\
			},								\
		},									\
		.irq_group = MRU_INT_GROUP(DT_INST_IRQN(n)),				\
		.config_irq = COND_CODE_0(MRU_RX_CHANNELS(n),				\
					  (NULL), (nxp_s32_mru_##n##_init_irq)),	\
	};										\
											\
	DEVICE_DT_INST_DEFINE(n, nxp_s32_mru_init, NULL,				\
			&nxp_s32_mru_##n##_data, &nxp_s32_mru_##n##_config,		\
			POST_KERNEL, CONFIG_MBOX_INIT_PRIORITY,				\
			&nxp_s32_mru_driver_api);

DT_INST_FOREACH_STATUS_OKAY(MRU_INSTANCE_DEFINE)