/*
 * Copyright (c) 2020-2022 Arm Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdint.h>
#include <stdbool.h>
#include "mhu_v2_x.h"

#define _MHU_V2_X_MAX_CHANNELS    124
#define _MHU_V2_1_MAX_CHCOMB_INT  4
#define ENABLE                    0x1
#define DISABLE                   0x0
#define CLEAR_INTR                0x1
#define CH_PER_CH_COMB            0x20
#define SEND_FRAME(p_mhu)         ((struct _mhu_v2_x_send_frame_t *)p_mhu)
#define RECV_FRAME(p_mhu)         ((struct _mhu_v2_x_recv_frame_t *)p_mhu)

#define MHU_MAJOR_REV_V2          0x1u
#define MHU_MINOR_REV_2_0         0x0u
#define MHU_MINOR_REV_2_1         0x1u

struct _mhu_v2_x_send_ch_window_t {
    /* Offset: 0x00 (R/ ) Channel Status */
    volatile uint32_t ch_st;
    /* Offset: 0x04 (R/ ) Reserved */
    volatile uint32_t reserved_0;
    /* Offset: 0x08 (R/ ) Reserved */
    volatile uint32_t reserved_1;
    /* Offset: 0x0C ( /W) Channel Set */
    volatile uint32_t ch_set;
    /* Offset: 0x10 (R/ ) Channel Interrupt Status (Reserved in 2.0) */
    volatile uint32_t ch_int_st;
    /* Offset: 0x14 ( /W) Channel Interrupt Clear (Reserved in 2.0) */
    volatile uint32_t ch_int_clr;
    /* Offset: 0x18 (R/W) Channel Interrupt Enable (Reserved in 2.0) */
    volatile uint32_t ch_int_en;
    /* Offset: 0x1C (R/ ) Reserved */
    volatile uint32_t reserved_2;
};

struct _mhu_v2_x_send_frame_t {
    /* Offset: 0x000 ( / ) Sender Channel Window 0-123 */
    struct _mhu_v2_x_send_ch_window_t send_ch_window[_MHU_V2_X_MAX_CHANNELS];
    /* Offset: 0xF80 (R/ ) Message Handling Unit Configuration */
    volatile uint32_t mhu_cfg;
    /* Offset: 0xF84 (R/W) Response Configuration */
    volatile uint32_t resp_cfg;
    /* Offset: 0xF88 (R/W) Access Request */
    volatile uint32_t access_request;
    /* Offset: 0xF8C (R/ ) Access Ready */
    volatile uint32_t access_ready;
    /* Offset: 0xF90 (R/ ) Interrupt Status */
    volatile uint32_t int_st;
    /* Offset: 0xF94 ( /W) Interrupt Clear */
    volatile uint32_t int_clr;
    /* Offset: 0xF98 (R/W) Interrupt Enable */
    volatile uint32_t int_en;
    /* Offset: 0xF9C (R/ ) Reserved */
    volatile uint32_t reserved_0;
    /* Offset: 0xFA0 (R/W) Channel Combined Interrupt Stat (Reserved in 2.0) */
    volatile uint32_t ch_comb_int_st[_MHU_V2_1_MAX_CHCOMB_INT];
    /* Offset: 0xFB0 (R/ ) Reserved */
    volatile uint32_t reserved_1[6];
    /* Offset: 0xFC8 (R/ ) Implementer Identification Register */
    volatile uint32_t iidr;
    /* Offset: 0xFCC (R/ ) Architecture Identification Register */
    volatile uint32_t aidr;
    /* Offset: 0xFD0 (R/ ) */
    volatile uint32_t pid_1[4];
    /* Offset: 0xFE0 (R/ ) */
    volatile uint32_t pid_0[4];
    /* Offset: 0xFF0 (R/ ) */
    volatile uint32_t cid[4];
};

struct _mhu_v2_x_rec_ch_window_t {
    /* Offset: 0x00 (R/ ) Channel Status */
    volatile uint32_t ch_st;
    /* Offset: 0x04 (R/ ) Channel Status Masked */
    volatile uint32_t ch_st_msk;
    /* Offset: 0x08 ( /W) Channel Clear */
    volatile uint32_t ch_clr;
    /* Offset: 0x0C (R/ ) Reserved */
    volatile uint32_t reserved_0;
    /* Offset: 0x10 (R/ ) Channel Mask Status */
    volatile uint32_t ch_msk_st;
    /* Offset: 0x14 ( /W) Channel Mask Set */
    volatile uint32_t ch_msk_set;
    /* Offset: 0x18 ( /W) Channel Mask Clear */
    volatile uint32_t ch_msk_clr;
    /* Offset: 0x1C (R/ ) Reserved */
    volatile uint32_t reserved_1;
};

struct _mhu_v2_x_recv_frame_t {
    /* Offset: 0x000 ( / ) Receiver Channel Window 0-123 */
    struct _mhu_v2_x_rec_ch_window_t rec_ch_window[_MHU_V2_X_MAX_CHANNELS];
    /* Offset: 0xF80 (R/ ) Message Handling Unit Configuration */
    volatile uint32_t mhu_cfg;
    /* Offset: 0xF84 (R/ ) Reserved */
    volatile uint32_t reserved_0[3];
    /* Offset: 0xF90 (R/ ) Interrupt Status (Reserved in 2.0) */
    volatile uint32_t int_st;
    /* Offset: 0xF94 ( /W) Interrupt Clear (Reserved in 2.0) */
    volatile uint32_t int_clr;
    /* Offset: 0xF98 (R/W) Interrupt Enable (Reserved in 2.0) */
    volatile uint32_t int_en;
    /* Offset: 0xF9C (R/ ) Reserved */
    volatile uint32_t reserved_1;
    /* Offset: 0xFA0 (R/ ) Channel Combined Interrupt Stat (Reserved in 2.0) */
    volatile uint32_t ch_comb_int_st[_MHU_V2_1_MAX_CHCOMB_INT];
    /* Offset: 0xFB0 (R/ ) Reserved */
    volatile uint32_t reserved_2[6];
    /* Offset: 0xFC8 (R/ ) Implementer Identification Register */
    volatile uint32_t iidr;
    /* Offset: 0xFCC (R/ ) Architecture Identification Register */
    volatile uint32_t aidr;
    /* Offset: 0xFD0 (R/ ) */
    volatile uint32_t pid_1[4];
    /* Offset: 0xFE0 (R/ ) */
    volatile uint32_t pid_0[4];
    /* Offset: 0xFF0 (R/ ) */
    volatile uint32_t cid[4];
};

union _mhu_v2_x_frame_t {
    struct _mhu_v2_x_send_frame_t send_frame;
    struct _mhu_v2_x_recv_frame_t recv_frame;
};
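
/*
 * Illustrative layout check, not part of the original driver: each MHU v2.x
 * frame described above is expected to span exactly 4KB (0x1000 bytes) with
 * no padding, since every member is a uint32_t. Assumes a C11-capable
 * toolchain for _Static_assert; the guard can be dropped if that is
 * guaranteed by the build.
 */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
_Static_assert(sizeof(struct _mhu_v2_x_send_frame_t) == 0x1000,
               "MHU v2.x sender frame must be 4KB");
_Static_assert(sizeof(struct _mhu_v2_x_recv_frame_t) == 0x1000,
               "MHU v2.x receiver frame must be 4KB");
#endif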

enum mhu_v2_x_error_t mhu_v2_x_driver_init(struct mhu_v2_x_dev_t *dev,
    enum mhu_v2_x_supported_revisions rev)
{
    uint32_t AIDR = 0;
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (dev->is_initialized) {
        return MHU_V_2_X_ERR_ALREADY_INIT;
    }

    if (rev == MHU_REV_READ_FROM_HW) {
        /* Read revision from HW */
        if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
            AIDR = p_mhu->recv_frame.aidr;
        } else {
            AIDR = p_mhu->send_frame.aidr;
        }

        /* Get bits 7:4 to read major revision */
        if (((AIDR >> 4) & 0xF) != MHU_MAJOR_REV_V2) {
            /* Unsupported MHU version */
            return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
        } /* No need to save major version, driver only supports MHUv2 */

        /* Get bits 3:0 to read minor revision */
        dev->subversion = AIDR & 0xF;

        if (dev->subversion != MHU_MINOR_REV_2_0 &&
            dev->subversion != MHU_MINOR_REV_2_1) {
            /* Unsupported subversion */
            return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
        }
    } else {
        /* Revision was provided by the caller */
        if (rev == MHU_REV_2_0) {
            dev->subversion = MHU_MINOR_REV_2_0;
        } else if (rev == MHU_REV_2_1) {
            dev->subversion = MHU_MINOR_REV_2_1;
        } else {
            /* Unsupported subversion */
            return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
        } /* No need to save major version, driver only supports MHUv2 */
    }

    dev->is_initialized = true;

    return MHU_V_2_X_ERR_NONE;
}
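
/*
 * Minimal initialization sketch (illustrative only, not part of the driver).
 * MHU0_SENDER_BASE and platform_mhu0_init() are hypothetical; real
 * integrations define their mhu_v2_x_dev_t instances in platform code and
 * may pass an explicit revision instead of MHU_REV_READ_FROM_HW.
 */
#if 0
#define MHU0_SENDER_BASE  0x40000000u  /* hypothetical base address */

static struct mhu_v2_x_dev_t mhu0_sender_dev = {
    .base = MHU0_SENDER_BASE,
    .frame = MHU_V2_X_SENDER_FRAME,
};

static enum mhu_v2_x_error_t platform_mhu0_init(void)
{
    /* Probe AIDR to discover whether the block is MHU v2.0 or v2.1 */
    return mhu_v2_x_driver_init(&mhu0_sender_dev, MHU_REV_READ_FROM_HW);
}
#endif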

uint32_t mhu_v2_x_get_num_channel_implemented(const struct mhu_v2_x_dev_t *dev)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame == MHU_V2_X_SENDER_FRAME) {
        return (SEND_FRAME(p_mhu))->mhu_cfg;
    } else {
        return (RECV_FRAME(p_mhu))->mhu_cfg;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_channel_send(const struct mhu_v2_x_dev_t *dev,
    uint32_t channel, uint32_t val)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame == MHU_V2_X_SENDER_FRAME) {
        (SEND_FRAME(p_mhu))->send_ch_window[channel].ch_set = val;
        return MHU_V_2_X_ERR_NONE;
    } else {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_channel_poll(const struct mhu_v2_x_dev_t *dev,
    uint32_t channel, uint32_t *value)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame == MHU_V2_X_SENDER_FRAME) {
        *value = (SEND_FRAME(p_mhu))->send_ch_window[channel].ch_st;
        return MHU_V_2_X_ERR_NONE;
    } else {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_channel_clear(const struct mhu_v2_x_dev_t *dev,
    uint32_t channel)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
        (RECV_FRAME(p_mhu))->rec_ch_window[channel].ch_clr = UINT32_MAX;
        return MHU_V_2_X_ERR_NONE;
    } else {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_channel_receive(
    const struct mhu_v2_x_dev_t *dev, uint32_t channel, uint32_t *value)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
        *value = (RECV_FRAME(p_mhu))->rec_ch_window[channel].ch_st;
        return MHU_V_2_X_ERR_NONE;
    } else {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_channel_mask_set(
    const struct mhu_v2_x_dev_t *dev, uint32_t channel, uint32_t mask)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
        (RECV_FRAME(p_mhu))->rec_ch_window[channel].ch_msk_set = mask;
        return MHU_V_2_X_ERR_NONE;
    } else {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_channel_mask_clear(
    const struct mhu_v2_x_dev_t *dev, uint32_t channel, uint32_t mask)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
        (RECV_FRAME(p_mhu))->rec_ch_window[channel].ch_msk_clr = mask;
        return MHU_V_2_X_ERR_NONE;
    } else {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_channel_interrupt_enable(
    const struct mhu_v2_x_dev_t *dev, uint32_t channel)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->subversion != MHU_MINOR_REV_2_1) {
        /* Channel interrupts are only supported in MHU v2.1 */
        return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
    }

    if (dev->frame == MHU_V2_X_SENDER_FRAME) {
        (SEND_FRAME(p_mhu))->send_ch_window[channel].ch_int_en = ENABLE;
        return MHU_V_2_X_ERR_NONE;
    } else {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_channel_interrupt_disable(
    const struct mhu_v2_x_dev_t *dev, uint32_t channel)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->subversion != MHU_MINOR_REV_2_1) {
        /* Channel interrupts are only supported in MHU v2.1 */
        return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
    }

    if (dev->frame == MHU_V2_X_SENDER_FRAME) {
        (SEND_FRAME(p_mhu))->send_ch_window[channel].ch_int_en = DISABLE;
        return MHU_V_2_X_ERR_NONE;
    } else {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_channel_interrupt_clear(
    const struct mhu_v2_x_dev_t *dev, uint32_t channel)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->subversion != MHU_MINOR_REV_2_1) {
        /* Channel interrupts are only supported in MHU v2.1 */
        return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
    }

    if (dev->frame == MHU_V2_X_SENDER_FRAME) {
        (SEND_FRAME(p_mhu))->send_ch_window[channel].ch_int_clr = CLEAR_INTR;
        return MHU_V_2_X_ERR_NONE;
    } else {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_initiate_transfer(
    const struct mhu_v2_x_dev_t *dev)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame != MHU_V2_X_SENDER_FRAME) {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }

    (SEND_FRAME(p_mhu))->access_request = ENABLE;

    while (!((SEND_FRAME(p_mhu))->access_ready)) {
        /* Wait in a loop for access ready signal to be high */
        ;
    }

    return MHU_V_2_X_ERR_NONE;
}

enum mhu_v2_x_error_t mhu_v2_x_close_transfer(const struct mhu_v2_x_dev_t *dev)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame != MHU_V2_X_SENDER_FRAME) {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }

    (SEND_FRAME(p_mhu))->access_request = DISABLE;

    return MHU_V_2_X_ERR_NONE;
}
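
/*
 * Illustrative sender-side sequence (not part of the driver): raise the
 * access request, write the channel, wait for the receiver to clear the
 * channel status, then drop the access request. The helper name
 * send_doorbell() is hypothetical and error handling is kept minimal.
 */
#if 0
static enum mhu_v2_x_error_t send_doorbell(const struct mhu_v2_x_dev_t *dev,
    uint32_t channel, uint32_t value)
{
    enum mhu_v2_x_error_t err;
    uint32_t status = 0;

    err = mhu_v2_x_initiate_transfer(dev);
    if (err != MHU_V_2_X_ERR_NONE) {
        return err;
    }

    err = mhu_v2_x_channel_send(dev, channel, value);
    if (err != MHU_V_2_X_ERR_NONE) {
        return err;
    }

    /* The receiver acknowledges by clearing the channel status */
    do {
        err = mhu_v2_x_channel_poll(dev, channel, &status);
        if (err != MHU_V_2_X_ERR_NONE) {
            return err;
        }
    } while (status != 0);

    return mhu_v2_x_close_transfer(dev);
}
#endif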

enum mhu_v2_x_error_t mhu_v2_x_get_access_request(
    const struct mhu_v2_x_dev_t *dev, uint32_t *val)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame != MHU_V2_X_SENDER_FRAME) {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }

    *val = (SEND_FRAME(p_mhu))->access_request;

    return MHU_V_2_X_ERR_NONE;
}

enum mhu_v2_x_error_t mhu_v2_x_set_access_request(
    const struct mhu_v2_x_dev_t *dev)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame != MHU_V2_X_SENDER_FRAME) {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }

    (SEND_FRAME(p_mhu))->access_request = ENABLE;

    return MHU_V_2_X_ERR_NONE;
}

enum mhu_v2_x_error_t mhu_v2_x_reset_access_request(
    const struct mhu_v2_x_dev_t *dev)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame != MHU_V2_X_SENDER_FRAME) {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }

    (SEND_FRAME(p_mhu))->access_request = DISABLE;

    return MHU_V_2_X_ERR_NONE;
}

enum mhu_v2_x_error_t mhu_v2_x_get_access_ready(
    const struct mhu_v2_x_dev_t *dev, uint32_t *val)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame != MHU_V2_X_SENDER_FRAME) {
        return MHU_V_2_X_ERR_INVALID_ARG;
    }

    *val = (SEND_FRAME(p_mhu))->access_ready;

    return MHU_V_2_X_ERR_NONE;
}

uint32_t mhu_v2_x_get_interrupt_status(const struct mhu_v2_x_dev_t *dev)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->frame == MHU_V2_X_SENDER_FRAME) {
        return (SEND_FRAME(p_mhu))->int_st;
    } else {
        return (RECV_FRAME(p_mhu))->int_st;
    }
}

enum mhu_v2_x_error_t mhu_v2_x_interrupt_enable(
    const struct mhu_v2_x_dev_t *dev, uint32_t mask)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->subversion == MHU_MINOR_REV_2_0) {
        if (mask & MHU_2_1_INTR_CHCOMB_MASK) {
            /* Combined channel IRQ is not present in v2.0 */
            return MHU_V_2_X_ERR_INVALID_ARG;
        }

        if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
            /* Only sender frame has these registers */
            return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
        }
    }

    if (dev->frame == MHU_V2_X_SENDER_FRAME) {
        (SEND_FRAME(p_mhu))->int_en |= mask;
    } else {
        (RECV_FRAME(p_mhu))->int_en |= mask;
    }

    return MHU_V_2_X_ERR_NONE;
}

enum mhu_v2_x_error_t mhu_v2_x_interrupt_disable(
    const struct mhu_v2_x_dev_t *dev, uint32_t mask)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->subversion == MHU_MINOR_REV_2_0) {
        if (mask & MHU_2_1_INTR_CHCOMB_MASK) {
            /* Combined channel IRQ is not present in v2.0 */
            return MHU_V_2_X_ERR_INVALID_ARG;
        }

        if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
            /* Only sender frame has these registers */
            return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
        }
    }

    if (dev->frame == MHU_V2_X_SENDER_FRAME) {
        (SEND_FRAME(p_mhu))->int_en &= ~mask;
    } else {
        (RECV_FRAME(p_mhu))->int_en &= ~mask;
    }

    return MHU_V_2_X_ERR_NONE;
}

enum mhu_v2_x_error_t mhu_v2_x_interrupt_clear(
    const struct mhu_v2_x_dev_t *dev, uint32_t mask)
{
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->subversion == MHU_MINOR_REV_2_0) {
        if (mask & MHU_2_1_INTR_CHCOMB_MASK) {
            /* Combined channel IRQ is not present in v2.0 */
            return MHU_V_2_X_ERR_INVALID_ARG;
        }

        if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
            /* Only sender frame has these registers */
            return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
        }
    }

    if (dev->frame == MHU_V2_X_SENDER_FRAME) {
        (SEND_FRAME(p_mhu))->int_clr = mask;
    } else {
        (RECV_FRAME(p_mhu))->int_clr = mask;
    }

    return MHU_V_2_X_ERR_NONE;
}

enum mhu_v2_x_error_t mhu_v2_1_get_ch_interrupt_num(
    const struct mhu_v2_x_dev_t *dev, uint32_t *channel)
{
    uint32_t i, j, status;
    union _mhu_v2_x_frame_t *p_mhu = (union _mhu_v2_x_frame_t *)dev->base;

    if (!(dev->is_initialized)) {
        return MHU_V_2_X_ERR_NOT_INIT;
    }

    if (dev->subversion != MHU_MINOR_REV_2_1) {
        /* Feature is only supported in MHU v2.1 */
        return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
    }

    for (i = 0; i < _MHU_V2_1_MAX_CHCOMB_INT; i++) {
        if (dev->frame == MHU_V2_X_SENDER_FRAME) {
            status = (SEND_FRAME(p_mhu))->ch_comb_int_st[i];
        } else {
            status = (RECV_FRAME(p_mhu))->ch_comb_int_st[i];
        }

        for (j = 0; j < CH_PER_CH_COMB; j++) {
            if (status & ENABLE) {
                *channel = (j + (i * CH_PER_CH_COMB));
                return MHU_V_2_X_ERR_NONE;
            }
            status >>= 1;
        }
    }

    return MHU_V_2_X_ERR_GENERAL;
}
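
/*
 * Illustrative receiver-side handler (not part of the driver): on a v2.1
 * combined channel interrupt, locate the signalled channel, read its value
 * and clear it so the sender's poll loop can complete. The handler name and
 * the handle_message() callback are hypothetical.
 */
#if 0
static void mhu_receiver_combined_irq_handler(const struct mhu_v2_x_dev_t *dev)
{
    uint32_t channel = 0;
    uint32_t value = 0;

    if (mhu_v2_1_get_ch_interrupt_num(dev, &channel) != MHU_V_2_X_ERR_NONE) {
        return;
    }

    if (mhu_v2_x_channel_receive(dev, channel, &value) != MHU_V_2_X_ERR_NONE) {
        return;
    }

    handle_message(channel, value);

    /* Clearing the channel also acknowledges the sender */
    (void)mhu_v2_x_channel_clear(dev, channel);
}
#endif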