/*
 * Copyright (c) 2023, Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * IPC - Interprocessor communication
 * https://infocenter.nordicsemi.com/topic/ps_nrf5340/ipc.html?cp=4_0_0_6_15
 *
 * This file provides the implementation of the nRF5340 IPC peripherals,
 * and instantiates N of them (and initializes them at startup and frees them on exit),
 * as described in the configuration (NHW_config.h)
 *
 * Each IPC instance has a configurable number of channels/tasks/events (up to 32)
 *
 * Notes:
 *  * Events which arrive close enough in time on the IPC channels are not merged.
 *    Note the spec does not specify t_IPC.
 */
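
/*
 * Illustrative sketch (not part of the model itself) of how the pieces below
 * fit together. The register layout (SEND_CNF, RECEIVE_CNF, TASKS_SEND,
 * EVENTS_RECEIVE) is the nRF5340 IPC one used throughout this file; the direct
 * register pokes are only an assumption of how an integration may drive it:
 *
 *   NRF_IPC_regs[0].SEND_CNF[0]    = 1 << 3;  // IPC0 task 0 sends on IPC channel 3
 *   NRF_IPC_regs[1].RECEIVE_CNF[1] = 1 << 3;  // IPC1 event 1 listens on IPC channel 3
 *   NRF_IPC_regs[0].TASKS_SEND[0]  = 1;
 *   nhw_IPC_regw_sideeffects_TASKS_SEND(0, 0);
 *   // => NRF_IPC_regs[1].EVENTS_RECEIVE[1] is now set, IPC1's interrupt line is
 *   //    re-evaluated, and its PUBLISH_RECEIVE[1] DPPI publication (if enabled)
 *   //    is signaled.
 */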

#include <string.h>
#include <stdint.h>
#include <stdlib.h> /* for free() */
#include <stdbool.h>
#include "nsi_tasks.h"
#include "bs_tracing.h"
#include "bs_oswrap.h"
#include "NHW_common_types.h"
#include "NHW_config.h"
#include "NHW_peri_types.h"
#include "NHW_templates.h"
#include "irq_ctrl.h"
#include "NHW_DPPI.h"

struct ipc_status {
  NRF_IPC_Type *NRF_IPC_regs;
  uint n_ch; /* Number of channels (tasks/events) configured in this IPC instance */

  uint dppi_map; //To which DPPI instance this IPC's subscribe & publish ports are connected
  //Which of the subscription ports are currently connected, and to which DPPI channel:
  struct nhw_subsc_mem* subscribed; //[n_ch]
};

/* Mapping of each IPC instance to the DPPI instance it is connected to */
static uint nhw_IPC_dppi_map[NHW_IPC_TOTAL_INST] = NHW_IPC_DPPI_MAP;
static struct ipc_status nhw_ipc_st[NHW_IPC_TOTAL_INST];
NRF_IPC_Type NRF_IPC_regs[NHW_IPC_TOTAL_INST];

/**
 * Initialize the IPC model
 */
static void nhw_ipc_init(void) {
  /* Number of channels configured in each IPC instance */
  uint nhw_ipc_n_ch[NHW_IPC_TOTAL_INST] = NHW_IPC_N_CH;

  memset(NRF_IPC_regs, 0, sizeof(NRF_IPC_Type) * NHW_IPC_TOTAL_INST);

  for (int i = 0; i < NHW_IPC_TOTAL_INST; i++) {
    nhw_ipc_st[i].NRF_IPC_regs = &NRF_IPC_regs[i];
    nhw_ipc_st[i].n_ch = nhw_ipc_n_ch[i];

    nhw_ipc_st[i].dppi_map = nhw_IPC_dppi_map[i];
    nhw_ipc_st[i].subscribed = (struct nhw_subsc_mem *)bs_calloc(nhw_ipc_n_ch[i], sizeof(struct nhw_subsc_mem));
  }
}

NSI_TASK(nhw_ipc_init, HW_INIT, 100);

/*
 * Free all IPC instances' resources before the program exits
 */
static void nhw_ipc_free(void)
{
  for (int i = 0; i < NHW_IPC_TOTAL_INST; i++) {
    free(nhw_ipc_st[i].subscribed);
    nhw_ipc_st[i].subscribed = NULL;
  }
}

NSI_TASK(nhw_ipc_free, ON_EXIT_PRE, 100);

static void nhw_IPC_eval_interrupt(uint inst) {
  static bool ipc_int_line[NHW_IPC_TOTAL_INST]; /* Is this IPC instance currently driving its interrupt line high? */
  /* Mapping of peripheral instance to {int controller instance, int number} */
  static struct nhw_irq_mapping nhw_ipc_irq_map[NHW_IPC_TOTAL_INST] = NHW_IPC_INT_MAP;
  bool new_int_line = false;

  struct ipc_status *this = &nhw_ipc_st[inst];
  NRF_IPC_Type *IPC_regs = &NRF_IPC_regs[inst];

  uint32_t event_m = 0;

  for (int i = 0; i < this->n_ch; i++) {
    if (IPC_regs->EVENTS_RECEIVE[i]) {
      event_m |= (uint32_t)1 << i;
    }
  }

  IPC_regs->INTPEND = IPC_regs->INTEN & event_m;

  new_int_line = (IPC_regs->INTPEND != 0);

  hw_irq_ctrl_toggle_level_irq_line_if(&ipc_int_line[inst],
                                       new_int_line,
                                       &nhw_ipc_irq_map[inst]);
}

#define CHECK_VALID_CHANNEL(inst, ch, what) \
  if (nhw_ipc_st[inst].n_ch <= ch) { \
    bs_trace_error_time_line("Attempting to access IPC%i %s for channel %u>=%u\n", \
                             inst, what, ch, nhw_ipc_st[inst].n_ch); \
  }

static void nhw_IPC_signal_EVENT(uint inst, uint ch) {
  NRF_IPC_regs[inst].EVENTS_RECEIVE[ch] = 1;
  nhw_IPC_eval_interrupt(inst);
  nhw_dppi_event_signal_if(nhw_IPC_dppi_map[inst], NRF_IPC_regs[inst].PUBLISH_RECEIVE[ch]);
}

/*
 * Signal an event in channel <ch> in the IPC backbone,
 * i.e. check for all IPC peripherals whether they are receiving
 * on that channel, and if so have them signal the corresponding
 * EVENTS_RECEIVE (see the illustrative example after the function below)
 */
static void nhw_IPC_notify_ipc_ch(uint ch) {
  for (uint inst = 0; inst < NHW_IPC_TOTAL_INST; inst++) {
    uint32_t ch_mask = (uint32_t)1 << ch;
    for (int i = 0; i < nhw_ipc_st[inst].n_ch; i++) {
      if (NRF_IPC_regs[inst].RECEIVE_CNF[i] & ch_mask) {
        nhw_IPC_signal_EVENT(inst, i);
      }
    }
  }
}
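
/*
 * Worked example (illustrative only): if IPC0.SEND_CNF[2] = 0x8 (bit 3 set) and
 * IPC1.RECEIVE_CNF[5] = 0x8, then triggering IPC0's TASKS_SEND[2] ends up calling
 * nhw_IPC_notify_ipc_ch(3), which finds the matching RECEIVE_CNF bit and sets
 * IPC1's EVENTS_RECEIVE[5] (raising its interrupt and/or DPPI publication as configured).
 */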

static void nhw_IPC_TASKS_SEND(uint inst, uint ch) {
  CHECK_VALID_CHANNEL(inst, ch, "TASK_SEND");

  uint32_t send_cnf = NRF_IPC_regs[inst].SEND_CNF[ch];

  /* Notify each IPC channel whose bit is set in SEND_CNF, lowest bit first */
  for (int i = __builtin_ffs(send_cnf) - 1; i >= 0; i = __builtin_ffs(send_cnf) - 1) {
    nhw_IPC_notify_ipc_ch(i);
    send_cnf &= ~(1U << i);
  }
}

void nhw_IPC_regw_sideeffects_TASKS_SEND(uint inst, uint ch) {
  if (NRF_IPC_regs[inst].TASKS_SEND[ch]) {
    NRF_IPC_regs[inst].TASKS_SEND[ch] = 0;
    nhw_IPC_TASKS_SEND(inst, ch);
  }
}

/* DPPI subscription callback: the instance and channel are packed in <param>
 * (instance in the upper bits, channel in the lower 16 bits, see
 * nhw_IPC_regw_sideeffects_SUBSCRIBE_SEND() below) */
static void nhw_IPC_tasks_send_wrap(void *param) {
  unsigned int inst = (uintptr_t)param >> 16;
  uint n = (uintptr_t)param & 0xFFFF;
  nhw_IPC_TASKS_SEND(inst, n);
}

void nhw_IPC_regw_sideeffects_SUBSCRIBE_SEND(uint inst, uint ch) {
  struct ipc_status *this = &nhw_ipc_st[inst];

  CHECK_VALID_CHANNEL(inst, ch, "SUBSCRIBE");

  nhw_dppi_common_subscribe_sideeffect(this->dppi_map,
                                       this->NRF_IPC_regs->SUBSCRIBE_SEND[ch],
                                       &this->subscribed[ch],
                                       nhw_IPC_tasks_send_wrap,
                                       (void *)(uintptr_t)((inst << 16) + ch));
}
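
/*
 * Illustrative DPPI subscription flow (a sketch; the SUBSCRIBE register encoding,
 * a DPPI channel index plus an enable bit, follows the usual nRF53 convention and
 * is interpreted by nhw_dppi_common_subscribe_sideeffect()):
 *
 *   NRF_IPC_regs[0].SUBSCRIBE_SEND[2] = 7 | (1UL << 31); // subscribe TASKS_SEND[2] to DPPI ch 7
 *   nhw_IPC_regw_sideeffects_SUBSCRIBE_SEND(0, 2);
 *   // From now on, whenever DPPI channel 7 fires, the DPPI model calls
 *   // nhw_IPC_tasks_send_wrap((void *)((0 << 16) + 2)), i.e. IPC0's TASKS_SEND[2].
 */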

NHW_SIDEEFFECTS_INTEN(IPC, NRF_IPC_regs[inst]., NRF_IPC_regs[inst].INTEN)
NHW_SIDEEFFECTS_INTSET(IPC, NRF_IPC_regs[inst]., NRF_IPC_regs[inst].INTEN)
NHW_SIDEEFFECTS_INTCLR(IPC, NRF_IPC_regs[inst]., NRF_IPC_regs[inst].INTEN)

NHW_SIDEEFFECTS_EVENTS(IPC)