/* Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/spinlock.h>

#include <intel_adsp_ipc.h>
#include <adsp_ipc_regs.h>
#include <adsp_interrupt.h>
#include <zephyr/irq.h>
#include <zephyr/pm/state.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>
#include <errno.h>

void intel_adsp_ipc_set_message_handler(const struct device *dev,
	intel_adsp_ipc_handler_t fn, void *arg)
{
	struct intel_adsp_ipc_data *devdata = dev->data;
	k_spinlock_key_t key = k_spin_lock(&devdata->lock);

	devdata->handle_message = fn;
	devdata->handler_arg = arg;
	k_spin_unlock(&devdata->lock, key);
}

void intel_adsp_ipc_set_done_handler(const struct device *dev,
	intel_adsp_ipc_done_t fn, void *arg)
{
	struct intel_adsp_ipc_data *devdata = dev->data;
	k_spinlock_key_t key = k_spin_lock(&devdata->lock);

	devdata->done_notify = fn;
	devdata->done_arg = arg;
	k_spin_unlock(&devdata->lock, key);
}
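
/* Usage sketch (illustrative, not taken from this driver): a client could
 * register a message handler matching intel_adsp_ipc_handler_t as invoked by
 * the ISR below. The names my_msg_handler and my_ctx are hypothetical.
 *
 *   static bool my_msg_handler(const struct device *dev, void *arg,
 *                              uint32_t data, uint32_t ext_data)
 *   {
 *       // Handle the request; returning true lets the ISR signal DONE
 *       // immediately, returning false defers completion until
 *       // intel_adsp_ipc_complete() is called.
 *       return true;
 *   }
 *
 *   intel_adsp_ipc_set_message_handler(INTEL_ADSP_IPC_HOST_DEV,
 *                                      my_msg_handler, &my_ctx);
 */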

void z_intel_adsp_ipc_isr(const void *devarg)
{
	const struct device *dev = devarg;
	const struct intel_adsp_ipc_config *config = dev->config;
	struct intel_adsp_ipc_data *devdata = dev->data;

	volatile struct intel_adsp_ipc *regs = config->regs;
	k_spinlock_key_t key = k_spin_lock(&devdata->lock);

	if (regs->tdr & INTEL_ADSP_IPC_BUSY) {
		bool done = true;

		if (devdata->handle_message != NULL) {
			uint32_t msg = regs->tdr & ~INTEL_ADSP_IPC_BUSY;
			uint32_t ext = regs->tdd;

			done = devdata->handle_message(dev, devdata->handler_arg, msg, ext);
		}

		regs->tdr = INTEL_ADSP_IPC_BUSY;
		if (done) {
#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
			regs->tda = INTEL_ADSP_IPC_ACE1X_TDA_DONE;
#else
			regs->tda = INTEL_ADSP_IPC_DONE;
#endif
		}
	}

	/* Same signal, but on different bits in 1.5 */
	bool done = (regs->ida & INTEL_ADSP_IPC_DONE);

	if (done) {
		bool external_completion = false;

		if (devdata->done_notify != NULL) {
			external_completion = devdata->done_notify(dev, devdata->done_arg);
		}
		devdata->tx_ack_pending = false;
		/* Allow the system to enter the runtime idle state after the IPC acknowledgment
		 * is received.
		 */
		pm_policy_state_lock_put(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
		k_sem_give(&devdata->sem);

		/* IPC completion registers will be set externally */
		if (external_completion) {
			k_spin_unlock(&devdata->lock, key);
			return;
		}

		regs->ida = INTEL_ADSP_IPC_DONE;
	}

	k_spin_unlock(&devdata->lock, key);
}

int intel_adsp_ipc_init(const struct device *dev)
{
	pm_device_busy_set(dev);
	struct intel_adsp_ipc_data *devdata = dev->data;
	const struct intel_adsp_ipc_config *config = dev->config;

	memset(devdata, 0, sizeof(*devdata));

	k_sem_init(&devdata->sem, 0, 1);

	/* ACK any latched interrupts (including TDA to clear IDA on
	 * the other side!), then enable.
	 */
	config->regs->tdr = INTEL_ADSP_IPC_BUSY;
	config->regs->ida = INTEL_ADSP_IPC_DONE;
#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	config->regs->tda = INTEL_ADSP_IPC_ACE1X_TDA_DONE;
#else
	config->regs->tda = INTEL_ADSP_IPC_DONE;
#endif
	config->regs->ctl |= (INTEL_ADSP_IPC_CTL_IDIE | INTEL_ADSP_IPC_CTL_TBIE);
	pm_device_busy_clear(dev);

	return 0;
}

void intel_adsp_ipc_complete(const struct device *dev)
{
	const struct intel_adsp_ipc_config *config = dev->config;

#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
	config->regs->tda = INTEL_ADSP_IPC_ACE1X_TDA_DONE;
#else
	config->regs->tda = INTEL_ADSP_IPC_DONE;
#endif
}

bool intel_adsp_ipc_is_complete(const struct device *dev)
{
	const struct intel_adsp_ipc_config *config = dev->config;
	const struct intel_adsp_ipc_data *devdata = dev->data;
	bool not_busy = (config->regs->idr & INTEL_ADSP_IPC_BUSY) == 0;

	return not_busy && !devdata->tx_ack_pending;
}

int intel_adsp_ipc_send_message(const struct device *dev,
			   uint32_t data, uint32_t ext_data)
{
#ifdef CONFIG_PM_DEVICE
	enum pm_device_state current_state;

	if (pm_device_state_get(INTEL_ADSP_IPC_HOST_DEV, &current_state) != 0 ||
		current_state != PM_DEVICE_STATE_ACTIVE) {
		return -ESHUTDOWN;
	}
#endif

	pm_device_busy_set(dev);
	const struct intel_adsp_ipc_config *config = dev->config;
	struct intel_adsp_ipc_data *devdata = dev->data;
	k_spinlock_key_t key = k_spin_lock(&devdata->lock);

	if ((config->regs->idr & INTEL_ADSP_IPC_BUSY) != 0 || devdata->tx_ack_pending) {
		k_spin_unlock(&devdata->lock, key);
		return -EBUSY;
	}

	k_sem_reset(&devdata->sem);
	/* Prevent entering runtime idle state until IPC acknowledgment is received. */
	pm_policy_state_lock_get(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
	devdata->tx_ack_pending = true;
	config->regs->idd = ext_data;
	config->regs->idr = data | INTEL_ADSP_IPC_BUSY;
	k_spin_unlock(&devdata->lock, key);
	pm_device_busy_clear(dev);
	return 0;
}

int intel_adsp_ipc_send_message_sync(const struct device *dev,
				uint32_t data, uint32_t ext_data,
				k_timeout_t timeout)
{
	struct intel_adsp_ipc_data *devdata = dev->data;

	int ret = intel_adsp_ipc_send_message(dev, data, ext_data);

	if (!ret) {
		k_sem_take(&devdata->sem, timeout);
	}
	return ret;
}
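
/* Usage sketch (illustrative): sending a message and waiting up to 100 ms for
 * the host acknowledgment. The data/ext_data values are hypothetical. Note
 * that the return value reflects the send itself; a timeout while waiting on
 * the semaphore is not reported by this function.
 *
 *   int err = intel_adsp_ipc_send_message_sync(INTEL_ADSP_IPC_HOST_DEV,
 *                                              0x12340000, 0, K_MSEC(100));
 *   if (err == -EBUSY) {
 *       // A previous message is still awaiting its acknowledgment.
 *   }
 */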

void intel_adsp_ipc_send_message_emergency(const struct device *dev, uint32_t data,
					   uint32_t ext_data)
{
	const struct intel_adsp_ipc_config * const config = dev->config;

	volatile struct intel_adsp_ipc * const regs = config->regs;
	bool done;
	/* Check if the host is still processing a message. */
	while (regs->idr & INTEL_ADSP_IPC_BUSY) {
		k_busy_wait(1);
	}

	/* Check if the host has a pending acknowledgment.
	 * Same signal, but on different bits in 1.5.
	 */
	done = regs->ida & INTEL_ADSP_IPC_DONE;
	if (done) {
		/* IPC completion */
		regs->ida = INTEL_ADSP_IPC_DONE;
	}

	regs->idd = ext_data;
	regs->idr = data | INTEL_ADSP_IPC_BUSY;
}

#if DT_NODE_EXISTS(INTEL_ADSP_IPC_HOST_DTNODE)

#if defined(CONFIG_SOC_SERIES_INTEL_ADSP_ACE)
static inline void ace_ipc_intc_unmask(void)
{
	ACE_DINT[0].ie[ACE_INTL_HIPC] = BIT(0);
}
#else
static inline void ace_ipc_intc_unmask(void) {}
#endif

static int dt_init(const struct device *dev)
{
	IRQ_CONNECT(DT_IRQN(INTEL_ADSP_IPC_HOST_DTNODE), 0, z_intel_adsp_ipc_isr,
		INTEL_ADSP_IPC_HOST_DEV, 0);
	irq_enable(DT_IRQN(INTEL_ADSP_IPC_HOST_DTNODE));

	ace_ipc_intc_unmask();

	return intel_adsp_ipc_init(dev);
}

#ifdef CONFIG_PM_DEVICE

void intel_adsp_ipc_set_resume_handler(const struct device *dev,
	intel_adsp_ipc_resume_handler_t fn, void *arg)
{
	struct ipc_control_driver_api *api =
		(struct ipc_control_driver_api *)dev->api;
	struct intel_adsp_ipc_data *devdata = dev->data;
	k_spinlock_key_t key = k_spin_lock(&devdata->lock);

	api->resume_fn = fn;
	api->resume_fn_args = arg;

	k_spin_unlock(&devdata->lock, key);
}

void intel_adsp_ipc_set_suspend_handler(const struct device *dev,
	intel_adsp_ipc_suspend_handler_t fn, void *arg)
{
	struct ipc_control_driver_api *api =
		(struct ipc_control_driver_api *)dev->api;
	struct intel_adsp_ipc_data *devdata = dev->data;
	k_spinlock_key_t key = k_spin_lock(&devdata->lock);

	api->suspend_fn = fn;
	api->suspend_fn_args = arg;

	k_spin_unlock(&devdata->lock, key);
}
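
/* Usage sketch (illustrative, names are hypothetical): an application can
 * register callbacks to run around the driver's own suspend/resume work.
 *
 *   intel_adsp_ipc_set_suspend_handler(INTEL_ADSP_IPC_HOST_DEV,
 *                                      my_suspend_cb, &my_ctx);
 *   intel_adsp_ipc_set_resume_handler(INTEL_ADSP_IPC_HOST_DEV,
 *                                     my_resume_cb, &my_ctx);
 */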

/**
 * @brief Manages IPC driver power state change.
 *
 * @param dev IPC device.
 * @param action Power state to be changed to.
 * @return 0 on success, otherwise an error code from the registered
 * ipc_power_control_api callbacks.
 *
 * @note The PM lock is taken at the start of each power transition to prevent
 * concurrent calls to @ref pm_device_action_run. If the IPC device is busy with
 * a hardware operation (which should not happen), the function returns an error.
 * It is the API user's responsibility to ensure that a device power transition
 * is not started while the device is busy.
 */
static int ipc_pm_action(const struct device *dev, enum pm_device_action action)
{
	if (pm_device_is_busy(INTEL_ADSP_IPC_HOST_DEV)) {
		return -EBUSY;
	}

	const struct ipc_control_driver_api *api =
		(const struct ipc_control_driver_api *)dev->api;

	int ret = 0;

	switch (action) {
	case PM_DEVICE_ACTION_SUSPEND:
		if (api->suspend_fn) {
			ret = api->suspend_fn(dev, api->suspend_fn_args);
			if (!ret) {
				irq_disable(DT_IRQN(INTEL_ADSP_IPC_HOST_DTNODE));
			}
		}
		break;
	case PM_DEVICE_ACTION_RESUME:
		irq_enable(DT_IRQN(INTEL_ADSP_IPC_HOST_DTNODE));
		if (!irq_is_enabled(DT_IRQN(INTEL_ADSP_IPC_HOST_DTNODE))) {
			ret = -EINTR;
			break;
		}
		ace_ipc_intc_unmask();
		ret = intel_adsp_ipc_init(dev);
		if (ret) {
			break;
		}
		if (api->resume_fn) {
			ret = api->resume_fn(dev, api->resume_fn_args);
		}
		break;
	default:
		/* Return -ENOTSUP when the given PM action is not supported. */
		return -ENOTSUP;
	}

	return ret;
}
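
/* Usage sketch (illustrative): with CONFIG_PM_DEVICE enabled, a power
 * transition can be requested through the standard device PM API, which ends
 * up calling ipc_pm_action() above.
 *
 *   int err = pm_device_action_run(INTEL_ADSP_IPC_HOST_DEV,
 *                                  PM_DEVICE_ACTION_SUSPEND);
 *   // ... later ...
 *   err = pm_device_action_run(INTEL_ADSP_IPC_HOST_DEV,
 *                              PM_DEVICE_ACTION_RESUME);
 */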

/**
 * @brief Callback functions to be executed by the Zephyr application
 * during IPC device suspend and resume.
 */
static struct ipc_control_driver_api ipc_power_control_api = {
	.resume_fn = NULL,
	.resume_fn_args = NULL,
	.suspend_fn = NULL,
	.suspend_fn_args = NULL
};

PM_DEVICE_DT_DEFINE(INTEL_ADSP_IPC_HOST_DTNODE, ipc_pm_action);

#endif /* CONFIG_PM_DEVICE */

static const struct intel_adsp_ipc_config ipc_host_config = {
	.regs = (void *)INTEL_ADSP_IPC_REG_ADDRESS,
};

static struct intel_adsp_ipc_data ipc_host_data;

DEVICE_DT_DEFINE(INTEL_ADSP_IPC_HOST_DTNODE, dt_init, PM_DEVICE_DT_GET(INTEL_ADSP_IPC_HOST_DTNODE),
	&ipc_host_data, &ipc_host_config, PRE_KERNEL_2, 0, COND_CODE_1(CONFIG_PM_DEVICE,
	(&ipc_power_control_api), (NULL)));

#endif /* DT_NODE_EXISTS(INTEL_ADSP_IPC_HOST_DTNODE) */