/* Copyright (c) 2022, Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <zephyr/drivers/ipm.h>
#include <adsp_memory.h>
#include <adsp_shim.h>
#include <intel_adsp_ipc.h>
#include <mem_window.h>
#include <zephyr/cache.h>

/* Matches SOF_IPC_MSG_MAX_SIZE, though in practice nothing anywhere
 * near that big is ever sent. Should maybe consider making this a
 * Kconfig option to avoid waste.
 */
#define MAX_MSG 384

/* Note: these addresses aren't flexible! We require that they match
 * the current SOF ipc3/4 layout, which means that:
 *
 * + Buffer addresses are 4k-aligned (this is a hardware requirement)
 * + Inbuf must be 4k after outbuf, with no use of the intervening memory
 * + Outbuf must be 4k after the start of win0 (this is where the host
 *   driver looks)
 *
 * One side effect is that the word just before MSG_INBUF is owned by
 * our code too, and can be used for a nice trick below.
 */
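
/* For illustration only (offsets implied by the constraints above,
 * with W standing for the start of win0):
 *
 *   W + 0x0000  first 4k of win0 (not used by this driver)
 *   W + 0x1000  outbuf (DSP -> host)
 *   W + 0x2000  inbuf  (host -> DSP)
 *
 * The "nice trick" uses the word at W + 0x1ffc, immediately before
 * inbuf.
 */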

/* host windows */
#define DMWBA(win_base) ((win_base) + 0x0)
#define DMWLO(win_base) ((win_base) + 0x4)


struct ipm_cavs_host_data {
	ipm_callback_t callback;
	void *user_data;
	bool enabled;
};

/* Note: this call is unsynchronized. The IPM docs are silent as to
 * whether synchronization is required, and the SOF code that will be
 * using this is externally synchronized already.
 */
static int send(const struct device *dev, int wait, uint32_t id,
		const void *data, int size)
{
	const struct device *mw0 = DEVICE_DT_GET(DT_NODELABEL(mem_window0));

	if (!device_is_ready(mw0)) {
		return -ENODEV;
	}
	const struct mem_win_config *mw0_config = mw0->config;
	uint32_t *buf = (uint32_t *)sys_cache_uncached_ptr_get(
		(void *)((uint32_t)mw0_config->mem_base
			 + CONFIG_IPM_CAVS_HOST_OUTBOX_OFFSET));

	if (!intel_adsp_ipc_is_complete(INTEL_ADSP_IPC_HOST_DEV)) {
		return -EBUSY;
	}

	if ((size < 0) || (size > MAX_MSG)) {
		return -EMSGSIZE;
	}

	if ((id & 0xc0000000) != 0) {
		/* cAVS IDR register has only 30 usable bits */
		return -EINVAL;
	}

	uint32_t ext_data = 0;

	/* Protocol variant (used by SOF "ipc4"): store the first word
	 * of the message in the IPC scratch registers
	 */
	if (IS_ENABLED(CONFIG_IPM_CAVS_HOST_REGWORD) && size >= 4) {
		ext_data = ((uint32_t *)data)[0];
		data = &((const uint32_t *)data)[1];
		size -= 4;
	}
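	/* E.g. with REGWORD enabled, a 12-byte message {w0, w1, w2}
	 * goes out as ext_data = w0 in the scratch register plus an
	 * 8-byte outbox payload {w1, w2}.
	 */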

	memcpy(buf, data, size);

	int ret = intel_adsp_ipc_send_message(INTEL_ADSP_IPC_HOST_DEV, id, ext_data);

	/* The IPM docs call for "busy waiting" here, but in fact
	 * there's a blocking synchronous call available that might be
	 * better. But then we'd have to check whether we're in
	 * interrupt context, and it's not clear to me that SOF would
	 * benefit anyway as all its usage is async. This is OK for
	 * now.
	 */
	if (ret == -EBUSY && wait) {
		while (!intel_adsp_ipc_is_complete(INTEL_ADSP_IPC_HOST_DEV)) {
			k_busy_wait(1);
		}
	}

	return ret;
}
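
/* Usage sketch (assumed client code, not part of this driver): the
 * device is defined below as "ipm_cavs_host", so a client could do
 * something like:
 *
 *   const struct device *ipm = device_get_binding("ipm_cavs_host");
 *   uint32_t msg[2] = { header_word, payload_word };
 *
 *   ipm_register_callback(ipm, rx_handler, NULL);
 *   ipm_set_enabled(ipm, 1);
 *   ipm_send(ipm, 1, id, msg, sizeof(msg));
 *
 * where rx_handler and the message contents are hypothetical, and id
 * must fit in the 30 usable IDR bits checked in send() above.
 */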

static bool ipc_handler(const struct device *dev, void *arg,
			uint32_t data, uint32_t ext_data)
{
	ARG_UNUSED(dev);
	struct device *ipmdev = arg;
	struct ipm_cavs_host_data *devdata = ipmdev->data;
	const struct device *mw1 = DEVICE_DT_GET(DT_NODELABEL(mem_window1));

	if (!device_is_ready(mw1)) {
		/* Can't read the message; return true so the IPC
		 * still gets completed and the host doesn't stall.
		 */
		return true;
	}
	const struct mem_win_config *mw1_config = mw1->config;
	uint32_t *msg = sys_cache_uncached_ptr_get((void *)mw1_config->mem_base);

	/* We play tricks to leave one word available before the
	 * beginning of the SRAM window, so that the host sees the
	 * same offsets it does with the original ipc4 protocol
	 * implementation, while here in the firmware we see a single
	 * contiguous buffer. See above.
	 */
	if (IS_ENABLED(CONFIG_IPM_CAVS_HOST_REGWORD)) {
		msg = &msg[-1];
		msg[0] = ext_data;
	}
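
	/* With REGWORD, msg[0] (the word just before the window) now
	 * holds ext_data and msg[1..] is the in-window payload, so
	 * the callback below sees one contiguous message.
	 */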

	if (devdata->enabled && (devdata->callback != NULL)) {
		devdata->callback(ipmdev, devdata->user_data,
				  data & 0x3fffffff, msg);
	}

	/* Return false for async handling */
	return !IS_ENABLED(IPM_CALLBACK_ASYNC);
}

static int max_data_size_get(const struct device *ipmdev)
{
	return MAX_MSG;
}

static uint32_t max_id_val_get(const struct device *ipmdev)
{
	/* 30 user-writable bits in cAVS IDR register */
	return 0x3fffffff;
}

static void register_callback(const struct device *port,
			      ipm_callback_t cb,
			      void *user_data)
{
	struct ipm_cavs_host_data *data = port->data;

	data->callback = cb;
	data->user_data = user_data;
}

static int set_enabled(const struct device *ipmdev, int enable)
{
	/* This protocol doesn't support any kind of queuing, and in
	 * fact will stall if a message goes unacknowledged. Support
	 * it as best we can by gating the callbacks only. That will
	 * allow the DONE notifications to proceed as normal, at the
	 * cost of dropping any messages received while not "enabled",
	 * of course.
	 */
	struct ipm_cavs_host_data *data = ipmdev->data;

	data->enabled = enable;
	return 0;
}

static void complete(const struct device *ipmdev)
{
	intel_adsp_ipc_complete(INTEL_ADSP_IPC_HOST_DEV);
}

static int init(const struct device *dev)
{
	struct ipm_cavs_host_data *data = dev->data;

	const struct device *mw1 = DEVICE_DT_GET(DT_NODELABEL(mem_window1));

	if (!device_is_ready(mw1)) {
		return -ENODEV;
	}
	const struct mem_win_config *mw1_config = mw1->config;
	/* Initialize the hardware SRAM window. SOF will give the host
	 * 8k here; as future-proofing, limit it to just the memory
	 * we're using.
	 */

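	/* DMWLO gets the window length (the low bits OR'd in below are
	 * assumed to be hardware-required control bits); DMWBA gets
	 * the window base address with its enable flag set.
	 */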
	sys_write32(ROUND_UP(MAX_MSG, 8) | 0x7, DMWLO(mw1_config->base_addr));
	sys_write32((mw1_config->mem_base | ADSP_DMWBA_ENABLE), DMWBA(mw1_config->base_addr));

	intel_adsp_ipc_set_message_handler(INTEL_ADSP_IPC_HOST_DEV, ipc_handler, (void *)dev);

	data->enabled = true;
	return 0;
}

static DEVICE_API(ipm, api) = {
	.send = send,
	.max_data_size_get = max_data_size_get,
	.max_id_val_get = max_id_val_get,
	.register_callback = register_callback,
	.set_enabled = set_enabled,
	.complete = complete,
};

static struct ipm_cavs_host_data data;

DEVICE_DEFINE(ipm_cavs_host, "ipm_cavs_host", init, NULL, &data, NULL,
	      PRE_KERNEL_2, 1, &api);
213