/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <string.h>
#include <errno.h>
#include <zephyr/cache.h>
#include <zephyr/ipc/pbuf.h>
#include <zephyr/sys/byteorder.h>

#if defined(CONFIG_ARCH_POSIX)
#include <soc.h>
#endif

/* Helper function for getting the number of bytes currently occupied in the buffer. */
static uint32_t idx_occupied(uint32_t len, uint32_t wr_idx, uint32_t rd_idx)
{
	/* It is implicitly assumed wr_idx and rd_idx cannot differ by more than len. */
	return (rd_idx > wr_idx) ? (len - (rd_idx - wr_idx)) : (wr_idx - rd_idx);
}
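
/*
 * Worked example (illustrative values, not taken from this file): with
 * len = 16, wr_idx = 4 and rd_idx = 12 the write index has already wrapped,
 * so idx_occupied() returns 16 - (12 - 4) = 8 occupied bytes. With
 * wr_idx = 12 and rd_idx = 4 there is no wrap and it returns 12 - 4 = 8.
 */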

/* Helper function for wrapping the index back to the beginning if it is above the buffer len. */
static uint32_t idx_wrap(uint32_t len, uint32_t idx)
{
	return (idx >= len) ? (idx % len) : (idx);
}
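
/*
 * Worked example (illustrative values): idx_wrap(16, 18) returns 18 % 16 = 2,
 * while idx_wrap(16, 15) returns 15 unchanged.
 */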

static int validate_cfg(const struct pbuf_cfg *cfg)
{
	/* Validate pointers. */
	if (!cfg || !cfg->rd_idx_loc || !cfg->wr_idx_loc || !cfg->data_loc) {
		return -EINVAL;
	}

	/* Validate pointer alignment. */
	if (!IS_PTR_ALIGNED_BYTES(cfg->rd_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) ||
	    !IS_PTR_ALIGNED_BYTES(cfg->wr_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) ||
	    !IS_PTR_ALIGNED_BYTES(cfg->handshake_loc, _PBUF_IDX_SIZE) ||
	    !IS_PTR_ALIGNED_BYTES(cfg->data_loc, _PBUF_IDX_SIZE)) {
		return -EINVAL;
	}

	/* Validate len. */
	if (cfg->len < _PBUF_MIN_DATA_LEN || !IS_PTR_ALIGNED_BYTES(cfg->len, _PBUF_IDX_SIZE)) {
		return -EINVAL;
	}

	/* Validate pointer values. */
	if (!(cfg->rd_idx_loc < cfg->wr_idx_loc) ||
	    (cfg->handshake_loc && !(cfg->rd_idx_loc < cfg->handshake_loc)) ||
	    !(cfg->handshake_loc < cfg->wr_idx_loc) ||
	    !((uint8_t *)cfg->wr_idx_loc < cfg->data_loc) ||
	    !(((uint8_t *)cfg->rd_idx_loc + MAX(_PBUF_IDX_SIZE, cfg->dcache_alignment)) ==
	      (uint8_t *)cfg->wr_idx_loc)) {
		return -EINVAL;
	}

	return 0;
}
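
/*
 * Sketch of a layout accepted by validate_cfg() (inferred from the checks
 * above, not a definition from this file): rd_idx comes first, wr_idx follows
 * exactly MAX(_PBUF_IDX_SIZE, dcache_alignment) bytes later so that each index
 * can occupy its own cache line, the optional handshake word sits between
 * them, and the data area of cfg->len bytes starts after wr_idx:
 *
 *	rd_idx | [handshake] | wr_idx | data[len]
 */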

#if defined(CONFIG_ARCH_POSIX)
void pbuf_native_addr_remap(struct pbuf *pb)
{
	native_emb_addr_remap((void **)&pb->cfg->rd_idx_loc);
	native_emb_addr_remap((void **)&pb->cfg->wr_idx_loc);
	native_emb_addr_remap((void **)&pb->cfg->data_loc);
}
#endif

int pbuf_tx_init(struct pbuf *pb)
{
	if (validate_cfg(pb->cfg) != 0) {
		return -EINVAL;
	}
#if defined(CONFIG_ARCH_POSIX)
	pbuf_native_addr_remap(pb);
#endif

	/* Initialize local copy of indexes. */
	pb->data.wr_idx = 0;
	pb->data.rd_idx = 0;

	/* Clear shared memory. */
	*(pb->cfg->wr_idx_loc) = pb->data.wr_idx;
	*(pb->cfg->rd_idx_loc) = pb->data.rd_idx;

	__sync_synchronize();

	/* Flush the cleared indexes from the data cache. */
	sys_cache_data_flush_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc)));
	sys_cache_data_flush_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc)));

	return 0;
}

int pbuf_rx_init(struct pbuf *pb)
{
	if (validate_cfg(pb->cfg) != 0) {
		return -EINVAL;
	}
#if defined(CONFIG_ARCH_POSIX)
	pbuf_native_addr_remap(pb);
#endif

	/* Initialize local copy of indexes. */
	pb->data.wr_idx = 0;
	pb->data.rd_idx = 0;

	return 0;
}
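
/*
 * Usage sketch (illustrative, not compiled here; tx_pb and rx_pb are assumed
 * pbuf instances whose cfg fields describe the same shared memory region):
 * only the writer side clears the shared indexes, the reader side merely
 * resets its local copies.
 *
 *	if (pbuf_tx_init(tx_pb) != 0) {
 *		... writer-side cfg is invalid ...
 *	}
 *	if (pbuf_rx_init(rx_pb) != 0) {
 *		... reader-side cfg is invalid ...
 *	}
 */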

int pbuf_write(struct pbuf *pb, const char *data, uint16_t len)
{
	if (pb == NULL || len == 0 || data == NULL) {
		/* Incorrect call. */
		return -EINVAL;
	}

	/* Invalidate rd_idx only; the local copy of wr_idx is used to increase buffer security. */
	sys_cache_data_invd_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc)));
	__sync_synchronize();

	uint8_t *const data_loc = pb->cfg->data_loc;
	const uint32_t blen = pb->cfg->len;
	uint32_t rd_idx = *(pb->cfg->rd_idx_loc);
	uint32_t wr_idx = pb->data.wr_idx;

	/* wr_idx must always be aligned. */
	__ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE));
	/* rd_idx shall always be aligned, but its value is received from the reader.
	 * Can not assert.
	 */
	if (!IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE)) {
		return -EINVAL;
	}

	uint32_t free_space = blen - idx_occupied(blen, wr_idx, rd_idx) - _PBUF_IDX_SIZE;

	/* Packet length: data size plus the packet length field. */
	uint32_t plen = len + PBUF_PACKET_LEN_SZ;

	/* Check if the packet will fit into the buffer. */
	if (free_space < plen) {
		return -ENOMEM;
	}

	/* Clear the packet length word with zeros before updating it. Clearing is done for
	 * possible versioning in the future. Writing is allowed now, because the shared
	 * wr_idx value is updated at the very end.
	 */
	*((uint32_t *)(&data_loc[wr_idx])) = 0;
	sys_put_be16(len, &data_loc[wr_idx]);
	__sync_synchronize();
	sys_cache_data_flush_range(&data_loc[wr_idx], PBUF_PACKET_LEN_SZ);

	wr_idx = idx_wrap(blen, wr_idx + PBUF_PACKET_LEN_SZ);

	/* Write until the end of the buffer; the remainder, if any, wraps around. */
	uint32_t tail = MIN(len, blen - wr_idx);

	memcpy(&data_loc[wr_idx], data, tail);
	sys_cache_data_flush_range(&data_loc[wr_idx], tail);

	if (len > tail) {
		/* Copy the remaining data to the buffer front. */
		memcpy(&data_loc[0], data + tail, len - tail);
		sys_cache_data_flush_range(&data_loc[0], len - tail);
	}

	wr_idx = idx_wrap(blen, ROUND_UP(wr_idx + len, _PBUF_IDX_SIZE));
	/* Update wr_idx. */
	pb->data.wr_idx = wr_idx;
	*(pb->cfg->wr_idx_loc) = wr_idx;
	__sync_synchronize();
	sys_cache_data_flush_range((void *)pb->cfg->wr_idx_loc, sizeof(*(pb->cfg->wr_idx_loc)));

	return len;
}
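
/*
 * Usage sketch (illustrative; my_pb and msg are assumptions, not part of this
 * file). pbuf_write() returns the number of bytes written or a negative errno
 * value, so a producer may retry on -ENOMEM after the reader drains the
 * buffer:
 *
 *	const char msg[] = "hello";
 *	int ret = pbuf_write(my_pb, msg, sizeof(msg));
 *
 *	if (ret == -ENOMEM) {
 *		... no space for the packet, retry later ...
 *	} else if (ret < 0) {
 *		... invalid arguments or misaligned read index ...
 *	}
 */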

int pbuf_get_initial_buf(struct pbuf *pb, volatile char **buf, uint16_t *len)
{
	uint32_t wr_idx;
	uint16_t plen;

	if (pb == NULL || pb->data.rd_idx != 0) {
		/* Incorrect call. */
		return -EINVAL;
	}

	sys_cache_data_invd_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc)));
	__sync_synchronize();

	wr_idx = *(pb->cfg->wr_idx_loc);
	if (wr_idx >= pb->cfg->len || wr_idx > 0xFFFF || wr_idx == 0) {
		/* Wrong index - probably pbuf was not initialized or no message was sent yet. */
		return -EINVAL;
	}

	sys_cache_data_invd_range((void *)(pb->cfg->data_loc), PBUF_PACKET_LEN_SZ);
	__sync_synchronize();

	plen = sys_get_be16(&pb->cfg->data_loc[0]);

	if (plen + PBUF_PACKET_LEN_SZ > wr_idx) {
		/* Wrong length - probably pbuf was not initialized or no message was sent yet. */
		return -EINVAL;
	}

	*buf = &pb->cfg->data_loc[PBUF_PACKET_LEN_SZ];
	*len = plen;

	sys_cache_data_invd_range((void *)*buf, plen);
	__sync_synchronize();

	return 0;
}
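
/*
 * Usage sketch (illustrative; my_pb is an assumption): peek at the very first
 * packet in place, without consuming it or advancing any index:
 *
 *	volatile char *init_buf;
 *	uint16_t init_len;
 *
 *	if (pbuf_get_initial_buf(my_pb, &init_buf, &init_len) == 0) {
 *		... init_len bytes are readable at init_buf ...
 *	}
 */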

int pbuf_read(struct pbuf *pb, char *buf, uint16_t len)
{
	if (pb == NULL) {
		/* Incorrect call. */
		return -EINVAL;
	}

	/* Invalidate wr_idx only; the local copy of rd_idx is used to increase buffer security. */
	sys_cache_data_invd_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc)));
	__sync_synchronize();

	uint8_t *const data_loc = pb->cfg->data_loc;
	const uint32_t blen = pb->cfg->len;
	uint32_t wr_idx = *(pb->cfg->wr_idx_loc);
	uint32_t rd_idx = pb->data.rd_idx;

	/* rd_idx must always be aligned. */
	__ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE));
	/* wr_idx shall always be aligned, but its value is received from the
	 * writer. Can not assert.
	 */
	if (!IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE)) {
		return -EINVAL;
	}

	if (rd_idx == wr_idx) {
		/* Buffer is empty. */
		return 0;
	}

	/* Get packet len. */
	sys_cache_data_invd_range(&data_loc[rd_idx], PBUF_PACKET_LEN_SZ);
	uint16_t plen = sys_get_be16(&data_loc[rd_idx]);

	if (!buf) {
		return (int)plen;
	}

	if (plen > len) {
		return -ENOMEM;
	}

	uint32_t occupied_space = idx_occupied(blen, wr_idx, rd_idx);

	if (occupied_space < plen + PBUF_PACKET_LEN_SZ) {
		/* This should never happen. */
		return -EAGAIN;
	}

	rd_idx = idx_wrap(blen, rd_idx + PBUF_PACKET_LEN_SZ);

	/* The packet will fit into the provided buffer; truncate len if the
	 * provided len is bigger than necessary.
	 */
	len = MIN(plen, len);

	/* Read until the end of the buffer, if the data wraps around. */
	uint32_t tail = MIN(blen - rd_idx, len);

	sys_cache_data_invd_range(&data_loc[rd_idx], tail);
	memcpy(buf, &data_loc[rd_idx], tail);

	if (len > tail) {
		sys_cache_data_invd_range(&data_loc[0], len - tail);
		memcpy(&buf[tail], &data_loc[0], len - tail);
	}

	/* Update rd_idx. */
	rd_idx = idx_wrap(blen, ROUND_UP(rd_idx + len, _PBUF_IDX_SIZE));

	pb->data.rd_idx = rd_idx;
	*(pb->cfg->rd_idx_loc) = rd_idx;
	__sync_synchronize();
	sys_cache_data_flush_range((void *)pb->cfg->rd_idx_loc, sizeof(*(pb->cfg->rd_idx_loc)));

	return len;
}
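
/*
 * Usage sketch (illustrative; my_pb is an assumption). Calling pbuf_read()
 * with a NULL buffer returns the length of the next packet without consuming
 * it, which allows sizing the destination before the actual read:
 *
 *	char rx_buf[128];
 *	int plen = pbuf_read(my_pb, NULL, 0);
 *
 *	if (plen > 0 && plen <= (int)sizeof(rx_buf)) {
 *		plen = pbuf_read(my_pb, rx_buf, sizeof(rx_buf));
 *		... on success, plen bytes of the packet are now in rx_buf ...
 *	}
 */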

uint32_t pbuf_handshake_read(struct pbuf *pb)
{
	volatile uint32_t *ptr = pb->cfg->handshake_loc;

	__ASSERT_NO_MSG(ptr);
	sys_cache_data_invd_range((void *)ptr, sizeof(*ptr));
	__sync_synchronize();
	return *ptr;
}

void pbuf_handshake_write(struct pbuf *pb, uint32_t value)
{
	volatile uint32_t *ptr = pb->cfg->handshake_loc;

	__ASSERT_NO_MSG(ptr);
	*ptr = value;
	__sync_synchronize();
	sys_cache_data_flush_range((void *)ptr, sizeof(*ptr));
}
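
/*
 * Usage sketch (illustrative; my_pb, peer_pb and the exchanged value are
 * assumptions, as this file does not define any handshake protocol): one side
 * publishes a word through the shared handshake location and the peer reads
 * it back:
 *
 *	pbuf_handshake_write(my_pb, 0xABCD0001);
 *
 *	... and on the other core ...
 *
 *	uint32_t value = pbuf_handshake_read(peer_pb);
 */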