/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
6
7 #include <zephyr/kernel.h>
8 #include <string.h>
9 #include <errno.h>
10 #include <zephyr/cache.h>
11 #include <zephyr/ipc/pbuf.h>
12 #include <zephyr/sys/byteorder.h>
13
14 #if defined(CONFIG_ARCH_POSIX)
15 #include <soc.h>
16 #endif
17
/* Helper function returning the number of bytes currently stored in the buffer. */
static uint32_t idx_occupied(uint32_t len, uint32_t wr_idx, uint32_t rd_idx)
{
	/* It is implicitly assumed wr_idx and rd_idx cannot differ by more than len. */
	if (rd_idx > wr_idx) {
		/* Write index has wrapped around; occupied area spans the buffer end. */
		return len - (rd_idx - wr_idx);
	}

	return wr_idx - rd_idx;
}
24
/* Helper function wrapping an index back to the beginning when it reaches the buffer length. */
static uint32_t idx_wrap(uint32_t len, uint32_t idx)
{
	if (idx < len) {
		return idx;
	}

	return idx % len;
}
30
validate_cfg(const struct pbuf_cfg * cfg)31 static int validate_cfg(const struct pbuf_cfg *cfg)
32 {
33 /* Validate pointers. */
34 if (!cfg || !cfg->rd_idx_loc || !cfg->wr_idx_loc || !cfg->data_loc) {
35 return -EINVAL;
36 }
37
38 /* Validate pointer alignment. */
39 if (!IS_PTR_ALIGNED_BYTES(cfg->rd_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) ||
40 !IS_PTR_ALIGNED_BYTES(cfg->wr_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) ||
41 !IS_PTR_ALIGNED_BYTES(cfg->data_loc, _PBUF_IDX_SIZE)) {
42 return -EINVAL;
43 }
44
45 /* Validate len. */
46 if (cfg->len < _PBUF_MIN_DATA_LEN || !IS_PTR_ALIGNED_BYTES(cfg->len, _PBUF_IDX_SIZE)) {
47 return -EINVAL;
48 }
49
50 /* Validate pointer values. */
51 if (!(cfg->rd_idx_loc < cfg->wr_idx_loc) ||
52 !((uint8_t *)cfg->wr_idx_loc < cfg->data_loc) ||
53 !(((uint8_t *)cfg->rd_idx_loc + MAX(_PBUF_IDX_SIZE, cfg->dcache_alignment)) ==
54 (uint8_t *)cfg->wr_idx_loc)) {
55 return -EINVAL;
56 }
57
58 return 0;
59 }
60
#if defined(CONFIG_ARCH_POSIX)
/* On the native/POSIX platform, translate the shared-memory location pointers
 * through native_emb_addr_remap() before they are dereferenced.
 */
void pbuf_native_addr_remap(struct pbuf *pb)
{
	void **locations[] = {
		(void **)&pb->cfg->rd_idx_loc,
		(void **)&pb->cfg->wr_idx_loc,
		(void **)&pb->cfg->data_loc,
	};

	for (size_t i = 0; i < ARRAY_SIZE(locations); i++) {
		native_emb_addr_remap(locations[i]);
	}
}
#endif
69
pbuf_tx_init(struct pbuf * pb)70 int pbuf_tx_init(struct pbuf *pb)
71 {
72 if (validate_cfg(pb->cfg) != 0) {
73 return -EINVAL;
74 }
75 #if defined(CONFIG_ARCH_POSIX)
76 pbuf_native_addr_remap(pb);
77 #endif
78
79 /* Initialize local copy of indexes. */
80 pb->data.wr_idx = 0;
81 pb->data.rd_idx = 0;
82
83 /* Clear shared memory. */
84 *(pb->cfg->wr_idx_loc) = pb->data.wr_idx;
85 *(pb->cfg->rd_idx_loc) = pb->data.rd_idx;
86
87 __sync_synchronize();
88
89 /* Take care cache. */
90 sys_cache_data_flush_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc)));
91 sys_cache_data_flush_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc)));
92
93 return 0;
94 }
95
pbuf_rx_init(struct pbuf * pb)96 int pbuf_rx_init(struct pbuf *pb)
97 {
98 if (validate_cfg(pb->cfg) != 0) {
99 return -EINVAL;
100 }
101 #if defined(CONFIG_ARCH_POSIX)
102 pbuf_native_addr_remap(pb);
103 #endif
104
105 /* Initialize local copy of indexes. */
106 pb->data.wr_idx = 0;
107 pb->data.rd_idx = 0;
108
109 return 0;
110 }
111
pbuf_write(struct pbuf * pb,const char * data,uint16_t len)112 int pbuf_write(struct pbuf *pb, const char *data, uint16_t len)
113 {
114 if (pb == NULL || len == 0 || data == NULL) {
115 /* Incorrect call. */
116 return -EINVAL;
117 }
118
119 /* Invalidate rd_idx only, local wr_idx is used to increase buffer security. */
120 sys_cache_data_invd_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc)));
121 __sync_synchronize();
122
123 uint8_t *const data_loc = pb->cfg->data_loc;
124 const uint32_t blen = pb->cfg->len;
125 uint32_t rd_idx = *(pb->cfg->rd_idx_loc);
126 uint32_t wr_idx = pb->data.wr_idx;
127
128 /* wr_idx must always be aligned. */
129 __ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE));
130 /* rd_idx shall always be aligned, but its value is received from the reader.
131 * Can not assert.
132 */
133 if (!IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE)) {
134 return -EINVAL;
135 }
136
137 uint32_t free_space = blen - idx_occupied(blen, wr_idx, rd_idx) - _PBUF_IDX_SIZE;
138
139 /* Packet length, data + packet length size. */
140 uint32_t plen = len + PBUF_PACKET_LEN_SZ;
141
142 /* Check if packet will fit into the buffer. */
143 if (free_space < plen) {
144 return -ENOMEM;
145 }
146
147 /* Clear packet len with zeros and update. Clearing is done for possible versioning in the
148 * future. Writing is allowed now, because shared wr_idx value is updated at the very end.
149 */
150 *((uint32_t *)(&data_loc[wr_idx])) = 0;
151 sys_put_be16(len, &data_loc[wr_idx]);
152 __sync_synchronize();
153 sys_cache_data_flush_range(&data_loc[wr_idx], PBUF_PACKET_LEN_SZ);
154
155 wr_idx = idx_wrap(blen, wr_idx + PBUF_PACKET_LEN_SZ);
156
157 /* Write until end of the buffer, if data will be wrapped. */
158 uint32_t tail = MIN(len, blen - wr_idx);
159
160 memcpy(&data_loc[wr_idx], data, tail);
161 sys_cache_data_flush_range(&data_loc[wr_idx], tail);
162
163 if (len > tail) {
164 /* Copy remaining data to buffer front. */
165 memcpy(&data_loc[0], data + tail, len - tail);
166 sys_cache_data_flush_range(&data_loc[0], len - tail);
167 }
168
169 wr_idx = idx_wrap(blen, ROUND_UP(wr_idx + len, _PBUF_IDX_SIZE));
170 /* Update wr_idx. */
171 pb->data.wr_idx = wr_idx;
172 *(pb->cfg->wr_idx_loc) = wr_idx;
173 __sync_synchronize();
174 sys_cache_data_flush_range((void *)pb->cfg->wr_idx_loc, sizeof(*(pb->cfg->wr_idx_loc)));
175
176 return len;
177 }
178
pbuf_read(struct pbuf * pb,char * buf,uint16_t len)179 int pbuf_read(struct pbuf *pb, char *buf, uint16_t len)
180 {
181 if (pb == NULL) {
182 /* Incorrect call. */
183 return -EINVAL;
184 }
185
186 /* Invalidate wr_idx only, local rd_idx is used to increase buffer security. */
187 sys_cache_data_invd_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc)));
188 __sync_synchronize();
189
190 uint8_t *const data_loc = pb->cfg->data_loc;
191 const uint32_t blen = pb->cfg->len;
192 uint32_t wr_idx = *(pb->cfg->wr_idx_loc);
193 uint32_t rd_idx = pb->data.rd_idx;
194
195 /* rd_idx must always be aligned. */
196 __ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE));
197 /* wr_idx shall always be aligned, but its value is received from the
198 * writer. Can not assert.
199 */
200 if (!IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE)) {
201 return -EINVAL;
202 }
203
204 if (rd_idx == wr_idx) {
205 /* Buffer is empty. */
206 return 0;
207 }
208
209 /* Get packet len.*/
210 sys_cache_data_invd_range(&data_loc[rd_idx], PBUF_PACKET_LEN_SZ);
211 uint16_t plen = sys_get_be16(&data_loc[rd_idx]);
212
213 if (!buf) {
214 return (int)plen;
215 }
216
217 if (plen > len) {
218 return -ENOMEM;
219 }
220
221 uint32_t occupied_space = idx_occupied(blen, wr_idx, rd_idx);
222
223 if (occupied_space < plen + PBUF_PACKET_LEN_SZ) {
224 /* This should never happen. */
225 return -EAGAIN;
226 }
227
228 rd_idx = idx_wrap(blen, rd_idx + PBUF_PACKET_LEN_SZ);
229
230 /* Packet will fit into provided buffer, truncate len if provided len
231 * is bigger than necessary.
232 */
233 len = MIN(plen, len);
234
235 /* Read until end of the buffer, if data are wrapped. */
236 uint32_t tail = MIN(blen - rd_idx, len);
237
238 sys_cache_data_invd_range(&data_loc[rd_idx], tail);
239 memcpy(buf, &data_loc[rd_idx], tail);
240
241 if (len > tail) {
242 sys_cache_data_invd_range(&data_loc[0], len - tail);
243 memcpy(&buf[tail], &pb->cfg->data_loc[0], len - tail);
244 }
245
246 /* Update rd_idx. */
247 rd_idx = idx_wrap(blen, ROUND_UP(rd_idx + len, _PBUF_IDX_SIZE));
248
249 pb->data.rd_idx = rd_idx;
250 *(pb->cfg->rd_idx_loc) = rd_idx;
251 __sync_synchronize();
252 sys_cache_data_flush_range((void *)pb->cfg->rd_idx_loc, sizeof(*(pb->cfg->rd_idx_loc)));
253
254 return len;
255 }
256