1 /*
2 * Copyright (c) 2023 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <string.h>
9 #include <errno.h>
10 #include <zephyr/cache.h>
11 #include <zephyr/ipc/pbuf.h>
12 #include <zephyr/sys/byteorder.h>
13
/* Helper function returning the number of bytes currently stored in the buffer. */
static uint32_t idx_occupied(uint32_t len, uint32_t wr_idx, uint32_t rd_idx)
{
	/* It is implicitly assumed wr_idx and rd_idx cannot differ by more than len. */
	if (wr_idx >= rd_idx) {
		return wr_idx - rd_idx;
	}

	return len - (rd_idx - wr_idx);
}
20
/* Helper function wrapping an index back to the beginning once it reaches the
 * buffer length.
 */
static uint32_t idx_wrap(uint32_t len, uint32_t idx)
{
	if (idx < len) {
		return idx;
	}

	return idx % len;
}
26
validate_cfg(const struct pbuf_cfg * cfg)27 static int validate_cfg(const struct pbuf_cfg *cfg)
28 {
29 /* Validate pointers. */
30 if (!cfg || !cfg->rd_idx_loc || !cfg->wr_idx_loc || !cfg->data_loc) {
31 return -EINVAL;
32 }
33
34 /* Validate pointer alignment. */
35 if (!IS_PTR_ALIGNED_BYTES(cfg->rd_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) ||
36 !IS_PTR_ALIGNED_BYTES(cfg->wr_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) ||
37 !IS_PTR_ALIGNED_BYTES(cfg->data_loc, _PBUF_IDX_SIZE)) {
38 return -EINVAL;
39 }
40
41 /* Validate len. */
42 if (cfg->len < _PBUF_MIN_DATA_LEN || !IS_PTR_ALIGNED_BYTES(cfg->len, _PBUF_IDX_SIZE)) {
43 return -EINVAL;
44 }
45
46 /* Validate pointer values. */
47 if (!(cfg->rd_idx_loc < cfg->wr_idx_loc) ||
48 !((uint8_t *)cfg->wr_idx_loc < cfg->data_loc) ||
49 !(((uint8_t *)cfg->rd_idx_loc + MAX(_PBUF_IDX_SIZE, cfg->dcache_alignment)) ==
50 (uint8_t *)cfg->wr_idx_loc)) {
51 return -EINVAL;
52 }
53
54 return 0;
55 }
56
pbuf_init(struct pbuf * pb)57 int pbuf_init(struct pbuf *pb)
58 {
59 if (validate_cfg(pb->cfg) != 0) {
60 return -EINVAL;
61 }
62
63 /* Initialize local copy of indexes. */
64 pb->data.wr_idx = 0;
65 pb->data.rd_idx = 0;
66
67 /* Clear shared memory. */
68 *(pb->cfg->wr_idx_loc) = pb->data.wr_idx;
69 *(pb->cfg->rd_idx_loc) = pb->data.rd_idx;
70
71 __sync_synchronize();
72
73 /* Take care cache. */
74 sys_cache_data_flush_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc)));
75 sys_cache_data_flush_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc)));
76
77 return 0;
78 }
79
/**
 * Write one packet — a PBUF_PACKET_LEN_SZ big-endian length header followed
 * by the payload — into the circular buffer, then publish the new write
 * index to shared memory.
 *
 * @param pb   Packet buffer instance; uses the trusted local pb->data.wr_idx.
 * @param data Payload to copy into the buffer.
 * @param len  Payload length in bytes; must be non-zero.
 *
 * @return len on success, -EINVAL on invalid arguments or a misaligned
 *         shared rd_idx, -ENOMEM if the packet does not fit.
 */
int pbuf_write(struct pbuf *pb, const char *data, uint16_t len)
{
	if (pb == NULL || len == 0 || data == NULL) {
		/* Incorrect call. */
		return -EINVAL;
	}

	/* Invalidate rd_idx only; the local copy of wr_idx is used instead of
	 * the shared one to increase buffer security.
	 */
	sys_cache_data_invd_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc)));
	__sync_synchronize();

	uint8_t *const data_loc = pb->cfg->data_loc;
	const uint32_t blen = pb->cfg->len;
	uint32_t rd_idx = *(pb->cfg->rd_idx_loc);
	uint32_t wr_idx = pb->data.wr_idx;

	/* wr_idx must always be aligned. */
	__ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE));
	/* rd_idx shall always be aligned, but its value is received from the reader.
	 * Can not assert.
	 */
	if (!IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE)) {
		return -EINVAL;
	}

	/* Keep at least _PBUF_IDX_SIZE bytes unused so wr_idx can never catch
	 * up with rd_idx on a full buffer (wr_idx == rd_idx means empty).
	 */
	uint32_t free_space = blen - idx_occupied(blen, wr_idx, rd_idx) - _PBUF_IDX_SIZE;

	/* Packet length, data + packet length size. */
	uint32_t plen = len + PBUF_PACKET_LEN_SZ;

	/* Check if packet will fit into the buffer. */
	if (free_space < plen) {
		return -ENOMEM;
	}

	/* Clear packet len with zeros and update. Clearing is done for possible versioning in the
	 * future. Writing is allowed now, because shared wr_idx value is updated at the very end.
	 */
	*((uint32_t *)(&data_loc[wr_idx])) = 0;
	sys_put_be16(len, &data_loc[wr_idx]);
	__sync_synchronize();
	sys_cache_data_flush_range(&data_loc[wr_idx], PBUF_PACKET_LEN_SZ);

	wr_idx = idx_wrap(blen, wr_idx + PBUF_PACKET_LEN_SZ);

	/* Write until end of the buffer, if data will be wrapped. */
	uint32_t tail = MIN(len, blen - wr_idx);

	memcpy(&data_loc[wr_idx], data, tail);
	sys_cache_data_flush_range(&data_loc[wr_idx], tail);

	if (len > tail) {
		/* Copy remaining data to buffer front. */
		memcpy(&data_loc[0], data + tail, len - tail);
		sys_cache_data_flush_range(&data_loc[0], len - tail);
	}

	/* Round up so the next packet's length header stays index-aligned. */
	wr_idx = idx_wrap(blen, ROUND_UP(wr_idx + len, _PBUF_IDX_SIZE));
	/* Publish wr_idx: local copy first, then shared memory, barrier, flush —
	 * so the reader observes the data before the updated index.
	 */
	pb->data.wr_idx = wr_idx;
	*(pb->cfg->wr_idx_loc) = wr_idx;
	__sync_synchronize();
	sys_cache_data_flush_range((void *)pb->cfg->wr_idx_loc, sizeof(*(pb->cfg->wr_idx_loc)));

	return len;
}
146
/**
 * Read one packet from the circular buffer, or query the length of the next
 * packet when buf is NULL.
 *
 * @param pb  Packet buffer instance; uses the trusted local pb->data.rd_idx.
 * @param buf Destination buffer, or NULL to only peek the next packet length.
 * @param len Destination buffer capacity in bytes.
 *
 * @return 0 if the buffer is empty; the next packet length if buf is NULL;
 *         the number of bytes copied on success; -EINVAL on a NULL pb or a
 *         misaligned shared wr_idx; -ENOMEM if buf is too small for the
 *         packet; -EAGAIN on inconsistent shared state.
 */
int pbuf_read(struct pbuf *pb, char *buf, uint16_t len)
{
	if (pb == NULL) {
		/* Incorrect call. */
		return -EINVAL;
	}

	/* Invalidate wr_idx only; the local copy of rd_idx is used instead of
	 * the shared one to increase buffer security.
	 */
	sys_cache_data_invd_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc)));
	__sync_synchronize();

	uint8_t *const data_loc = pb->cfg->data_loc;
	const uint32_t blen = pb->cfg->len;
	uint32_t wr_idx = *(pb->cfg->wr_idx_loc);
	uint32_t rd_idx = pb->data.rd_idx;

	/* rd_idx must always be aligned. */
	__ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE));
	/* wr_idx shall always be aligned, but its value is received from the
	 * writer. Can not assert.
	 */
	if (!IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE)) {
		return -EINVAL;
	}

	if (rd_idx == wr_idx) {
		/* Buffer is empty. */
		return 0;
	}

	/* Get packet len.*/
	sys_cache_data_invd_range(&data_loc[rd_idx], PBUF_PACKET_LEN_SZ);
	uint16_t plen = sys_get_be16(&data_loc[rd_idx]);

	if (!buf) {
		/* Peek mode: report the packet length without consuming it. */
		return (int)plen;
	}

	if (plen > len) {
		return -ENOMEM;
	}

	uint32_t occupied_space = idx_occupied(blen, wr_idx, rd_idx);

	if (occupied_space < plen + PBUF_PACKET_LEN_SZ) {
		/* This should never happen: the header claims more data than is
		 * present, i.e. the shared state is inconsistent.
		 */
		return -EAGAIN;
	}

	rd_idx = idx_wrap(blen, rd_idx + PBUF_PACKET_LEN_SZ);

	/* Packet will fit into provided buffer, truncate len if provided len
	 * is bigger than necessary.
	 */
	len = MIN(plen, len);

	/* Read until end of the buffer, if data are wrapped. */
	uint32_t tail = MIN(blen - rd_idx, len);

	sys_cache_data_invd_range(&data_loc[rd_idx], tail);
	memcpy(buf, &data_loc[rd_idx], tail);

	if (len > tail) {
		/* Remaining data wrapped to the buffer front. */
		sys_cache_data_invd_range(&data_loc[0], len - tail);
		memcpy(&buf[tail], &pb->cfg->data_loc[0], len - tail);
	}

	/* Round up to match the writer's index alignment, then publish rd_idx:
	 * local copy first, then shared memory, barrier, flush.
	 */
	rd_idx = idx_wrap(blen, ROUND_UP(rd_idx + len, _PBUF_IDX_SIZE));

	pb->data.rd_idx = rd_idx;
	*(pb->cfg->rd_idx_loc) = rd_idx;
	__sync_synchronize();
	sys_cache_data_flush_range((void *)pb->cfg->rd_idx_loc, sizeof(*(pb->cfg->rd_idx_loc)));

	return len;
}
224