/*
 * Copyright (c) 2023 DENX Software Engineering GmbH
 *
 * SPDX-License-Identifier: Apache-2.0
 */
6
#include <string.h>

#include "oa_tc6.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(oa_tc6, CONFIG_ETHERNET_LOG_LEVEL);
11
/**
 * Read a single 32-bit register via an OA TC6 control transaction.
 *
 * @param tc6 TC6 context (SPI spec and protocol state)
 * @param reg Register: bits [31:16] select the MMS, bits [15:0] the address
 * @param val Output for the value read
 * @return 0 on success, negative errno from SPI, -1 on echo/protection mismatch
 */
int oa_tc6_reg_read(struct oa_tc6 *tc6, const uint32_t reg, uint32_t *val)
{
	uint8_t buf[OA_TC6_HDR_SIZE + 12] = { 0 };
	struct spi_buf tx_buf = { .buf = buf, .len = sizeof(buf) };
	const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
	struct spi_buf rx_buf = { .buf = buf, .len = sizeof(buf) };
	const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };
	uint32_t rv, rvn, hdr;
	int ret;

	/*
	 * Buffers are allocated for the protected (larger) case (by 4 bytes).
	 * In the non-protected case we need to decrease them.
	 */
	if (!tc6->protected) {
		tx_buf.len -= sizeof(rvn);
		rx_buf.len -= sizeof(rvn);
	}

	hdr = FIELD_PREP(OA_CTRL_HDR_DNC, 0) |
	      FIELD_PREP(OA_CTRL_HDR_WNR, 0) |
	      FIELD_PREP(OA_CTRL_HDR_AID, 0) |
	      FIELD_PREP(OA_CTRL_HDR_MMS, reg >> 16) |
	      FIELD_PREP(OA_CTRL_HDR_ADDR, reg) |
	      FIELD_PREP(OA_CTRL_HDR_LEN, 0); /* To read a single register len = 0 */
	hdr |= FIELD_PREP(OA_CTRL_HDR_P, oa_tc6_get_parity(hdr));

	/*
	 * Serialize with sys_put_be32() instead of casting &buf[0] to
	 * uint32_t * — the byte buffer has no alignment guarantee and the
	 * cast violates strict aliasing.
	 */
	sys_put_be32(hdr, &buf[0]);

	ret = spi_transceive_dt(tc6->spi, &tx, &rx);
	if (ret < 0) {
		return ret;
	}

	/* Check if echoed control command header is correct */
	rv = sys_get_be32(&buf[4]);
	if (hdr != rv) {
		LOG_ERR("Header transmission error!");
		return -1;
	}

	rv = sys_get_be32(&buf[8]);

	/* In protected mode the read data is followed by its complement value */
	if (tc6->protected) {
		rvn = sys_get_be32(&buf[12]);
		if (rv != ~rvn) {
			LOG_ERR("Protected mode transmission error!");
			return -1;
		}
	}

	*val = rv;

	return ret;
}
68
/**
 * Write a single 32-bit register via an OA TC6 control transaction.
 *
 * @param tc6 TC6 context (SPI spec and protocol state)
 * @param reg Register: bits [31:16] select the MMS, bits [15:0] the address
 * @param val Value to write
 * @return 0 on success, negative errno from SPI, -1 on echo/protection mismatch
 */
int oa_tc6_reg_write(struct oa_tc6 *tc6, const uint32_t reg, uint32_t val)
{
	uint8_t buf_tx[OA_TC6_HDR_SIZE + 12] = { 0 };
	uint8_t buf_rx[OA_TC6_HDR_SIZE + 12] = { 0 };
	struct spi_buf tx_buf = { .buf = buf_tx, .len = sizeof(buf_tx) };
	const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
	struct spi_buf rx_buf = { .buf = buf_rx, .len = sizeof(buf_rx) };
	const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };
	uint32_t rv, rvn, hdr;
	int ret;

	/*
	 * Buffers are allocated for the protected (larger) case (by 4 bytes).
	 * In the non-protected case we need to decrease them.
	 */
	if (!tc6->protected) {
		tx_buf.len -= sizeof(rvn);
		rx_buf.len -= sizeof(rvn);
	}

	hdr = FIELD_PREP(OA_CTRL_HDR_DNC, 0) |
	      FIELD_PREP(OA_CTRL_HDR_WNR, 1) |
	      FIELD_PREP(OA_CTRL_HDR_AID, 0) |
	      FIELD_PREP(OA_CTRL_HDR_MMS, reg >> 16) |
	      FIELD_PREP(OA_CTRL_HDR_ADDR, reg) |
	      FIELD_PREP(OA_CTRL_HDR_LEN, 0); /* To write a single register len = 0 */
	hdr |= FIELD_PREP(OA_CTRL_HDR_P, oa_tc6_get_parity(hdr));

	/* sys_put_be32() avoids unaligned/aliasing-unsafe uint32_t * casts */
	sys_put_be32(hdr, &buf_tx[0]);
	sys_put_be32(val, &buf_tx[4]);
	if (tc6->protected) {
		/*
		 * Fix: the complement must be converted CPU -> big endian
		 * (the original used sys_be32_to_cpu(), which is the
		 * opposite direction of conversion).
		 */
		sys_put_be32(~val, &buf_tx[8]);
	}

	ret = spi_transceive_dt(tc6->spi, &tx, &rx);
	if (ret < 0) {
		return ret;
	}

	/* Check if echoed control command header is correct */
	rv = sys_get_be32(&buf_rx[4]);
	if (hdr != rv) {
		LOG_ERR("Header transmission error!");
		return -1;
	}

	/* Check if echoed value is correct */
	rv = sys_get_be32(&buf_rx[8]);
	if (val != rv) {
		LOG_ERR("Value transmission error!");
		return -1;
	}

	/*
	 * In protected mode check if the echoed value is followed by its
	 * complement value
	 */
	if (tc6->protected) {
		rvn = sys_get_be32(&buf_rx[12]);
		if (val != ~rvn) {
			LOG_ERR("Protected mode transmission error!");
			return -1;
		}
	}

	return ret;
}
137
/**
 * Read-modify-write a register: clear the bits in @mask, then OR in @val.
 *
 * @return 0 on success, negative value on read/write failure
 */
int oa_tc6_reg_rmw(struct oa_tc6 *tc6, const uint32_t reg,
		   uint32_t mask, uint32_t val)
{
	uint32_t regval;
	int rc;

	rc = oa_tc6_reg_read(tc6, reg, &regval);
	if (rc < 0) {
		return rc;
	}

	regval &= ~mask;
	if (val != 0U) {
		regval |= val;
	}

	return oa_tc6_reg_write(tc6, reg, regval);
}
157
/**
 * Enable/disable protected control transactions (data followed by its
 * complement) and mirror the new mode in the driver state on success.
 *
 * @return 0 on success, negative value on register access failure
 */
int oa_tc6_set_protected_ctrl(struct oa_tc6 *tc6, bool prote)
{
	int rc;

	rc = oa_tc6_reg_rmw(tc6, OA_CONFIG0, OA_CONFIG0_PROTE,
			    prote ? OA_CONFIG0_PROTE : 0);
	if (rc < 0) {
		return rc;
	}

	tc6->protected = prote;

	return 0;
}
169
oa_tc6_send_chunks(struct oa_tc6 * tc6,struct net_pkt * pkt)170 int oa_tc6_send_chunks(struct oa_tc6 *tc6, struct net_pkt *pkt)
171 {
172 uint16_t len = net_pkt_get_len(pkt);
173 uint8_t oa_tx[tc6->cps];
174 uint32_t hdr, ftr;
175 uint8_t chunks, i;
176 int ret;
177
178 if (len == 0) {
179 return -ENODATA;
180 }
181
182 chunks = len / tc6->cps;
183 if (len % tc6->cps) {
184 chunks++;
185 }
186
187 /* Check if LAN865x has any free internal buffer space */
188 if (chunks > tc6->txc) {
189 return -EIO;
190 }
191
192 /* Transform struct net_pkt content into chunks */
193 for (i = 1; i <= chunks; i++) {
194 hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1) |
195 FIELD_PREP(OA_DATA_HDR_DV, 1) |
196 FIELD_PREP(OA_DATA_HDR_NORX, 1) |
197 FIELD_PREP(OA_DATA_HDR_SWO, 0);
198
199 if (i == 1) {
200 hdr |= FIELD_PREP(OA_DATA_HDR_SV, 1);
201 }
202
203 if (i == chunks) {
204 hdr |= FIELD_PREP(OA_DATA_HDR_EBO, len - 1) |
205 FIELD_PREP(OA_DATA_HDR_EV, 1);
206 }
207
208 hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr));
209
210 ret = net_pkt_read(pkt, oa_tx, len > tc6->cps ? tc6->cps : len);
211 if (ret < 0) {
212 return ret;
213 }
214
215 ret = oa_tc6_chunk_spi_transfer(tc6, NULL, oa_tx, hdr, &ftr);
216 if (ret < 0) {
217 return ret;
218 }
219
220 len -= tc6->cps;
221 }
222
223 return 0;
224 }
225
oa_tc6_check_status(struct oa_tc6 * tc6)226 int oa_tc6_check_status(struct oa_tc6 *tc6)
227 {
228 uint32_t sts;
229
230 if (!tc6->sync) {
231 LOG_ERR("SYNC: Configuration lost, reset IC!");
232 return -EIO;
233 }
234
235 if (tc6->exst) {
236 /*
237 * Just clear any pending interrupts.
238 * The RESETC is handled separately as it requires per
239 * device configuration.
240 */
241 oa_tc6_reg_read(tc6, OA_STATUS0, &sts);
242 if (sts != 0) {
243 oa_tc6_reg_write(tc6, OA_STATUS0, sts);
244 LOG_WRN("EXST: OA_STATUS0: 0x%x", sts);
245 }
246
247 oa_tc6_reg_read(tc6, OA_STATUS1, &sts);
248 if (sts != 0) {
249 oa_tc6_reg_write(tc6, OA_STATUS1, sts);
250 LOG_WRN("EXST: OA_STATUS1: 0x%x", sts);
251 }
252 }
253
254 return 0;
255 }
256
/**
 * Cache the state bits carried by a received data footer.
 *
 * Validates footer parity first; on success stores TXC (TX credits),
 * RCA (RX chunks available), SYNC and EXST into the context.
 *
 * @return 0 on success, -EIO on footer parity error
 */
static int oa_tc6_update_status(struct oa_tc6 *tc6, uint32_t ftr)
{
	if (oa_tc6_get_parity(ftr)) {
		LOG_DBG("OA Status Update: Footer parity error!");
		return -EIO;
	}

	tc6->txc = FIELD_GET(OA_DATA_FTR_TXC, ftr);
	tc6->rca = FIELD_GET(OA_DATA_FTR_RCA, ftr);
	tc6->sync = FIELD_GET(OA_DATA_FTR_SYNC, ftr);
	tc6->exst = FIELD_GET(OA_DATA_FTR_EXST, ftr);

	return 0;
}
271
/**
 * Perform one full-duplex data-chunk SPI transaction.
 *
 * Sends a 4-byte header followed by tc6->cps payload bytes while receiving
 * tc6->cps payload bytes followed by the 4-byte footer. Either payload
 * pointer may be NULL (skipped by the SPI layer). The footer is converted
 * to CPU order in-place and fed to the status cache.
 *
 * @return 0 on success, negative value on SPI or footer-parity failure
 */
int oa_tc6_chunk_spi_transfer(struct oa_tc6 *tc6, uint8_t *buf_rx, uint8_t *buf_tx,
			      uint32_t hdr, uint32_t *ftr)
{
	uint32_t be_hdr = sys_cpu_to_be32(hdr);
	struct spi_buf tx_buf[2] = {
		{ .buf = &be_hdr, .len = sizeof(be_hdr) },
		{ .buf = buf_tx, .len = tc6->cps },
	};
	const struct spi_buf_set tx = {
		.buffers = tx_buf,
		.count = ARRAY_SIZE(tx_buf),
	};
	struct spi_buf rx_buf[2] = {
		{ .buf = buf_rx, .len = tc6->cps },
		{ .buf = ftr, .len = sizeof(*ftr) },
	};
	const struct spi_buf_set rx = {
		.buffers = rx_buf,
		.count = ARRAY_SIZE(rx_buf),
	};
	int ret;

	ret = spi_transceive_dt(tc6->spi, &tx, &rx);
	if (ret < 0) {
		return ret;
	}

	*ftr = sys_be32_to_cpu(*ftr);

	return oa_tc6_update_status(tc6, *ftr);
}
308
/**
 * Fetch a fresh footer (and thus updated TXC/RCA/SYNC/EXST state) by
 * issuing an empty data transaction: DV=0 (no TX data), NORX=1 (no RX data).
 *
 * @return 0 on success, negative value on transfer failure
 */
int oa_tc6_read_status(struct oa_tc6 *tc6, uint32_t *ftr)
{
	uint32_t hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1) |
		       FIELD_PREP(OA_DATA_HDR_DV, 0) |
		       FIELD_PREP(OA_DATA_HDR_NORX, 1);

	hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr));

	return oa_tc6_chunk_spi_transfer(tc6, NULL, NULL, hdr, ftr);
}
320
oa_tc6_read_chunks(struct oa_tc6 * tc6,struct net_pkt * pkt)321 int oa_tc6_read_chunks(struct oa_tc6 *tc6, struct net_pkt *pkt)
322 {
323 struct net_buf *buf_rx = NULL;
324 uint32_t hdr, ftr;
325 uint8_t sbo, ebo;
326 int ret;
327
328 /*
329 * Special case - append already received data (extracted from previous
330 * chunk) to new packet.
331 */
332 if (tc6->concat_buf) {
333 net_pkt_append_buffer(pkt, tc6->concat_buf);
334 tc6->concat_buf = NULL;
335 }
336
337 do {
338 buf_rx = net_pkt_get_frag(pkt, tc6->cps, OA_TC6_BUF_ALLOC_TIMEOUT);
339 if (!buf_rx) {
340 LOG_ERR("OA RX: Can't allocate RX buffer fordata!");
341 return -ENOMEM;
342 }
343
344 hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1);
345 hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr));
346
347 ret = oa_tc6_chunk_spi_transfer(tc6, buf_rx->data, NULL, hdr, &ftr);
348 if (ret < 0) {
349 LOG_ERR("OA RX: transmission error: %d!", ret);
350 goto unref_buf;
351 }
352
353 ret = -EIO;
354 if (oa_tc6_get_parity(ftr)) {
355 LOG_ERR("OA RX: Footer parity error!");
356 goto unref_buf;
357 }
358
359 if (!FIELD_GET(OA_DATA_FTR_SYNC, ftr)) {
360 LOG_ERR("OA RX: Configuration not SYNC'ed!");
361 goto unref_buf;
362 }
363
364 if (!FIELD_GET(OA_DATA_FTR_DV, ftr)) {
365 LOG_DBG("OA RX: Data chunk not valid, skip!");
366 goto unref_buf;
367 }
368
369 sbo = FIELD_GET(OA_DATA_FTR_SWO, ftr) * sizeof(uint32_t);
370 ebo = FIELD_GET(OA_DATA_FTR_EBO, ftr) + 1;
371
372 if (FIELD_GET(OA_DATA_FTR_SV, ftr)) {
373 /*
374 * Adjust beginning of the buffer with SWO only when
375 * we DO NOT have two frames concatenated together
376 * in one chunk.
377 */
378 if (!(FIELD_GET(OA_DATA_FTR_EV, ftr) && (ebo <= sbo))) {
379 if (sbo) {
380 net_buf_pull(buf_rx, sbo);
381 }
382 }
383 }
384
385 net_pkt_append_buffer(pkt, buf_rx);
386 buf_rx->len = tc6->cps;
387
388 if (FIELD_GET(OA_DATA_FTR_EV, ftr)) {
389 /*
390 * Check if received frame shall be dropped - i.e. MAC has
391 * detected error condition, which shall result in frame drop
392 * by the SPI host.
393 */
394 if (FIELD_GET(OA_DATA_FTR_FD, ftr)) {
395 ret = -EIO;
396 goto unref_buf;
397 }
398
399 /*
400 * Concatenation of frames in a single chunk - one frame ends
401 * and second one starts just afterwards (ebo == sbo).
402 */
403 if (FIELD_GET(OA_DATA_FTR_SV, ftr) && (ebo <= sbo)) {
404 tc6->concat_buf = net_buf_clone(buf_rx, OA_TC6_BUF_ALLOC_TIMEOUT);
405 if (!tc6->concat_buf) {
406 LOG_ERR("OA RX: Can't allocate RX buffer for data!");
407 ret = -ENOMEM;
408 goto unref_buf;
409 }
410 net_buf_pull(tc6->concat_buf, sbo);
411 }
412
413 /* Set final size of the buffer */
414 buf_rx->len = ebo;
415 /*
416 * Exit when complete packet is read and added to
417 * struct net_pkt
418 */
419 break;
420 }
421 } while (tc6->rca > 0);
422
423 return 0;
424
425 unref_buf:
426 net_buf_unref(buf_rx);
427 return ret;
428 }
429