/*
 * Copyright (c) 2023 DENX Software Engineering GmbH
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "oa_tc6.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(oa_tc6, CONFIG_ETHERNET_LOG_LEVEL);

/*
 * When IPv6 support is enabled, the minimal size of a network buffer
 * shall be at least 128 bytes (i.e. the default value).
 */
#if defined(CONFIG_NET_IPV6) && (CONFIG_NET_BUF_DATA_SIZE < 128)
#error IPv6 requires at least 128 bytes of contiguous data to handle headers!
#endif

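/*
 * Read a single 32-bit register with an OA TC6 control transaction.
 *
 * The upper 16 bits of 'reg' select the memory map selector (MMS), the
 * lower 16 bits the register address. The MAC-PHY echoes the control
 * header back one 32-bit word later, which is verified before the value
 * is accepted. In protected mode the value is additionally followed by
 * its bit-wise complement, which is checked as well.
 */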
int oa_tc6_reg_read(struct oa_tc6 *tc6, const uint32_t reg, uint32_t *val)
{
	uint8_t buf[OA_TC6_HDR_SIZE + 12] = { 0 };
	struct spi_buf tx_buf = { .buf = buf, .len = sizeof(buf) };
	const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
	struct spi_buf rx_buf = { .buf = buf, .len = sizeof(buf) };
	const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };
	uint32_t rv, rvn, hdr_bkp, *hdr = (uint32_t *) &buf[0];
	int ret = 0;

	/*
	 * Buffers are allocated for the protected (4 bytes larger) case.
	 * In the non-protected case they need to be shrunk accordingly.
	 */
	if (!tc6->protected) {
		tx_buf.len -= sizeof(rvn);
		rx_buf.len -= sizeof(rvn);
	}

	*hdr = FIELD_PREP(OA_CTRL_HDR_DNC, 0) |
		FIELD_PREP(OA_CTRL_HDR_WNR, 0) |
		FIELD_PREP(OA_CTRL_HDR_AID, 0) |
		FIELD_PREP(OA_CTRL_HDR_MMS, reg >> 16) |
		FIELD_PREP(OA_CTRL_HDR_ADDR, reg) |
		FIELD_PREP(OA_CTRL_HDR_LEN, 0); /* To read a single register len = 0 */
	*hdr |= FIELD_PREP(OA_CTRL_HDR_P, oa_tc6_get_parity(*hdr));
	hdr_bkp = *hdr;
	*hdr = sys_cpu_to_be32(*hdr);

	ret = spi_transceive_dt(tc6->spi, &tx, &rx);
	if (ret < 0) {
		return ret;
	}

	/* Check if the echoed control command header is correct */
	rv = sys_be32_to_cpu(*(uint32_t *)&buf[4]);
	if (hdr_bkp != rv) {
		LOG_ERR("Header transmission error!");
		return -EIO;
	}

	rv = sys_be32_to_cpu(*(uint32_t *)&buf[8]);

	/* In protected mode the read value is followed by its complement */
	if (tc6->protected) {
		rvn = sys_be32_to_cpu(*(uint32_t *)&buf[12]);
		if (rv != ~rvn) {
			LOG_ERR("Protected mode transmission error!");
			return -EIO;
		}
	}

	*val = rv;

	return ret;
}

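/*
 * Write a single 32-bit register with an OA TC6 control transaction.
 *
 * As for reads, the MAC-PHY echoes the control header and the written
 * value back one word later; both are verified, and in protected mode
 * the echoed complement of the value is checked too.
 */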
int oa_tc6_reg_write(struct oa_tc6 *tc6, const uint32_t reg, uint32_t val)
{
	uint8_t buf_tx[OA_TC6_HDR_SIZE + 12] = { 0 };
	uint8_t buf_rx[OA_TC6_HDR_SIZE + 12] = { 0 };
	struct spi_buf tx_buf = { .buf = buf_tx, .len = sizeof(buf_tx) };
	const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
	struct spi_buf rx_buf = { .buf = buf_rx, .len = sizeof(buf_rx) };
	const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };
	uint32_t rv, rvn, hdr_bkp, *hdr = (uint32_t *) &buf_tx[0];
	int ret;

	/*
	 * Buffers are allocated for the protected (4 bytes larger) case.
	 * In the non-protected case they need to be shrunk accordingly.
	 */
	if (!tc6->protected) {
		tx_buf.len -= sizeof(rvn);
		rx_buf.len -= sizeof(rvn);
	}

	*hdr = FIELD_PREP(OA_CTRL_HDR_DNC, 0) |
		FIELD_PREP(OA_CTRL_HDR_WNR, 1) |
		FIELD_PREP(OA_CTRL_HDR_AID, 0) |
		FIELD_PREP(OA_CTRL_HDR_MMS, reg >> 16) |
		FIELD_PREP(OA_CTRL_HDR_ADDR, reg) |
		FIELD_PREP(OA_CTRL_HDR_LEN, 0); /* To write a single register len = 0 */
	*hdr |= FIELD_PREP(OA_CTRL_HDR_P, oa_tc6_get_parity(*hdr));
	hdr_bkp = *hdr;
	*hdr = sys_cpu_to_be32(*hdr);

	*(uint32_t *)&buf_tx[4] = sys_cpu_to_be32(val);
	if (tc6->protected) {
		*(uint32_t *)&buf_tx[8] = sys_cpu_to_be32(~val);
	}

	ret = spi_transceive_dt(tc6->spi, &tx, &rx);
	if (ret < 0) {
		return ret;
	}

	/* Check if the echoed control command header is correct */
	rv = sys_be32_to_cpu(*(uint32_t *)&buf_rx[4]);
	if (hdr_bkp != rv) {
		LOG_ERR("Header transmission error!");
		return -EIO;
	}

	/* Check if the echoed value is correct */
	rv = sys_be32_to_cpu(*(uint32_t *)&buf_rx[8]);
	if (val != rv) {
		LOG_ERR("Value transmission error!");
		return -EIO;
	}

	/*
	 * In protected mode check if the written value is followed by its
	 * complement value
	 */
	if (tc6->protected) {
		rvn = sys_be32_to_cpu(*(uint32_t *)&buf_rx[12]);
		if (val != ~rvn) {
			LOG_ERR("Protected mode transmission error!");
			return -EIO;
		}
	}

	return ret;
}

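/*
 * Read-modify-write helper: clear the bits set in 'mask', then set the
 * bits from 'val', using the control transactions above.
 */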
int oa_tc6_reg_rmw(struct oa_tc6 *tc6, const uint32_t reg,
		   uint32_t mask, uint32_t val)
{
	uint32_t tmp;
	int ret;

	ret = oa_tc6_reg_read(tc6, reg, &tmp);
	if (ret < 0) {
		return ret;
	}

	tmp &= ~mask;

	if (val) {
		tmp |= val;
	}

	return oa_tc6_reg_write(tc6, reg, tmp);
}

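/*
 * Enable or disable protected control transactions (the PROTE bit in
 * OA_CONFIG0) and mirror the new state in the driver context, so that
 * subsequent register accesses use the matching buffer layout.
 */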
int oa_tc6_set_protected_ctrl(struct oa_tc6 *tc6, bool prote)
{
	int ret = oa_tc6_reg_rmw(tc6, OA_CONFIG0, OA_CONFIG0_PROTE,
				 prote ? OA_CONFIG0_PROTE : 0);
	if (ret < 0) {
		return ret;
	}

	tc6->protected = prote;
	return 0;
}

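/*
 * Transmit a network packet as a sequence of fixed-size data chunks.
 *
 * Each chunk is prefixed with a 4-byte data header: SV marks the chunk
 * carrying the start of the frame, EV the one carrying its end, and EBO
 * the offset of the last valid byte within that final chunk. The TXC
 * credit count cached from the last footer is consulted first to make
 * sure the MAC-PHY has enough free buffer space for all chunks.
 */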
int oa_tc6_send_chunks(struct oa_tc6 *tc6, struct net_pkt *pkt)
{
	uint16_t len = net_pkt_get_len(pkt);
	uint8_t oa_tx[tc6->cps];
	uint32_t hdr, ftr;
	uint8_t chunks, i;
	int ret;

	if (len == 0) {
		return -ENODATA;
	}

	chunks = len / tc6->cps;
	if (len % tc6->cps) {
		chunks++;
	}

	/* Check if the MAC-PHY (e.g. LAN865x) has any free internal buffer space */
	if (chunks > tc6->txc) {
		return -EIO;
	}

	/* Transform the struct net_pkt content into chunks */
	for (i = 1; i <= chunks; i++) {
		hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1) |
			FIELD_PREP(OA_DATA_HDR_DV, 1) |
			FIELD_PREP(OA_DATA_HDR_NORX, 1) |
			FIELD_PREP(OA_DATA_HDR_SWO, 0);

		if (i == 1) {
			hdr |= FIELD_PREP(OA_DATA_HDR_SV, 1);
		}

		if (i == chunks) {
			hdr |= FIELD_PREP(OA_DATA_HDR_EBO, len - 1) |
				FIELD_PREP(OA_DATA_HDR_EV, 1);
		}

		hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr));

		ret = net_pkt_read(pkt, oa_tx, len > tc6->cps ? tc6->cps : len);
		if (ret < 0) {
			return ret;
		}

		ret = oa_tc6_chunk_spi_transfer(tc6, NULL, oa_tx, hdr, &ftr);
		if (ret < 0) {
			return ret;
		}

		len -= tc6->cps;
	}

	return 0;
}

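/*
 * Check the SYNC and EXST flags cached from the last data footer. Loss
 * of SYNC means the device configuration is gone and the IC needs a
 * reset; EXST indicates pending extended status, which is acknowledged
 * here by writing the OA_STATUS0/1 bits back.
 */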
int oa_tc6_check_status(struct oa_tc6 *tc6)
{
	uint32_t sts;

	if (!tc6->sync) {
		LOG_ERR("SYNC: Configuration lost, reset IC!");
		return -EIO;
	}

	if (tc6->exst) {
		/*
		 * Just clear any pending interrupts.
		 * The RESETC is handled separately as it requires per
		 * device configuration.
		 */
		oa_tc6_reg_read(tc6, OA_STATUS0, &sts);
		if (sts != 0) {
			oa_tc6_reg_write(tc6, OA_STATUS0, sts);
			LOG_WRN("EXST: OA_STATUS0: 0x%x", sts);
		}

		oa_tc6_reg_read(tc6, OA_STATUS1, &sts);
		if (sts != 0) {
			oa_tc6_reg_write(tc6, OA_STATUS1, sts);
			LOG_WRN("EXST: OA_STATUS1: 0x%x", sts);
		}
	}

	return 0;
}

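/*
 * Parse a received data footer: verify its parity and cache the EXST,
 * SYNC, RCA (receive chunks available) and TXC (transmit credits)
 * fields in the driver context for later use.
 */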
static int oa_tc6_update_status(struct oa_tc6 *tc6, uint32_t ftr)
{
	if (oa_tc6_get_parity(ftr)) {
		LOG_DBG("OA Status Update: Footer parity error!");
		return -EIO;
	}

	tc6->exst = FIELD_GET(OA_DATA_FTR_EXST, ftr);
	tc6->sync = FIELD_GET(OA_DATA_FTR_SYNC, ftr);
	tc6->rca = FIELD_GET(OA_DATA_FTR_RCA, ftr);
	tc6->txc = FIELD_GET(OA_DATA_FTR_TXC, ftr);

	return 0;
}

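/*
 * Perform a single full-duplex chunk transfer: the 4-byte data header
 * followed by tc6->cps bytes of payload on MOSI, with tc6->cps bytes of
 * payload followed by the 4-byte footer arriving on MISO. Either buffer
 * may be NULL for header/footer-only exchanges (as done by
 * oa_tc6_read_status() and the TX/RX-only paths). The received footer
 * is parsed to refresh the cached status.
 */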
int oa_tc6_chunk_spi_transfer(struct oa_tc6 *tc6, uint8_t *buf_rx, uint8_t *buf_tx,
			      uint32_t hdr, uint32_t *ftr)
{
	struct spi_buf tx_buf[2];
	struct spi_buf rx_buf[2];
	struct spi_buf_set tx;
	struct spi_buf_set rx;
	int ret;

	hdr = sys_cpu_to_be32(hdr);
	tx_buf[0].buf = &hdr;
	tx_buf[0].len = sizeof(hdr);

	tx_buf[1].buf = buf_tx;
	tx_buf[1].len = tc6->cps;

	tx.buffers = tx_buf;
	tx.count = ARRAY_SIZE(tx_buf);

	rx_buf[0].buf = buf_rx;
	rx_buf[0].len = tc6->cps;

	rx_buf[1].buf = ftr;
	rx_buf[1].len = sizeof(*ftr);

	rx.buffers = rx_buf;
	rx.count = ARRAY_SIZE(rx_buf);

	ret = spi_transceive_dt(tc6->spi, &tx, &rx);
	if (ret < 0) {
		return ret;
	}
	*ftr = sys_be32_to_cpu(*ftr);

	return oa_tc6_update_status(tc6, *ftr);
}

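/*
 * Issue a data chunk transaction that carries no payload (DV = 0,
 * NORX = 1) purely to fetch a fresh footer and update the cached
 * status fields.
 */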
int oa_tc6_read_status(struct oa_tc6 *tc6, uint32_t *ftr)
{
	uint32_t hdr;

	hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1) |
		FIELD_PREP(OA_DATA_HDR_DV, 0) |
		FIELD_PREP(OA_DATA_HDR_NORX, 1);
	hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr));

	return oa_tc6_chunk_spi_transfer(tc6, NULL, NULL, hdr, ftr);
}

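/*
 * Read pending receive chunks (as advertised by RCA) into a network
 * packet. The footer of each chunk is inspected for frame start (SV),
 * frame end (EV), the start/end byte offsets (SWO/EBO) and the frame
 * drop indication (FD). When the end of one frame and the start of the
 * next share a single chunk, the trailing data is cloned into
 * tc6->concat_buf and prepended to the next packet.
 */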
int oa_tc6_read_chunks(struct oa_tc6 *tc6, struct net_pkt *pkt)
{
	const uint16_t buf_rx_size = CONFIG_NET_BUF_DATA_SIZE;
	struct net_buf *buf_rx = NULL;
	uint32_t buf_rx_used = 0;
	uint32_t hdr, ftr;
	uint8_t sbo, ebo;
	int ret;

	/*
	 * Special case - append already received data (extracted from the
	 * previous chunk) to the new packet.
	 *
	 * This code is NOT used when OA_CONFIG0 RFA [13:12] is set to 01
	 * (ZAREFE) - then received Ethernet frames will always start at
	 * the beginning of a new chunk.
	 */
	if (tc6->concat_buf) {
		net_pkt_append_buffer(pkt, tc6->concat_buf);
		tc6->concat_buf = NULL;
	}

	do {
		if (!buf_rx) {
			buf_rx = net_pkt_get_frag(pkt, buf_rx_size, OA_TC6_BUF_ALLOC_TIMEOUT);
			if (!buf_rx) {
				LOG_ERR("OA RX: Can't allocate RX buffer for data!");
				return -ENOMEM;
			}
		}

		hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1);
		hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr));

		ret = oa_tc6_chunk_spi_transfer(tc6, buf_rx->data + buf_rx_used, NULL, hdr, &ftr);
		if (ret < 0) {
			LOG_ERR("OA RX: transmission error: %d!", ret);
			goto unref_buf;
		}

		ret = -EIO;
		if (oa_tc6_get_parity(ftr)) {
			LOG_ERR("OA RX: Footer parity error!");
			goto unref_buf;
		}

		if (!FIELD_GET(OA_DATA_FTR_SYNC, ftr)) {
			LOG_ERR("OA RX: Configuration not SYNC'ed!");
			goto unref_buf;
		}

		if (!FIELD_GET(OA_DATA_FTR_DV, ftr)) {
			LOG_DBG("OA RX: Data chunk not valid, skip!");
			goto unref_buf;
		}

		sbo = FIELD_GET(OA_DATA_FTR_SWO, ftr) * sizeof(uint32_t);
		ebo = FIELD_GET(OA_DATA_FTR_EBO, ftr) + 1;

		if (FIELD_GET(OA_DATA_FTR_SV, ftr)) {
			/*
			 * Adjust the beginning of the buffer with SWO only
			 * when we DO NOT have two frames concatenated
			 * together in one chunk.
			 */
			if (!(FIELD_GET(OA_DATA_FTR_EV, ftr) && (ebo <= sbo))) {
				if (sbo) {
					net_buf_pull(buf_rx, sbo);
				}
			}
		}

		if (FIELD_GET(OA_DATA_FTR_EV, ftr)) {
			/*
			 * Check if the received frame shall be dropped - i.e.
			 * the MAC has detected an error condition, which
			 * shall result in a frame drop by the SPI host.
			 */
			if (FIELD_GET(OA_DATA_FTR_FD, ftr)) {
				ret = -EIO;
				goto unref_buf;
			}

			/*
			 * Concatenation of frames in a single chunk - one
			 * frame ends and a second one starts just afterwards
			 * (ebo <= sbo).
			 */
			if (FIELD_GET(OA_DATA_FTR_SV, ftr) && (ebo <= sbo)) {
				tc6->concat_buf = net_buf_clone(buf_rx, OA_TC6_BUF_ALLOC_TIMEOUT);
				if (!tc6->concat_buf) {
					LOG_ERR("OA RX: Can't allocate RX buffer for data!");
					ret = -ENOMEM;
					goto unref_buf;
				}
				net_buf_pull(tc6->concat_buf, sbo);
			}

			/* Set the final size of the buffer */
			buf_rx_used += ebo;
			buf_rx->len = buf_rx_used;
			net_pkt_append_buffer(pkt, buf_rx);
			/*
			 * Exit when the complete packet has been read and
			 * added to the struct net_pkt
			 */
			break;
		} else {
			buf_rx_used += tc6->cps;
			if ((buf_rx_size - buf_rx_used) < tc6->cps) {
				net_pkt_append_buffer(pkt, buf_rx);
				buf_rx->len = buf_rx_used;
				buf_rx_used = 0;
				buf_rx = NULL;
			}
		}
	} while (tc6->rca > 0);

	return 0;

 unref_buf:
	net_buf_unref(buf_rx);
	return ret;
}