/*
 * Copyright (c) 2023 DENX Software Engineering GmbH
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/net/mdio.h>
#include "oa_tc6.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(oa_tc6, CONFIG_ETHERNET_LOG_LEVEL);

/*
 * When IPv6 support is enabled, the minimal size of the network buffer
 * shall be at least 128 bytes (i.e. the default value).
 */
#if defined(CONFIG_NET_IPV6) && (CONFIG_NET_BUF_DATA_SIZE < 128)
#error IPv6 requires at least 128 bytes of contiguous data to handle headers!
#endif

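/*
 * Read a single register with an OA TC6 control transaction.
 *
 * The transfer is a 4-byte control header, which the MAC-PHY echoes back
 * shifted by one 32-bit word, followed by the register value and, in
 * protected mode, its bit-wise complement.
 */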
int oa_tc6_reg_read(struct oa_tc6 *tc6, const uint32_t reg, uint32_t *val)
{
	uint8_t buf[OA_TC6_HDR_SIZE + 12] = {0};
	struct spi_buf tx_buf = {.buf = buf, .len = sizeof(buf)};
	const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
	struct spi_buf rx_buf = {.buf = buf, .len = sizeof(buf)};
	const struct spi_buf_set rx = {.buffers = &rx_buf, .count = 1};
	uint32_t rv, rvn, hdr_bkp, *hdr = (uint32_t *)&buf[0];
	int ret = 0;

	/*
	 * Buffers are allocated for the protected (4 bytes larger) case.
	 * In the non-protected case they need to be shrunk accordingly.
	 */
	if (!tc6->protected) {
		tx_buf.len -= sizeof(rvn);
		rx_buf.len -= sizeof(rvn);
	}

	*hdr = FIELD_PREP(OA_CTRL_HDR_DNC, 0) | FIELD_PREP(OA_CTRL_HDR_WNR, 0) |
	       FIELD_PREP(OA_CTRL_HDR_AID, 0) | FIELD_PREP(OA_CTRL_HDR_MMS, reg >> 16) |
	       FIELD_PREP(OA_CTRL_HDR_ADDR, reg) |
	       FIELD_PREP(OA_CTRL_HDR_LEN, 0); /* To read a single register len = 0 */
	*hdr |= FIELD_PREP(OA_CTRL_HDR_P, oa_tc6_get_parity(*hdr));
	hdr_bkp = *hdr;
	*hdr = sys_cpu_to_be32(*hdr);

	ret = spi_transceive_dt(tc6->spi, &tx, &rx);
	if (ret < 0) {
		return ret;
	}

	/* Check if the echoed control command header is correct */
	rv = sys_be32_to_cpu(*(uint32_t *)&buf[4]);
	if (hdr_bkp != rv) {
		LOG_ERR("Header transmission error!");
		return -EIO;
	}

	rv = sys_be32_to_cpu(*(uint32_t *)&buf[8]);

	/* In protected mode the read data is followed by its complement value */
	if (tc6->protected) {
		rvn = sys_be32_to_cpu(*(uint32_t *)&buf[12]);
		if (rv != ~rvn) {
			LOG_ERR("Protected mode transmission error!");
			return -EIO;
		}
	}

	*val = rv;

	return ret;
}

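/*
 * Write a single register with an OA TC6 control transaction.
 *
 * The MAC-PHY echoes the header and the written value back, both of which
 * are verified; in protected mode the echoed value is additionally
 * followed by its bit-wise complement.
 */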
int oa_tc6_reg_write(struct oa_tc6 *tc6, const uint32_t reg, uint32_t val)
{
	uint8_t buf_tx[OA_TC6_HDR_SIZE + 12] = {0};
	uint8_t buf_rx[OA_TC6_HDR_SIZE + 12] = {0};
	struct spi_buf tx_buf = {.buf = buf_tx, .len = sizeof(buf_tx)};
	const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
	struct spi_buf rx_buf = {.buf = buf_rx, .len = sizeof(buf_rx)};
	const struct spi_buf_set rx = {.buffers = &rx_buf, .count = 1};
	uint32_t rv, rvn, hdr_bkp, *hdr = (uint32_t *)&buf_tx[0];
	int ret;

	/*
	 * Buffers are allocated for the protected (4 bytes larger) case.
	 * In the non-protected case they need to be shrunk accordingly.
	 */
	if (!tc6->protected) {
		tx_buf.len -= sizeof(rvn);
		rx_buf.len -= sizeof(rvn);
	}

	*hdr = FIELD_PREP(OA_CTRL_HDR_DNC, 0) | FIELD_PREP(OA_CTRL_HDR_WNR, 1) |
	       FIELD_PREP(OA_CTRL_HDR_AID, 0) | FIELD_PREP(OA_CTRL_HDR_MMS, reg >> 16) |
	       FIELD_PREP(OA_CTRL_HDR_ADDR, reg) |
	       FIELD_PREP(OA_CTRL_HDR_LEN, 0); /* To write a single register len = 0 */
	*hdr |= FIELD_PREP(OA_CTRL_HDR_P, oa_tc6_get_parity(*hdr));
	hdr_bkp = *hdr;
	*hdr = sys_cpu_to_be32(*hdr);

	*(uint32_t *)&buf_tx[4] = sys_cpu_to_be32(val);
	if (tc6->protected) {
		*(uint32_t *)&buf_tx[8] = sys_cpu_to_be32(~val);
	}

	ret = spi_transceive_dt(tc6->spi, &tx, &rx);
	if (ret < 0) {
		return ret;
	}

	/* Check if the echoed control command header is correct */
	rv = sys_be32_to_cpu(*(uint32_t *)&buf_rx[4]);
	if (hdr_bkp != rv) {
		LOG_ERR("Header transmission error!");
		return -EIO;
	}

	/* Check if the echoed value is correct */
	rv = sys_be32_to_cpu(*(uint32_t *)&buf_rx[8]);
	if (val != rv) {
		LOG_ERR("Value transmission error!");
		return -EIO;
	}

	/*
	 * In protected mode check if the written value is followed by its
	 * complement value
	 */
	if (tc6->protected) {
		rvn = sys_be32_to_cpu(*(uint32_t *)&buf_rx[12]);
		if (val != ~rvn) {
			LOG_ERR("Protected mode transmission error!");
			return -EIO;
		}
	}

	return ret;
}

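/* Read-modify-write: clear the bits set in mask, then OR in val. */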
int oa_tc6_reg_rmw(struct oa_tc6 *tc6, const uint32_t reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp;
	int ret;

	ret = oa_tc6_reg_read(tc6, reg, &tmp);
	if (ret < 0) {
		return ret;
	}

	tmp &= ~mask;
	tmp |= val;

	return oa_tc6_reg_write(tc6, reg, tmp);
}

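/*
 * Clause 22 MDIO access - the standard PHY registers are mapped into the
 * MAC-PHY register space at OA_TC6_PHY_STD_REG_ADDR_BASE; prtad is ignored
 * here as the MAC-PHY integrates a single PHY.
 */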
int oa_tc6_mdio_read(struct oa_tc6 *tc6, uint8_t prtad, uint8_t regad, uint16_t *data)
{
	uint32_t tmp;
	int ret;

	ret = oa_tc6_reg_read(
		tc6, OA_TC6_PHY_STD_REG_ADDR_BASE | (regad & OA_TC6_PHY_STD_REG_ADDR_MASK), &tmp);
	if (ret == 0) {
		/* Registers are 16 bit wide - only the low half is valid */
		*data = (uint16_t)tmp;
	}

	return ret;
}

int oa_tc6_mdio_write(struct oa_tc6 *tc6, uint8_t prtad, uint8_t regad, uint16_t data)
{
	return oa_tc6_reg_write(
		tc6, OA_TC6_PHY_STD_REG_ADDR_BASE | (regad & OA_TC6_PHY_STD_REG_ADDR_MASK), data);
}

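/* Map a Clause 45 MMD device address to the corresponding MMS number. */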
static int oa_tc6_get_phy_c45_mms(int devad)
{
	switch (devad) {
	case MDIO_MMD_PCS:
		return OA_TC6_PHY_C45_PCS_MMS2;
	case MDIO_MMD_PMAPMD:
		return OA_TC6_PHY_C45_PMA_PMD_MMS3;
	case MDIO_MMD_VENDOR_SPECIFIC2:
		return OA_TC6_PHY_C45_VS_PLCA_MMS4;
	case MDIO_MMD_AN:
		return OA_TC6_PHY_C45_AUTO_NEG_MMS5;
	default:
		return -EOPNOTSUPP;
	}
}

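/*
 * Clause 45 MDIO access - the MMD registers are reached through the MMS
 * selected by oa_tc6_get_phy_c45_mms(), with regad as the offset within it.
 */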
int oa_tc6_mdio_read_c45(struct oa_tc6 *tc6, uint8_t prtad, uint8_t devad, uint16_t regad,
			 uint16_t *data)
{
	uint32_t tmp;
	int ret;

	ret = oa_tc6_get_phy_c45_mms(devad);
	if (ret < 0) {
		return ret;
	}

	ret = oa_tc6_reg_read(tc6, (ret << 16) | regad, &tmp);
	if (ret < 0) {
		return ret;
	}

	*data = (uint16_t)tmp;

	return 0;
}

int oa_tc6_mdio_write_c45(struct oa_tc6 *tc6, uint8_t prtad, uint8_t devad, uint16_t regad,
			  uint16_t data)
{
	int ret;

	ret = oa_tc6_get_phy_c45_mms(devad);
	if (ret < 0) {
		return ret;
	}

	return oa_tc6_reg_write(tc6, (ret << 16) | regad, (uint32_t)data);
}

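/*
 * Enable or disable protected control transactions (OA_CONFIG0 PROTE).
 * When enabled, every control data word is followed by its bit-wise
 * complement so that transmission errors can be detected.
 */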
int oa_tc6_set_protected_ctrl(struct oa_tc6 *tc6, bool prote)
{
	int ret;

	ret = oa_tc6_reg_rmw(tc6, OA_CONFIG0, OA_CONFIG0_PROTE, prote ? OA_CONFIG0_PROTE : 0);
	if (ret < 0) {
		return ret;
	}

	tc6->protected = prote;
	return 0;
}

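/*
 * Split the packet into tc6->cps sized chunks and transmit them. SV and EV
 * mark the first and last chunk of the frame; NORX is set as no RX payload
 * is consumed here.
 */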
int oa_tc6_send_chunks(struct oa_tc6 *tc6, struct net_pkt *pkt)
{
	uint16_t len = net_pkt_get_len(pkt);
	uint8_t oa_tx[tc6->cps];
	uint32_t hdr, ftr;
	uint8_t chunks, i;
	int ret;

	if (len == 0) {
		return -ENODATA;
	}

	chunks = len / tc6->cps;
	if (len % tc6->cps) {
		chunks++;
	}

	/* Check if the MAC-PHY has enough free internal buffer space */
	if (chunks > tc6->txc) {
		return -EIO;
	}

	/* Transform the struct net_pkt content into chunks */
	for (i = 1; i <= chunks; i++) {
		hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1) | FIELD_PREP(OA_DATA_HDR_DV, 1) |
		      FIELD_PREP(OA_DATA_HDR_NORX, 1) | FIELD_PREP(OA_DATA_HDR_SWO, 0);

		if (i == 1) {
			hdr |= FIELD_PREP(OA_DATA_HDR_SV, 1);
		}

		if (i == chunks) {
			hdr |= FIELD_PREP(OA_DATA_HDR_EBO, len - 1) | FIELD_PREP(OA_DATA_HDR_EV, 1);
		}

		hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr));

		ret = net_pkt_read(pkt, oa_tx, len > tc6->cps ? tc6->cps : len);
		if (ret < 0) {
			return ret;
		}

		ret = oa_tc6_chunk_spi_transfer(tc6, NULL, oa_tx, hdr, &ftr);
		if (ret < 0) {
			return ret;
		}

		len -= tc6->cps;
	}

	return 0;
}

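/*
 * Handle the SYNC and EXST footer flags: a lost SYNC means the device
 * configuration is gone and the IC has to be reset; EXST means pending
 * status bits need to be read and cleared.
 */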
int oa_tc6_check_status(struct oa_tc6 *tc6)
{
	uint32_t sts;

	if (!tc6->sync) {
		LOG_ERR("SYNC: Configuration lost, reset IC!");
		return -EIO;
	}

	if (tc6->exst) {
		/*
		 * Just clear any pending interrupts.
		 * The RESETC is handled separately as it requires per-device
		 * configuration.
		 */
		oa_tc6_reg_read(tc6, OA_STATUS0, &sts);
		if (sts != 0) {
			oa_tc6_reg_write(tc6, OA_STATUS0, sts);
			LOG_WRN("EXST: OA_STATUS0: 0x%x", sts);
		}

		oa_tc6_reg_read(tc6, OA_STATUS1, &sts);
		if (sts != 0) {
			oa_tc6_reg_write(tc6, OA_STATUS1, sts);
			LOG_WRN("EXST: OA_STATUS1: 0x%x", sts);
		}
	}

	return 0;
}

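/* Cache the EXST, SYNC, RCA and TXC fields of a received data footer. */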
static int oa_tc6_update_status(struct oa_tc6 *tc6, uint32_t ftr)
{
	if (oa_tc6_get_parity(ftr)) {
		LOG_DBG("OA Status Update: Footer parity error!");
		return -EIO;
	}

	tc6->exst = FIELD_GET(OA_DATA_FTR_EXST, ftr);
	tc6->sync = FIELD_GET(OA_DATA_FTR_SYNC, ftr);
	tc6->rca = FIELD_GET(OA_DATA_FTR_RCA, ftr);
	tc6->txc = FIELD_GET(OA_DATA_FTR_TXC, ftr);

	return 0;
}

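/*
 * Perform one data chunk transfer: a 4-byte header plus tc6->cps bytes of
 * TX payload on MOSI, and tc6->cps bytes of RX payload plus a 4-byte
 * footer on MISO. The footer is converted to CPU byte order and used to
 * update the cached device status.
 */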
int oa_tc6_chunk_spi_transfer(struct oa_tc6 *tc6, uint8_t *buf_rx, uint8_t *buf_tx, uint32_t hdr,
			      uint32_t *ftr)
{
	struct spi_buf tx_buf[2];
	struct spi_buf rx_buf[2];
	struct spi_buf_set tx;
	struct spi_buf_set rx;
	int ret;

	hdr = sys_cpu_to_be32(hdr);
	tx_buf[0].buf = &hdr;
	tx_buf[0].len = sizeof(hdr);

	tx_buf[1].buf = buf_tx;
	tx_buf[1].len = tc6->cps;

	tx.buffers = tx_buf;
	tx.count = ARRAY_SIZE(tx_buf);

	rx_buf[0].buf = buf_rx;
	rx_buf[0].len = tc6->cps;

	rx_buf[1].buf = ftr;
	rx_buf[1].len = sizeof(*ftr);

	rx.buffers = rx_buf;
	rx.count = ARRAY_SIZE(rx_buf);

	ret = spi_transceive_dt(tc6->spi, &tx, &rx);
	if (ret < 0) {
		return ret;
	}

	*ftr = sys_be32_to_cpu(*ftr);

	return oa_tc6_update_status(tc6, *ftr);
}

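/*
 * Transfer an empty data chunk (DV = 0, NORX = 1) solely to obtain a
 * fresh footer with the current device status.
 */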
int oa_tc6_read_status(struct oa_tc6 *tc6, uint32_t *ftr)
{
	uint32_t hdr;

	hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1) | FIELD_PREP(OA_DATA_HDR_DV, 0) |
	      FIELD_PREP(OA_DATA_HDR_NORX, 1);
	hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr));

	return oa_tc6_chunk_spi_transfer(tc6, NULL, NULL, hdr, ftr);
}

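/*
 * Read all pending RX chunks (tc6->rca) into the packet. SWO and EBO from
 * the footer delimit the frame within a chunk; a frame ending mid-chunk
 * with another one starting right after it is preserved in concat_buf for
 * the next call.
 */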
int oa_tc6_read_chunks(struct oa_tc6 *tc6, struct net_pkt *pkt)
{
	const uint16_t buf_rx_size = CONFIG_NET_BUF_DATA_SIZE;
	struct net_buf *buf_rx = NULL;
	uint32_t buf_rx_used = 0;
	uint32_t hdr, ftr;
	uint8_t sbo, ebo;
	int ret;

	/*
	 * Special case - append already received data (extracted from the
	 * previous chunk) to the new packet.
	 *
	 * This code is NOT used when OA_CONFIG0 RFA [13:12] is set to 01
	 * (ZAREFE) - then received ethernet frames will always start at the
	 * beginning of a new chunk.
	 */
	if (tc6->concat_buf) {
		net_pkt_append_buffer(pkt, tc6->concat_buf);
		tc6->concat_buf = NULL;
	}

	do {
		if (!buf_rx) {
			buf_rx = net_pkt_get_frag(pkt, buf_rx_size, OA_TC6_BUF_ALLOC_TIMEOUT);
			if (!buf_rx) {
				LOG_ERR("OA RX: Can't allocate RX buffer for data!");
				return -ENOMEM;
			}
		}

		hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1);
		hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr));

		ret = oa_tc6_chunk_spi_transfer(tc6, buf_rx->data + buf_rx_used, NULL, hdr, &ftr);
		if (ret < 0) {
			LOG_ERR("OA RX: transmission error: %d!", ret);
			goto unref_buf;
		}

		ret = -EIO;
		if (oa_tc6_get_parity(ftr)) {
			LOG_ERR("OA RX: Footer parity error!");
			goto unref_buf;
		}

		if (!FIELD_GET(OA_DATA_FTR_SYNC, ftr)) {
			LOG_ERR("OA RX: Configuration not SYNC'ed!");
			goto unref_buf;
		}

		if (!FIELD_GET(OA_DATA_FTR_DV, ftr)) {
			LOG_DBG("OA RX: Data chunk not valid, skip!");
			goto unref_buf;
		}

		sbo = FIELD_GET(OA_DATA_FTR_SWO, ftr) * sizeof(uint32_t);
		ebo = FIELD_GET(OA_DATA_FTR_EBO, ftr) + 1;

		if (FIELD_GET(OA_DATA_FTR_SV, ftr)) {
			/*
			 * Adjust the beginning of the buffer with SWO only
			 * when we DO NOT have two frames concatenated together
			 * in one chunk.
			 */
			if (!(FIELD_GET(OA_DATA_FTR_EV, ftr) && (ebo <= sbo))) {
				if (sbo) {
					net_buf_pull(buf_rx, sbo);
				}
			}
		}

		if (FIELD_GET(OA_DATA_FTR_EV, ftr)) {
			/*
			 * Check if the received frame shall be dropped - i.e.
			 * the MAC has detected an error condition, which shall
			 * result in a frame drop by the SPI host.
			 */
			if (FIELD_GET(OA_DATA_FTR_FD, ftr)) {
				ret = -EIO;
				goto unref_buf;
			}

			/*
			 * Concatenation of frames in a single chunk - one
			 * frame ends and a second one starts just afterwards
			 * (ebo <= sbo).
			 */
			if (FIELD_GET(OA_DATA_FTR_SV, ftr) && (ebo <= sbo)) {
				tc6->concat_buf = net_buf_clone(buf_rx, OA_TC6_BUF_ALLOC_TIMEOUT);
				if (!tc6->concat_buf) {
					LOG_ERR("OA RX: Can't allocate RX buffer for data!");
					ret = -ENOMEM;
					goto unref_buf;
				}
				net_buf_pull(tc6->concat_buf, sbo);
			}

			/* Set the final size of the buffer */
			buf_rx_used += ebo;
			buf_rx->len = buf_rx_used;
			net_pkt_append_buffer(pkt, buf_rx);
			/*
			 * Exit when the complete packet is read and added to
			 * struct net_pkt
			 */
			break;
		} else {
			buf_rx_used += tc6->cps;
			if ((buf_rx_size - buf_rx_used) < tc6->cps) {
				net_pkt_append_buffer(pkt, buf_rx);
				buf_rx->len = buf_rx_used;
				buf_rx_used = 0;
				buf_rx = NULL;
			}
		}
	} while (tc6->rca > 0);

	return 0;

unref_buf:
	net_buf_unref(buf_rx);
	return ret;
}