1 /*
2  * Copyright (c) 2018-2019 Intel Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT intel_e1000
8 
9 #define LOG_MODULE_NAME eth_e1000
10 #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
11 #include <zephyr/logging/log.h>
12 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
13 
14 #include <sys/types.h>
15 #include <zephyr/kernel.h>
16 #include <zephyr/net/ethernet.h>
17 #include <ethernet/eth_stats.h>
18 #include <zephyr/drivers/pcie/pcie.h>
19 #include <zephyr/irq.h>
20 #include "eth_e1000_priv.h"
21 
22 #if defined(CONFIG_ETH_E1000_PTP_CLOCK)
23 #include <zephyr/drivers/ptp_clock.h>
24 
25 #define PTP_INST_NODEID(n) DT_INST_CHILD(n, ptp)
26 #endif
27 
#if defined(CONFIG_ETH_E1000_VERBOSE_DEBUG)
/* Dump a buffer at debug log level, prefixed with the calling function's
 * name and a printf-style description. Compiles to nothing unless verbose
 * debugging is enabled.
 */
#define hexdump(_buf, _len, fmt, args...)				\
({									\
	const size_t STR_SIZE = 80;					\
	char _str[STR_SIZE];						\
									\
	snprintk(_str, STR_SIZE, "%s: " fmt, __func__, ## args);	\
									\
	LOG_HEXDUMP_DBG(_buf, _len, _str);			\
})
#else
#define hexdump(args...)
#endif
41 
e1000_reg_to_string(enum e1000_reg_t r)42 static const char *e1000_reg_to_string(enum e1000_reg_t r)
43 {
44 #define _(_x)	case _x: return #_x
45 	switch (r) {
46 	_(CTRL);
47 	_(ICR);
48 	_(ICS);
49 	_(IMS);
50 	_(RCTL);
51 	_(TCTL);
52 	_(RDBAL);
53 	_(RDBAH);
54 	_(RDLEN);
55 	_(RDH);
56 	_(RDT);
57 	_(TDBAL);
58 	_(TDBAH);
59 	_(TDLEN);
60 	_(TDH);
61 	_(TDT);
62 	_(RAL);
63 	_(RAH);
64 	}
65 #undef _
66 	LOG_ERR("Unsupported register: 0x%x", r);
67 	k_oops();
68 	return NULL;
69 }
70 
/* Resolve the interface a received frame belongs to. With VLAN support
 * enabled the tag selects the matching VLAN interface, falling back to
 * the main interface when no match exists.
 */
static struct net_if *get_iface(struct e1000_dev *ctx, uint16_t vlan_tag)
{
#if defined(CONFIG_NET_VLAN)
	struct net_if *vlan_iface;

	vlan_iface = net_eth_get_vlan_iface(ctx->iface, vlan_tag);

	return vlan_iface ? vlan_iface : ctx->iface;
#else
	ARG_UNUSED(vlan_tag);

	return ctx->iface;
#endif
}
88 
e1000_caps(const struct device * dev)89 static enum ethernet_hw_caps e1000_caps(const struct device *dev)
90 {
91 	return
92 #if defined(CONFIG_NET_VLAN)
93 		ETHERNET_HW_VLAN |
94 #endif
95 #if defined(CONFIG_ETH_E1000_PTP_CLOCK)
96 		ETHERNET_PTP |
97 #endif
98 		ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T |
99 		ETHERNET_LINK_1000BASE_T |
100 		/* The driver does not really support TXTIME atm but mark
101 		 * it to support it so that we can test the txtime sample.
102 		 */
103 		ETHERNET_TXTIME;
104 }
105 
#if defined(CONFIG_ETH_E1000_PTP_CLOCK)
/* Return the PTP clock device bound to this Ethernet interface. */
static const struct device *e1000_get_ptp_clock(const struct device *dev)
{
	struct e1000_dev *context = dev->data;

	return context->ptp_clock;
}
#endif
114 
/* Transmit one frame already staged at buf. Programs the single TX
 * descriptor, rings the doorbell, and waits for completion.
 * Returns 0 on success, -EIO if the NIC reports a status without
 * Descriptor-Done set.
 */
static int e1000_tx(struct e1000_dev *dev, void *buf, size_t len)
{
	hexdump(buf, len, "%zu byte(s)", len);

	/* Fill in the descriptor: buffer address, length, and
	 * End-Of-Packet + Report-Status so the NIC writes back completion.
	 */
	dev->tx.addr = POINTER_TO_INT(buf);
	dev->tx.len = len;
	dev->tx.cmd = TDESC_EOP | TDESC_RS;

	/* Advance the TX tail pointer so the NIC picks up the descriptor. */
	iow32(dev, TDT, 1);

	/* Poll (yielding to other threads) until the NIC sets any status bit. */
	while (!(dev->tx.sta)) {
		k_yield();
	}

	LOG_DBG("tx.sta: 0x%02hx", dev->tx.sta);

	return (dev->tx.sta & TDESC_STA_DD) ? 0 : -EIO;
}
133 
e1000_send(const struct device * ddev,struct net_pkt * pkt)134 static int e1000_send(const struct device *ddev, struct net_pkt *pkt)
135 {
136 	struct e1000_dev *dev = ddev->data;
137 	size_t len = net_pkt_get_len(pkt);
138 
139 	if (net_pkt_read(pkt, dev->txb, len)) {
140 		return -EIO;
141 	}
142 
143 	return e1000_tx(dev, dev->txb, len);
144 }
145 
/* Pull one received frame out of the single RX descriptor and copy it
 * into a freshly allocated net_pkt.
 * Returns the packet on success, NULL on any failure (descriptor not
 * ready, bad length, allocation or copy failure).
 */
static struct net_pkt *e1000_rx(struct e1000_dev *dev)
{
	struct net_pkt *pkt = NULL;
	void *buf;
	ssize_t len;

	LOG_DBG("rx.sta: 0x%02hx", dev->rx.sta);

	/* Descriptor contents are only valid once Descriptor-Done is set. */
	if (!(dev->rx.sta & RDESC_STA_DD)) {
		LOG_ERR("RX descriptor not ready");
		goto out;
	}

	buf = INT_TO_POINTER((uint32_t)dev->rx.addr);
	/* Drop the trailing 4 bytes — presumably the frame CRC appended by
	 * the NIC; TODO confirm against the RCTL configuration.
	 */
	len = dev->rx.len - 4;

	if (len <= 0) {
		LOG_ERR("Invalid RX descriptor length: %hu", dev->rx.len);
		goto out;
	}

	hexdump(buf, len, "%zd byte(s)", len);

	/* AF_UNSPEC: the L2 layer will parse the Ethernet header later. */
	pkt = net_pkt_rx_alloc_with_buffer(dev->iface, len, AF_UNSPEC, 0,
					   K_NO_WAIT);
	if (!pkt) {
		LOG_ERR("Out of buffers");
		goto out;
	}

	if (net_pkt_write(pkt, buf, len)) {
		LOG_ERR("Out of memory for received frame");
		net_pkt_unref(pkt);
		pkt = NULL;
	}

out:
	return pkt;
}
185 
/* Interrupt handler: acknowledge causes, receive pending frames, and
 * forward them (with VLAN tag extraction when enabled) to the network
 * stack.
 */
static void e1000_isr(const struct device *ddev)
{
	struct e1000_dev *dev = ddev->data;
	uint32_t icr = ior32(dev, ICR); /* Cleared upon read */
	uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;

	/* TX-done and TX-queue-empty require no action; just clear them
	 * from the cause mask so they are not reported as unhandled.
	 */
	icr &= ~(ICR_TXDW | ICR_TXQE);

	if (icr & ICR_RXO) {
		struct net_pkt *pkt = e1000_rx(dev);

		icr &= ~ICR_RXO;

		if (pkt) {
#if defined(CONFIG_NET_VLAN)
			struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);

			/* Tagged frame: record the TCI so the packet is
			 * delivered to the matching VLAN interface.
			 */
			if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) {
				struct net_eth_vlan_hdr *hdr_vlan =
					(struct net_eth_vlan_hdr *)
					NET_ETH_HDR(pkt);

				net_pkt_set_vlan_tci(
					pkt, ntohs(hdr_vlan->vlan.tci));
				vlan_tag = net_pkt_vlan_tag(pkt);

#if CONFIG_NET_TC_RX_COUNT > 1
				/* Map the VLAN priority onto a traffic
				 * class used by the RX queues.
				 */
				enum net_priority prio;

				prio = net_vlan2priority(
						net_pkt_vlan_priority(pkt));
				net_pkt_set_priority(pkt, prio);
#endif
			}
#endif /* CONFIG_NET_VLAN */

			net_recv_data(get_iface(dev, vlan_tag), pkt);
		} else {
			eth_stats_update_errors_rx(get_iface(dev, vlan_tag));
		}
	}

	/* Anything still set here is a cause this driver does not handle. */
	if (icr) {
		LOG_ERR("Unhandled interrupt, ICR: 0x%x", icr);
	}
}
232 
233 
/* Device init: find the PCIe function, map its registers, program the
 * single TX and RX descriptor rings, enable RX-overrun interrupts, and
 * read the station MAC address.
 * Returns 0 on success, -ENODEV if the PCIe function was not found.
 */
int e1000_probe(const struct device *ddev)
{
	/* PCI ID is decoded into REG_SIZE */
	struct e1000_dev *dev = ddev->data;
	uint32_t ral, rah;
	struct pcie_bar mbar;

	if (dev->pcie->bdf == PCIE_BDF_NONE) {
		return -ENODEV;
	}

	/* Map BAR0 (register space) and enable memory + bus-master access. */
	pcie_probe_mbar(dev->pcie->bdf, 0, &mbar);
	pcie_set_cmd(dev->pcie->bdf, PCIE_CONF_CMDSTAT_MEM |
		     PCIE_CONF_CMDSTAT_MASTER, true);

	device_map(&dev->address, mbar.phys_addr, mbar.size,
		   K_MEM_CACHE_NONE);

	/* Setup TX descriptor */

	/* Ring base: low 32 bits in TDBAL, high 32 bits in TDBAH (the
	 * double shift avoids UB on 32-bit builds where >> 32 is invalid).
	 */
	iow32(dev, TDBAL, (uint32_t)POINTER_TO_UINT(&dev->tx));
	iow32(dev, TDBAH, (uint32_t)((POINTER_TO_UINT(&dev->tx) >> 16) >> 16));
	iow32(dev, TDLEN, 1*16);	/* one 16-byte descriptor */

	iow32(dev, TDH, 0);
	iow32(dev, TDT, 0);

	iow32(dev, TCTL, TCTL_EN);

	/* Setup RX descriptor */

	dev->rx.addr = POINTER_TO_INT(dev->rxb);
	dev->rx.len = sizeof(dev->rxb);

	iow32(dev, RDBAL, (uint32_t)POINTER_TO_UINT(&dev->rx));
	iow32(dev, RDBAH, (uint32_t)((POINTER_TO_UINT(&dev->rx) >> 16) >> 16));
	iow32(dev, RDLEN, 1*16);	/* one 16-byte descriptor */

	iow32(dev, RDH, 0);
	iow32(dev, RDT, 1);	/* tail one past head: descriptor available */

	iow32(dev, IMS, IMS_RXO);

	/* Station MAC address: low 4 bytes from RAL, high 2 from RAH. */
	ral = ior32(dev, RAL);
	rah = ior32(dev, RAH);

	memcpy(dev->mac, &ral, 4);
	memcpy(dev->mac + 4, &rah, 2);

	return 0;
}
285 
/* This driver needs a fixed IRQ line from devicetree; runtime IRQ
 * discovery (PCIE_IRQ_DETECT) is rejected at build time.
 */
BUILD_ASSERT(DT_INST_IRQN(0) != PCIE_IRQ_DETECT,
	     "Dynamic IRQ allocation is not supported");
288 
e1000_iface_init(struct net_if * iface)289 static void e1000_iface_init(struct net_if *iface)
290 {
291 	struct e1000_dev *dev = net_if_get_device(iface)->data;
292 	const struct e1000_config *config = net_if_get_device(iface)->config;
293 
294 	/* For VLAN, this value is only used to get the correct L2 driver.
295 	 * The iface pointer in device context should contain the main
296 	 * interface if the VLANs are enabled.
297 	 */
298 	if (dev->iface == NULL) {
299 		dev->iface = iface;
300 
301 		/* Do the phy link up only once */
302 		config->config_func(dev);
303 	}
304 
305 	ethernet_init(iface);
306 
307 	net_if_set_link_addr(iface, dev->mac, sizeof(dev->mac),
308 			     NET_LINK_ETHERNET);
309 
310 	LOG_DBG("done");
311 }
312 
/* Ethernet driver API vtable registered with the network stack. */
static const struct ethernet_api e1000_api = {
	.iface_api.init		= e1000_iface_init,
#if defined(CONFIG_ETH_E1000_PTP_CLOCK)
	.get_ptp_clock		= e1000_get_ptp_clock,
#endif
	.get_capabilities	= e1000_caps,
	.send			= e1000_send,
};
321 
/* Pick the IRQ flags cell for this instance: some devicetree bindings
 * name the cell "sense", others "flags".
 */
#define E1000_DT_INST_IRQ_FLAGS(inst)					\
	COND_CODE_1(DT_INST_IRQ_HAS_CELL(inst, sense),			\
		    (DT_INST_IRQ(inst, sense)),				\
		    (DT_INST_IRQ(inst, flags)))

/* Per-instance boilerplate: PCIe declaration, driver data, a config
 * function that connects the IRQ and enables link + receiver, the
 * config struct, and the Ethernet device definition itself.
 */
#define E1000_PCI_INIT(inst)						\
	DEVICE_PCIE_INST_DECLARE(inst);					\
									\
	static struct e1000_dev dev_##inst = {				\
		DEVICE_PCIE_INST_INIT(inst, pcie),			\
	};								\
									\
	static void e1000_config_##inst(const struct e1000_dev *dev)	\
	{								\
		IRQ_CONNECT(DT_INST_IRQN(inst),				\
			    DT_INST_IRQ(inst, priority),		\
			    e1000_isr, DEVICE_DT_INST_GET(inst),	\
			    E1000_DT_INST_IRQ_FLAGS(inst));		\
									\
		irq_enable(DT_INST_IRQN(inst));				\
		iow32(dev, CTRL, CTRL_SLU); /* Set link up */		\
		iow32(dev, RCTL, RCTL_EN | RCTL_MPE);			\
	}								\
									\
	static const struct e1000_config config_##inst = {		\
		.config_func = e1000_config_##inst,			\
	};								\
									\
	ETH_NET_DEVICE_DT_INST_DEFINE(inst,				\
				      e1000_probe,			\
				      NULL,				\
				      &dev_##inst,			\
				      &config_##inst,			\
				      CONFIG_ETH_INIT_PRIORITY,		\
				      &e1000_api,			\
				      NET_ETH_MTU);

DT_INST_FOREACH_STATUS_OKAY(E1000_PCI_INIT);
360 
361 #if defined(CONFIG_ETH_E1000_PTP_CLOCK)
/* Per-instance state for the simulated PTP clock. */
struct ptp_context {
	struct e1000_dev *eth_context;	/* owning Ethernet driver instance */

	/* Simulate the clock. This is only for testing.
	 * The value is in nanoseconds
	 */
	uint64_t clock_time;
};
370 
ptp_clock_e1000_set(const struct device * dev,struct net_ptp_time * tm)371 static int ptp_clock_e1000_set(const struct device *dev,
372 			       struct net_ptp_time *tm)
373 {
374 	struct ptp_context *ptp_context = dev->data;
375 
376 	/* TODO: Set the clock real value here */
377 	ptp_context->clock_time = tm->second * NSEC_PER_SEC + tm->nanosecond;
378 
379 	return 0;
380 }
381 
ptp_clock_e1000_get(const struct device * dev,struct net_ptp_time * tm)382 static int ptp_clock_e1000_get(const struct device *dev,
383 			       struct net_ptp_time *tm)
384 {
385 	struct ptp_context *ptp_context = dev->data;
386 
387 	/* TODO: Get the clock value */
388 	tm->second = ptp_context->clock_time / NSEC_PER_SEC;
389 	tm->nanosecond = ptp_context->clock_time - tm->second * NSEC_PER_SEC;
390 
391 	return 0;
392 }
393 
/* Adjust the clock by a fixed increment. Currently a stub. */
static int ptp_clock_e1000_adjust(const struct device *dev, int increment)
{
	/* TODO: Implement clock adjustment */
	ARG_UNUSED(increment);
	ARG_UNUSED(dev);

	return 0;
}
403 
/* Adjust the clock rate by a multiplicative ratio. Computes the per-tick
 * increment (corr) and correction interval multiplier (mul) that a real
 * implementation would program into hardware; the hardware write itself
 * is still a TODO.
 * Returns 0 on success, -EINVAL if the compounded ratio exceeds what a
 * +/-1 ns per-tick correction can represent.
 */
static int ptp_clock_e1000_rate_adjust(const struct device *dev, double ratio)
{
	/* Nominal nanoseconds added per source-clock tick. */
	const int hw_inc = NSEC_PER_SEC / CONFIG_ETH_E1000_PTP_CLOCK_SRC_HZ;
	struct ptp_context *ptp_context = dev->data;
	struct e1000_dev *context = ptp_context->eth_context;
	int corr;
	int32_t mul;
	float val;

	/* No change needed. */
	if (ratio == 1.0f) {
		return 0;
	}

	/* Compound the requested ratio with the one already applied. */
	ratio *= context->clk_ratio;

	/* Limit possible ratio. */
	if ((ratio > 1.0f + 1.0f/(2 * hw_inc)) ||
			(ratio < 1.0f - 1.0f/(2 * hw_inc))) {
		return -EINVAL;
	}

	/* Save new ratio. */
	context->clk_ratio = ratio;

	/* corr is the adjusted per-tick increment; val the interval (in
	 * ticks) at which that +/-1 ns correction would apply.
	 */
	if (ratio < 1.0f) {
		corr = hw_inc - 1;
		val = 1.0f / (hw_inc * (1.0f - ratio));
	} else if (ratio > 1.0f) {
		corr = hw_inc + 1;
		val = 1.0f / (hw_inc * (ratio - 1.0f));
	} else {
		val = 0;
		corr = hw_inc;
	}

	if (val >= INT32_MAX) {
		/* Value is too high.
		 * It is not possible to adjust the rate of the clock.
		 */
		mul = 0;
	} else {
		mul = val;
	}

	/* TODO: Adjust the clock here — corr and mul are computed above
	 * but not yet written to hardware.
	 */

	return 0;
}
453 
/* PTP clock driver API vtable for the simulated clock. */
static const struct ptp_clock_driver_api api = {
	.set = ptp_clock_e1000_set,
	.get = ptp_clock_e1000_get,
	.adjust = ptp_clock_e1000_adjust,
	.rate_adjust = ptp_clock_e1000_rate_adjust,
};
460 
ptp_e1000_init(const struct device * port)461 static int ptp_e1000_init(const struct device *port)
462 {
463 	struct ptp_context *ptp_context = port->data;
464 	struct e1000_dev *context = ptp_context->eth_context;
465 
466 	context->ptp_clock = port;
467 	ptp_context->clock_time = k_ticks_to_ns_floor64(k_uptime_ticks());
468 
469 	return 0;
470 }
471 
/* Define one simulated PTP clock device per enabled e1000 instance,
 * wired to that instance's driver data.
 */
#define E1000_PTP_INIT(inst)						\
	static struct ptp_context ptp_e1000_context_##inst = {		\
		.eth_context = DEVICE_DT_INST_GET(inst)->data,		\
	};								\
									\
	DEVICE_DEFINE(e1000_ptp_clock, PTP_CLOCK_NAME,			\
		      ptp_e1000_init, NULL,				\
		      &ptp_e1000_context_##inst, NULL, POST_KERNEL,	\
		      CONFIG_APPLICATION_INIT_PRIORITY, &api);

DT_INST_FOREACH_STATUS_OKAY(E1000_PTP_INIT);
483 
484 #endif /* CONFIG_ETH_E1000_PTP_CLOCK */
485