/*
 * Copyright (c) 2018-2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT intel_e1000

#define LOG_MODULE_NAME eth_e1000
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <sys/types.h>
#include <zephyr.h>
#include <net/ethernet.h>
#include <ethernet/eth_stats.h>
#include <drivers/pcie/pcie.h>
#include "eth_e1000_priv.h"

#if defined(CONFIG_ETH_E1000_PTP_CLOCK)
#include <ptp_clock.h>

#define PTP_INST_NODEID(n) DT_CHILD(DT_DRV_INST(n), ptp)
#endif

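/* hexdump() logs a buffer prefixed with the calling function name and a
 * printf-style message when verbose debugging is enabled; otherwise it
 * compiles away to nothing.
 */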
#if defined(CONFIG_ETH_E1000_VERBOSE_DEBUG)
#define hexdump(_buf, _len, fmt, args...) \
({ \
	const size_t STR_SIZE = 80; \
	char _str[STR_SIZE]; \
\
	snprintk(_str, STR_SIZE, "%s: " fmt, __func__, ## args); \
\
	LOG_HEXDUMP_DBG(_buf, _len, log_strdup(_str)); \
})
#else
#define hexdump(args...)
#endif

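/* Translate a register enum value into its name for debug output; an
 * unknown register is treated as a driver bug and is fatal.
 */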
static const char *e1000_reg_to_string(enum e1000_reg_t r)
{
#define _(_x)	case _x: return #_x
	switch (r) {
	_(CTRL);
	_(ICR);
	_(ICS);
	_(IMS);
	_(RCTL);
	_(TCTL);
	_(RDBAL);
	_(RDBAH);
	_(RDLEN);
	_(RDH);
	_(RDT);
	_(TDBAL);
	_(TDBAH);
	_(TDLEN);
	_(TDH);
	_(TDT);
	_(RAL);
	_(RAH);
	}
#undef _
	LOG_ERR("Unsupported register: 0x%x", r);
	k_oops();
	return NULL;
}

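/* Return the interface matching the VLAN tag, or the main interface when
 * there is no VLAN match (or VLAN support is disabled).
 */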
static struct net_if *get_iface(struct e1000_dev *ctx, uint16_t vlan_tag)
{
#if defined(CONFIG_NET_VLAN)
	struct net_if *iface;

	iface = net_eth_get_vlan_iface(ctx->iface, vlan_tag);
	if (!iface) {
		return ctx->iface;
	}

	return iface;
#else
	ARG_UNUSED(vlan_tag);

	return ctx->iface;
#endif
}

static enum ethernet_hw_caps e1000_caps(const struct device *dev)
{
	return
#if IS_ENABLED(CONFIG_NET_VLAN)
		ETHERNET_HW_VLAN |
#endif
#if IS_ENABLED(CONFIG_ETH_E1000_PTP_CLOCK)
		ETHERNET_PTP |
#endif
		ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T |
		ETHERNET_LINK_1000BASE_T |
		/* The driver does not really support TXTIME at the moment,
		 * but advertise it so that the txtime sample can be tested.
		 */
		ETHERNET_TXTIME;
}

#if defined(CONFIG_ETH_E1000_PTP_CLOCK)
static const struct device *e1000_get_ptp_clock(const struct device *dev)
{
	struct e1000_dev *ctx = dev->data;

	return ctx->ptp_clock;
}
#endif

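/* Transmit through the single TX descriptor: fill it in, bump the tail
 * pointer so the hardware fetches it, then yield until the hardware
 * writes the status back. DD (descriptor done) means success.
 */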
static int e1000_tx(struct e1000_dev *dev, void *buf, size_t len)
{
	hexdump(buf, len, "%zu byte(s)", len);

	dev->tx.addr = POINTER_TO_INT(buf);
	dev->tx.len = len;
	dev->tx.cmd = TDESC_EOP | TDESC_RS;

	iow32(dev, TDT, 1);

	while (!(dev->tx.sta)) {
		k_yield();
	}

	LOG_DBG("tx.sta: 0x%02hx", dev->tx.sta);

	return (dev->tx.sta & TDESC_STA_DD) ? 0 : -EIO;
}

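/* Linearize the packet into the driver's TX bounce buffer and send it. */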
static int e1000_send(const struct device *ddev, struct net_pkt *pkt)
{
	struct e1000_dev *dev = ddev->data;
	size_t len = net_pkt_get_len(pkt);

	if (net_pkt_read(pkt, dev->txb, len)) {
		return -EIO;
	}

	return e1000_tx(dev, dev->txb, len);
}

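/* Copy the received frame out of the single RX descriptor buffer into a
 * freshly allocated net_pkt. The trailing 4 bytes are the CRC appended by
 * the hardware and are stripped.
 */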
static struct net_pkt *e1000_rx(struct e1000_dev *dev)
{
	struct net_pkt *pkt = NULL;
	void *buf;
	ssize_t len;

	LOG_DBG("rx.sta: 0x%02hx", dev->rx.sta);

	if (!(dev->rx.sta & RDESC_STA_DD)) {
		LOG_ERR("RX descriptor not ready");
		goto out;
	}

	buf = INT_TO_POINTER((uint32_t)dev->rx.addr);
	len = dev->rx.len - 4;

	if (len <= 0) {
		LOG_ERR("Invalid RX descriptor length: %hu", dev->rx.len);
		goto out;
	}

	hexdump(buf, len, "%zd byte(s)", len);

	pkt = net_pkt_rx_alloc_with_buffer(dev->iface, len, AF_UNSPEC, 0,
					   K_NO_WAIT);
	if (!pkt) {
		LOG_ERR("Out of buffers");
		goto out;
	}

	if (net_pkt_write(pkt, buf, len)) {
		LOG_ERR("Out of memory for received frame");
		net_pkt_unref(pkt);
		pkt = NULL;
	}

out:
	return pkt;
}

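/* Interrupt handler. Reading ICR acknowledges the interrupt; TX completion
 * bits are ignored because transmission is polled, the receive event is
 * handed up the stack (with the VLAN tag extracted if enabled), and any
 * remaining cause is logged as unhandled.
 */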
static void e1000_isr(const struct device *ddev)
{
	struct e1000_dev *dev = ddev->data;
	uint32_t icr = ior32(dev, ICR); /* Cleared upon read */
	uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;

	icr &= ~(ICR_TXDW | ICR_TXQE);

	if (icr & ICR_RXO) {
		struct net_pkt *pkt = e1000_rx(dev);

		icr &= ~ICR_RXO;

		if (pkt) {
#if defined(CONFIG_NET_VLAN)
			struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);

			if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) {
				struct net_eth_vlan_hdr *hdr_vlan =
					(struct net_eth_vlan_hdr *)
					NET_ETH_HDR(pkt);

				net_pkt_set_vlan_tci(
					pkt, ntohs(hdr_vlan->vlan.tci));
				vlan_tag = net_pkt_vlan_tag(pkt);

#if CONFIG_NET_TC_RX_COUNT > 1
				enum net_priority prio;

				prio = net_vlan2priority(
					net_pkt_vlan_priority(pkt));
				net_pkt_set_priority(pkt, prio);
#endif
			}
#endif /* CONFIG_NET_VLAN */

			net_recv_data(get_iface(dev, vlan_tag), pkt);
		} else {
			eth_stats_update_errors_rx(get_iface(dev, vlan_tag));
		}
	}

	if (icr) {
		LOG_ERR("Unhandled interrupt, ICR: 0x%x", icr);
	}
}

#define PCI_VENDOR_ID_INTEL	0x8086
#define PCI_DEVICE_ID_I82540EM	0x100e

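/* Probe for the 82540EM at the fixed PCI location 0:3.0 (where the emulated
 * device is normally placed), map BAR0, set up the one-entry TX and RX
 * descriptor "rings", and read the MAC address that the platform programmed
 * into RAL/RAH.
 */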
int e1000_probe(const struct device *ddev)
{
	const pcie_bdf_t bdf = PCIE_BDF(0, 3, 0);
	struct e1000_dev *dev = ddev->data;
	uint32_t ral, rah;
	struct pcie_mbar mbar;

	if (!pcie_probe(bdf, PCIE_ID(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_I82540EM))) {
		return -ENODEV;
	}

	pcie_probe_mbar(bdf, 0, &mbar);
	pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MEM |
		     PCIE_CONF_CMDSTAT_MASTER, true);

	device_map(&dev->address, mbar.phys_addr, mbar.size,
		   K_MEM_CACHE_NONE);

	/* Set up the single TX descriptor */

	iow32(dev, TDBAL, (uint32_t) &dev->tx);
	iow32(dev, TDBAH, 0);
	iow32(dev, TDLEN, 1*16);

	iow32(dev, TDH, 0);
	iow32(dev, TDT, 0);

	iow32(dev, TCTL, TCTL_EN);

	/* Set up the single RX descriptor */

	dev->rx.addr = POINTER_TO_INT(dev->rxb);
	dev->rx.len = sizeof(dev->rxb);

	iow32(dev, RDBAL, (uint32_t) &dev->rx);
	iow32(dev, RDBAH, 0);
	iow32(dev, RDLEN, 1*16);

	iow32(dev, RDH, 0);
	iow32(dev, RDT, 1);

	iow32(dev, IMS, IMS_RXO);

	ral = ior32(dev, RAL);
	rah = ior32(dev, RAH);

	memcpy(dev->mac, &ral, 4);
	memcpy(dev->mac + 4, &rah, 2);

	return 0;
}

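/* Called for the main interface and again for every VLAN interface; the
 * IRQ is connected and the link brought up only on the first call.
 */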
static void e1000_iface_init(struct net_if *iface)
{
	struct e1000_dev *dev = net_if_get_device(iface)->data;

	/* For VLAN, this value is only used to get the correct L2 driver.
	 * The iface pointer in the device context must point to the main
	 * interface when VLANs are enabled.
	 */
	if (dev->iface == NULL) {
		dev->iface = iface;

		/* Connect the IRQ and do the PHY link up only once */
		IRQ_CONNECT(DT_INST_IRQN(0),
			    DT_INST_IRQ(0, priority),
			    e1000_isr, DEVICE_DT_INST_GET(0),
			    DT_INST_IRQ(0, sense));

		irq_enable(DT_INST_IRQN(0));
		iow32(dev, CTRL, CTRL_SLU); /* Set link up */
		iow32(dev, RCTL, RCTL_EN | RCTL_MPE);
	}

	ethernet_init(iface);

	net_if_set_link_addr(iface, dev->mac, sizeof(dev->mac),
			     NET_LINK_ETHERNET);

	LOG_DBG("done");
}

static struct e1000_dev e1000_dev;

static const struct ethernet_api e1000_api = {
	.iface_api.init = e1000_iface_init,
#if defined(CONFIG_ETH_E1000_PTP_CLOCK)
	.get_ptp_clock = e1000_get_ptp_clock,
#endif
	.get_capabilities = e1000_caps,
	.send = e1000_send,
};

ETH_NET_DEVICE_DT_INST_DEFINE(0,
			      e1000_probe,
			      NULL,
			      &e1000_dev,
			      NULL,
			      CONFIG_ETH_INIT_PRIORITY,
			      &e1000_api,
			      NET_ETH_MTU);

#if defined(CONFIG_ETH_E1000_PTP_CLOCK)
struct ptp_context {
	struct e1000_dev *eth_context;

	/* Simulate the clock. This is only for testing.
	 * The value is in nanoseconds.
	 */
	uint64_t clock_time;
};

static struct ptp_context ptp_e1000_context;

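/* The set/get callbacks below only manipulate the simulated nanosecond
 * counter; the E1000 timestamping hardware is not touched yet (see the
 * TODOs).
 */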
static int ptp_clock_e1000_set(const struct device *dev,
			       struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;

	/* TODO: Set the real clock value here */
	ptp_context->clock_time = tm->second * NSEC_PER_SEC + tm->nanosecond;

	return 0;
}

static int ptp_clock_e1000_get(const struct device *dev,
			       struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;

	/* TODO: Get the clock value */
	tm->second = ptp_context->clock_time / NSEC_PER_SEC;
	tm->nanosecond = ptp_context->clock_time - tm->second * NSEC_PER_SEC;

	return 0;
}

static int ptp_clock_e1000_adjust(const struct device *dev, int increment)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(increment);

	/* TODO: Implement clock adjustment */

	return 0;
}

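/* Convert the requested rate ratio into increment correction values:
 * hw_inc is the nominal number of nanoseconds added per clock source tick,
 * corr is that increment nudged by +/-1 ns, and mul says how often (every
 * mul-th tick) the nudged increment would have to be used for the average
 * rate to match the ratio. Ratios that would need the correction more often
 * than every other tick are rejected. Programming the hardware with these
 * values is still a TODO below.
 */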
static int ptp_clock_e1000_rate_adjust(const struct device *dev, float ratio)
{
	const int hw_inc = NSEC_PER_SEC / CONFIG_ETH_E1000_PTP_CLOCK_SRC_HZ;
	struct ptp_context *ptp_context = dev->data;
	struct e1000_dev *context = ptp_context->eth_context;
	int corr;
	int32_t mul;
	float val;

	/* No change needed. */
	if (ratio == 1.0) {
		return 0;
	}

	ratio *= context->clk_ratio;

	/* Limit possible ratio. */
	if ((ratio > 1.0 + 1.0 / (2 * hw_inc)) ||
	    (ratio < 1.0 - 1.0 / (2 * hw_inc))) {
		return -EINVAL;
	}

	/* Save new ratio. */
	context->clk_ratio = ratio;

	if (ratio < 1.0) {
		corr = hw_inc - 1;
		val = 1.0 / (hw_inc * (1.0 - ratio));
	} else if (ratio > 1.0) {
		corr = hw_inc + 1;
		val = 1.0 / (hw_inc * (ratio - 1.0));
	} else {
		val = 0;
		corr = hw_inc;
	}

	if (val >= INT32_MAX) {
		/* Value is too high.
		 * It is not possible to adjust the rate of the clock.
		 */
		mul = 0;
	} else {
		mul = val;
	}

	/* TODO: Adjust the clock here */

	return 0;
}

static const struct ptp_clock_driver_api api = {
	.set = ptp_clock_e1000_set,
	.get = ptp_clock_e1000_get,
	.adjust = ptp_clock_e1000_adjust,
	.rate_adjust = ptp_clock_e1000_rate_adjust,
};

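/* Bind the PTP clock device to the Ethernet context and seed the simulated
 * clock from the current kernel uptime.
 */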
static int ptp_e1000_init(const struct device *port)
{
	const struct device *eth_dev = DEVICE_DT_INST_GET(0);
	struct e1000_dev *context = eth_dev->data;
	struct ptp_context *ptp_context = port->data;

	context->ptp_clock = port;
	ptp_context->eth_context = context;

	ptp_context->clock_time = k_ticks_to_ns_floor64(k_uptime_ticks());

	return 0;
}

DEVICE_DEFINE(e1000_ptp_clock, PTP_CLOCK_NAME, ptp_e1000_init,
	      NULL, &ptp_e1000_context, NULL, POST_KERNEL,
	      CONFIG_APPLICATION_INIT_PRIORITY, &api);

#endif /* CONFIG_ETH_E1000_PTP_CLOCK */