1 /*
2 * Copyright (c) 2018-2019 Intel Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT intel_e1000
8
9 #define LOG_MODULE_NAME eth_e1000
10 #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
11 #include <zephyr/logging/log.h>
12 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
13
14 #include <sys/types.h>
15 #include <zephyr/kernel.h>
16 #include <zephyr/net/ethernet.h>
17 #include <ethernet/eth_stats.h>
18 #include <zephyr/drivers/pcie/pcie.h>
19 #include <zephyr/irq.h>
20 #include "eth_e1000_priv.h"
21
22 #if defined(CONFIG_ETH_E1000_PTP_CLOCK)
23 #include <zephyr/drivers/ptp_clock.h>
24
25 #define PTP_INST_NODEID(n) DT_INST_CHILD(n, ptp)
26 #endif
27
28 #if defined(CONFIG_ETH_E1000_VERBOSE_DEBUG)
29 #define hexdump(_buf, _len, fmt, args...) \
30 ({ \
31 const size_t STR_SIZE = 80; \
32 char _str[STR_SIZE]; \
33 \
34 snprintk(_str, STR_SIZE, "%s: " fmt, __func__, ## args); \
35 \
36 LOG_HEXDUMP_DBG(_buf, _len, _str); \
37 })
38 #else
39 #define hexdump(args...)
40 #endif
41
e1000_reg_to_string(enum e1000_reg_t r)42 static const char *e1000_reg_to_string(enum e1000_reg_t r)
43 {
44 #define _(_x) case _x: return #_x
45 switch (r) {
46 _(CTRL);
47 _(ICR);
48 _(ICS);
49 _(IMS);
50 _(RCTL);
51 _(TCTL);
52 _(RDBAL);
53 _(RDBAH);
54 _(RDLEN);
55 _(RDH);
56 _(RDT);
57 _(TDBAL);
58 _(TDBAH);
59 _(TDLEN);
60 _(TDH);
61 _(TDT);
62 _(RAL);
63 _(RAH);
64 }
65 #undef _
66 LOG_ERR("Unsupported register: 0x%x", r);
67 k_oops();
68 return NULL;
69 }
70
get_iface(struct e1000_dev * ctx)71 static struct net_if *get_iface(struct e1000_dev *ctx)
72 {
73 return ctx->iface;
74 }
75
e1000_caps(const struct device * dev)76 static enum ethernet_hw_caps e1000_caps(const struct device *dev)
77 {
78 return
79 #if defined(CONFIG_NET_VLAN)
80 ETHERNET_HW_VLAN |
81 #endif
82 #if defined(CONFIG_ETH_E1000_PTP_CLOCK)
83 ETHERNET_PTP |
84 #endif
85 ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T |
86 ETHERNET_LINK_1000BASE_T |
87 /* The driver does not really support TXTIME atm but mark
88 * it to support it so that we can test the txtime sample.
89 */
90 ETHERNET_TXTIME;
91 }
92
#if defined(CONFIG_ETH_E1000_PTP_CLOCK)
/* ethernet_api.get_ptp_clock: return the PTP clock device registered for
 * this controller (set by ptp_e1000_init()).
 */
static const struct device *e1000_get_ptp_clock(const struct device *dev)
{
	struct e1000_dev *ctx = dev->data;
	const struct device *clk = ctx->ptp_clock;

	return clk;
}
#endif
101
/*
 * Transmit one frame synchronously through the single TX descriptor.
 *
 * NOTE(review): the completion loop below has no timeout — if the
 * hardware never updates tx.sta this yields forever. Verify that is
 * acceptable for the targets this driver supports.
 *
 * Returns 0 when the descriptor-done bit is set, -EIO otherwise.
 */
static int e1000_tx(struct e1000_dev *dev, void *buf, size_t len)
{
	hexdump(buf, len, "%zu byte(s)", len);

	/* Describe the frame in the (only) TX descriptor. */
	dev->tx.addr = POINTER_TO_INT(buf);
	dev->tx.len = len;
	/* EOP: end of packet; RS: report status, so tx.sta gets written. */
	dev->tx.cmd = TDESC_EOP | TDESC_RS;

	/* Bump the tail pointer so the controller picks up the descriptor. */
	iow32(dev, TDT, 1);

	/* Wait for any status bit; yield so other threads can run. */
	while (!(dev->tx.sta)) {
		k_yield();
	}

	LOG_DBG("tx.sta: 0x%02hx", dev->tx.sta);

	return (dev->tx.sta & TDESC_STA_DD) ? 0 : -EIO;
}
120
e1000_send(const struct device * ddev,struct net_pkt * pkt)121 static int e1000_send(const struct device *ddev, struct net_pkt *pkt)
122 {
123 struct e1000_dev *dev = ddev->data;
124 size_t len = net_pkt_get_len(pkt);
125
126 if (net_pkt_read(pkt, dev->txb, len)) {
127 return -EIO;
128 }
129
130 return e1000_tx(dev, dev->txb, len);
131 }
132
/*
 * Drain the single RX descriptor into a freshly allocated net_pkt.
 *
 * Returns the packet on success, or NULL when the descriptor is not
 * ready, the reported length is bogus, or allocation/copy fails; the
 * caller accounts NULL as an RX error.
 */
static struct net_pkt *e1000_rx(struct e1000_dev *dev)
{
	struct net_pkt *pkt = NULL;
	void *buf;
	ssize_t len;

	LOG_DBG("rx.sta: 0x%02hx", dev->rx.sta);

	/* DD (descriptor done) means the NIC has filled this buffer. */
	if (!(dev->rx.sta & RDESC_STA_DD)) {
		LOG_ERR("RX descriptor not ready");
		goto out;
	}

	/* NOTE(review): the address is truncated to 32 bits; this assumes
	 * the RX buffer sits in the low 4 GiB — confirm on 64-bit targets.
	 */
	buf = INT_TO_POINTER((uint32_t)dev->rx.addr);
	/* Drop the 4-byte CRC the controller appends to the frame. */
	len = dev->rx.len - 4;

	/* len is signed, so a descriptor length < 4 is rejected here too. */
	if (len <= 0) {
		LOG_ERR("Invalid RX descriptor length: %hu", dev->rx.len);
		goto out;
	}

	hexdump(buf, len, "%zd byte(s)", len);

	pkt = net_pkt_rx_alloc_with_buffer(dev->iface, len, AF_UNSPEC, 0,
					   K_NO_WAIT);
	if (!pkt) {
		LOG_ERR("Out of buffers");
		goto out;
	}

	if (net_pkt_write(pkt, buf, len)) {
		LOG_ERR("Out of memory for received frame");
		net_pkt_unref(pkt);
		pkt = NULL;
	}

out:
	return pkt;
}
172
e1000_isr(const struct device * ddev)173 static void e1000_isr(const struct device *ddev)
174 {
175 struct e1000_dev *dev = ddev->data;
176 uint32_t icr = ior32(dev, ICR); /* Cleared upon read */
177
178 icr &= ~(ICR_TXDW | ICR_TXQE);
179
180 if (icr & ICR_RXO) {
181 struct net_pkt *pkt = e1000_rx(dev);
182
183 icr &= ~ICR_RXO;
184
185 if (pkt) {
186 net_recv_data(get_iface(dev), pkt);
187 } else {
188 eth_stats_update_errors_rx(get_iface(dev));
189 }
190 }
191
192 if (icr) {
193 LOG_ERR("Unhandled interrupt, ICR: 0x%x", icr);
194 }
195 }
196
197
e1000_probe(const struct device * ddev)198 int e1000_probe(const struct device *ddev)
199 {
200 /* PCI ID is decoded into REG_SIZE */
201 struct e1000_dev *dev = ddev->data;
202 uint32_t ral, rah;
203 struct pcie_bar mbar;
204
205 if (dev->pcie->bdf == PCIE_BDF_NONE) {
206 return -ENODEV;
207 }
208
209 pcie_probe_mbar(dev->pcie->bdf, 0, &mbar);
210 pcie_set_cmd(dev->pcie->bdf, PCIE_CONF_CMDSTAT_MEM |
211 PCIE_CONF_CMDSTAT_MASTER, true);
212
213 device_map(&dev->address, mbar.phys_addr, mbar.size,
214 K_MEM_CACHE_NONE);
215
216 /* Setup TX descriptor */
217
218 iow32(dev, TDBAL, (uint32_t)POINTER_TO_UINT(&dev->tx));
219 iow32(dev, TDBAH, (uint32_t)((POINTER_TO_UINT(&dev->tx) >> 16) >> 16));
220 iow32(dev, TDLEN, 1*16);
221
222 iow32(dev, TDH, 0);
223 iow32(dev, TDT, 0);
224
225 iow32(dev, TCTL, TCTL_EN);
226
227 /* Setup RX descriptor */
228
229 dev->rx.addr = POINTER_TO_INT(dev->rxb);
230 dev->rx.len = sizeof(dev->rxb);
231
232 iow32(dev, RDBAL, (uint32_t)POINTER_TO_UINT(&dev->rx));
233 iow32(dev, RDBAH, (uint32_t)((POINTER_TO_UINT(&dev->rx) >> 16) >> 16));
234 iow32(dev, RDLEN, 1*16);
235
236 iow32(dev, RDH, 0);
237 iow32(dev, RDT, 1);
238
239 iow32(dev, IMS, IMS_RXO);
240
241 ral = ior32(dev, RAL);
242 rah = ior32(dev, RAH);
243
244 memcpy(dev->mac, &ral, 4);
245 memcpy(dev->mac + 4, &rah, 2);
246
247 return 0;
248 }
249
/* The driver wires the ISR at build time via IRQ_CONNECT, so the
 * devicetree must provide a fixed interrupt number for instance 0.
 */
BUILD_ASSERT(DT_INST_IRQN(0) != PCIE_IRQ_DETECT,
	     "Dynamic IRQ allocation is not supported");
252
e1000_iface_init(struct net_if * iface)253 static void e1000_iface_init(struct net_if *iface)
254 {
255 struct e1000_dev *dev = net_if_get_device(iface)->data;
256 const struct e1000_config *config = net_if_get_device(iface)->config;
257
258 if (dev->iface == NULL) {
259 dev->iface = iface;
260
261 /* Do the phy link up only once */
262 config->config_func(dev);
263 }
264
265 ethernet_init(iface);
266
267 net_if_set_link_addr(iface, dev->mac, sizeof(dev->mac),
268 NET_LINK_ETHERNET);
269
270 LOG_DBG("done");
271 }
272
/* Ethernet driver API hooks exposed to the network stack. */
static const struct ethernet_api e1000_api = {
	.iface_api.init = e1000_iface_init,
#if defined(CONFIG_ETH_E1000_PTP_CLOCK)
	.get_ptp_clock = e1000_get_ptp_clock,
#endif
	.get_capabilities = e1000_caps,
	.send = e1000_send,
};
281
/* Pick the IRQ flags cell for an instance: prefer the "sense" cell when
 * the devicetree binding provides it, otherwise fall back to "flags".
 */
#define E1000_DT_INST_IRQ_FLAGS(inst)                     \
	COND_CODE_1(DT_INST_IRQ_HAS_CELL(inst, sense),    \
		    (DT_INST_IRQ(inst, sense)),           \
		    (DT_INST_IRQ(inst, flags)))
286
/*
 * Per-instance instantiation:
 *  - declare the PCIe handle and embed it in the driver data (dev_<n>),
 *  - define the one-shot config function that connects and enables the
 *    IRQ, forces link-up (CTRL_SLU) and enables the receiver in
 *    promiscuous mode (RCTL_EN | RCTL_MPE),
 *  - register the instance as an Ethernet network device.
 */
#define E1000_PCI_INIT(inst)                                           \
	DEVICE_PCIE_INST_DECLARE(inst);                                \
									\
	static struct e1000_dev dev_##inst = {                         \
		DEVICE_PCIE_INST_INIT(inst, pcie),                     \
	};                                                             \
									\
	static void e1000_config_##inst(const struct e1000_dev *dev)   \
	{                                                              \
		IRQ_CONNECT(DT_INST_IRQN(inst),                        \
			    DT_INST_IRQ(inst, priority),               \
			    e1000_isr, DEVICE_DT_INST_GET(inst),       \
			    E1000_DT_INST_IRQ_FLAGS(inst));            \
									\
		irq_enable(DT_INST_IRQN(inst));                        \
		iow32(dev, CTRL, CTRL_SLU); /* Set link up */          \
		iow32(dev, RCTL, RCTL_EN | RCTL_MPE);                  \
	}                                                              \
									\
	static const struct e1000_config config_##inst = {             \
		.config_func = e1000_config_##inst,                    \
	};                                                             \
									\
	ETH_NET_DEVICE_DT_INST_DEFINE(inst,                            \
				      e1000_probe,                     \
				      NULL,                            \
				      &dev_##inst,                     \
				      &config_##inst,                  \
				      CONFIG_ETH_INIT_PRIORITY,        \
				      &e1000_api,                      \
				      NET_ETH_MTU);

DT_INST_FOREACH_STATUS_OKAY(E1000_PCI_INIT);
320
321 #if defined(CONFIG_ETH_E1000_PTP_CLOCK)
/* Driver data for the simulated PTP clock device. */
struct ptp_context {
	/* Ethernet controller this clock belongs to. */
	struct e1000_dev *eth_context;

	/* Simulate the clock. This is only for testing.
	 * The value is in nanoseconds
	 */
	uint64_t clock_time;
};
330
ptp_clock_e1000_set(const struct device * dev,struct net_ptp_time * tm)331 static int ptp_clock_e1000_set(const struct device *dev,
332 struct net_ptp_time *tm)
333 {
334 struct ptp_context *ptp_context = dev->data;
335
336 /* TODO: Set the clock real value here */
337 ptp_context->clock_time = tm->second * NSEC_PER_SEC + tm->nanosecond;
338
339 return 0;
340 }
341
ptp_clock_e1000_get(const struct device * dev,struct net_ptp_time * tm)342 static int ptp_clock_e1000_get(const struct device *dev,
343 struct net_ptp_time *tm)
344 {
345 struct ptp_context *ptp_context = dev->data;
346
347 /* TODO: Get the clock value */
348 tm->second = ptp_context->clock_time / NSEC_PER_SEC;
349 tm->nanosecond = ptp_context->clock_time - tm->second * NSEC_PER_SEC;
350
351 return 0;
352 }
353
/* ptp_clock API .adjust: stub — offset adjustment is not implemented. */
static int ptp_clock_e1000_adjust(const struct device *dev, int increment)
{
	/* TODO: Implement clock adjustment */
	ARG_UNUSED(dev);
	ARG_UNUSED(increment);

	return 0;
}
363
/*
 * ptp_clock API .rate_adjust: fold a frequency-ratio request into the
 * accumulated clock ratio and derive the correction increment (corr)
 * and multiplier (mul) relative to the nominal hardware increment.
 *
 * NOTE(review): corr and mul are computed but never applied — the
 * actual clock adjustment is still the TODO at the bottom.
 */
static int ptp_clock_e1000_rate_adjust(const struct device *dev, double ratio)
{
	/* Nominal nanoseconds added per source-clock tick. */
	const int hw_inc = NSEC_PER_SEC / CONFIG_ETH_E1000_PTP_CLOCK_SRC_HZ;
	struct ptp_context *ptp_context = dev->data;
	struct e1000_dev *context = ptp_context->eth_context;
	int corr;
	int32_t mul;
	float val;

	/* No change needed. */
	if (ratio == 1.0) {
		return 0;
	}

	/* Fold the request into the previously accumulated ratio. */
	ratio *= context->clk_ratio;

	/* Limit possible ratio: at most half an increment per tick. */
	if ((ratio > 1.0 + 1.0/(2.0 * hw_inc)) ||
	    (ratio < 1.0 - 1.0/(2.0 * hw_inc))) {
		return -EINVAL;
	}

	/* Save new ratio. */
	context->clk_ratio = ratio;

	if (ratio < 1.0) {
		corr = hw_inc - 1;
		val = 1.0 / (hw_inc * (1.0 - ratio));
	} else if (ratio > 1.0) {
		corr = hw_inc + 1;
		val = 1.0 / (hw_inc * (ratio - 1.0));
	} else {
		val = 0;
		corr = hw_inc;
	}

	if (val >= INT32_MAX) {
		/* Value is too high.
		 * It is not possible to adjust the rate of the clock.
		 */
		mul = 0;
	} else {
		mul = val;
	}

	/* TODO: Adjust the clock here */

	return 0;
}
413
/* PTP clock driver API table for the simulated clock. */
static DEVICE_API(ptp_clock, api) = {
	.set = ptp_clock_e1000_set,
	.get = ptp_clock_e1000_get,
	.adjust = ptp_clock_e1000_adjust,
	.rate_adjust = ptp_clock_e1000_rate_adjust,
};
420
ptp_e1000_init(const struct device * port)421 static int ptp_e1000_init(const struct device *port)
422 {
423 struct ptp_context *ptp_context = port->data;
424 struct e1000_dev *context = ptp_context->eth_context;
425
426 context->ptp_clock = port;
427 ptp_context->clock_time = k_ticks_to_ns_floor64(k_uptime_ticks());
428
429 return 0;
430 }
431
/* Define one simulated PTP clock device per Ethernet instance, linked
 * to that instance's driver data.
 */
#define E1000_PTP_INIT(inst)                                            \
	static struct ptp_context ptp_e1000_context_##inst = {          \
		.eth_context = DEVICE_DT_INST_GET(inst)->data,          \
	};                                                              \
									\
	DEVICE_DEFINE(e1000_ptp_clock, PTP_CLOCK_NAME,                  \
		      ptp_e1000_init, NULL,                             \
		      &ptp_e1000_context_##inst, NULL, POST_KERNEL,     \
		      CONFIG_APPLICATION_INIT_PRIORITY, &api);

DT_INST_FOREACH_STATUS_OKAY(E1000_PTP_INIT);
443
444 #endif /* CONFIG_ETH_E1000_PTP_CLOCK */
445