/*
 * Copyright 2023 NXP
 * Copyright 2023 CogniPilot Foundation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_tja1103

#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/sys/util.h>
#include <zephyr/net/phy.h>
#include <zephyr/net/mii.h>
#include <zephyr/net/mdio.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/mdio.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(phy_tja1103, CONFIG_PHY_LOG_LEVEL);

/* PHY out-of-reset check retry delay */
#define TJA1103_AWAIT_DELAY_POLL_US 15000U
/* Number of retries for the PHY out-of-reset check */
#define TJA1103_AWAIT_RETRY_COUNT 200U

/* TJA1103 PHY identifier */
#define TJA1103_ID 0x1BB013

/* MMD30 - Device control register */
#define TJA1103_DEVICE_CONTROL (0x0040U)
#define TJA1103_DEVICE_CONTROL_GLOBAL_CFG_EN BIT(14)
#define TJA1103_DEVICE_CONTROL_SUPER_CFG_EN BIT(13)
/* Shared - PHY control register */
#define TJA1103_PHY_CONTROL (0x8100U)
#define TJA1103_PHY_CONTROL_CFG_EN BIT(14)
/* Shared - PHY status register */
#define TJA1103_PHY_STATUS (0x8102U)
#define TJA1103_PHY_STATUS_LINK_STAT BIT(2)

/* Shared - PHY functional IRQ masked status register */
#define TJA1103_PHY_FUNC_IRQ_MSTATUS (0x80A2)
#define TJA1103_PHY_FUNC_IRQ_LINK_EVENT BIT(1)
#define TJA1103_PHY_FUNC_IRQ_LINK_AVAIL BIT(2)
/* Shared - PHY functional IRQ source & enable registers */
#define TJA1103_PHY_FUNC_IRQ_ACK (0x80A0)
#define TJA1103_PHY_FUNC_IRQ_EN (0x80A1)
#define TJA1103_PHY_FUNC_IRQ_LINK_EVENT_EN BIT(1)
#define TJA1103_PHY_FUNC_IRQ_LINK_AVAIL_EN BIT(2)
/* Always accessible register used for NMIs */
#define TJA1103_ALWAYS_ACCESSIBLE (0x801F)
#define TJA1103_ALWAYS_ACCESSIBLE_FUSA_PASS_IRQ BIT(4)
struct phy_tja1103_config {
	const struct device *mdio;
	struct gpio_dt_spec gpio_interrupt;
	uint8_t phy_addr;
	uint8_t master_slave;
};

struct phy_tja1103_data {
	const struct device *dev;
	struct phy_link_state state;
	struct k_sem sem;
	struct k_sem offload_sem;
	phy_callback_t cb;
	struct gpio_callback phy_tja1103_int_callback;
	void *cb_data;

	K_KERNEL_STACK_MEMBER(irq_thread_stack, CONFIG_PHY_TJA1103_IRQ_THREAD_STACK_SIZE);
	struct k_thread irq_thread;

	struct k_work_delayable monitor_work;
};

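/*
 * Thin MDIO accessors: Clause 22 read/write for the standard MII registers and
 * Clause 45 read/write for the MMD register space used by the TJA1103
 * vendor-specific configuration and status registers.
 */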
static inline int phy_tja1103_c22_read(const struct device *dev, uint16_t reg, uint16_t *val)
{
	const struct phy_tja1103_config *const cfg = dev->config;

	return mdio_read(cfg->mdio, cfg->phy_addr, reg, val);
}

static inline int phy_tja1103_c22_write(const struct device *dev, uint16_t reg, uint16_t val)
{
	const struct phy_tja1103_config *const cfg = dev->config;

	return mdio_write(cfg->mdio, cfg->phy_addr, reg, val);
}

static inline int phy_tja1103_c45_write(const struct device *dev, uint16_t devad, uint16_t reg,
					uint16_t val)
{
	const struct phy_tja1103_config *cfg = dev->config;

	return mdio_write_c45(cfg->mdio, cfg->phy_addr, devad, reg, val);
}

static inline int phy_tja1103_c45_read(const struct device *dev, uint16_t devad, uint16_t reg,
				       uint16_t *val)
{
	const struct phy_tja1103_config *cfg = dev->config;

	return mdio_read_c45(cfg->mdio, cfg->phy_addr, devad, reg, val);
}

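/*
 * Register accessors exposed through the PHY driver API. They use Clause 22
 * transfers and wrap each access in mdio_bus_enable()/mdio_bus_disable() so the
 * MDIO bus is only kept active for the duration of the transaction.
 */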
static int phy_tja1103_reg_read(const struct device *dev, uint16_t reg_addr, uint32_t *data)
{
	const struct phy_tja1103_config *cfg = dev->config;
	int ret;

	mdio_bus_enable(cfg->mdio);

	ret = phy_tja1103_c22_read(dev, reg_addr, (uint16_t *)data);

	mdio_bus_disable(cfg->mdio);

	return ret;
}

static int phy_tja1103_reg_write(const struct device *dev, uint16_t reg_addr, uint32_t data)
{
	const struct phy_tja1103_config *cfg = dev->config;
	int ret;

	mdio_bus_enable(cfg->mdio);

	ret = phy_tja1103_c22_write(dev, reg_addr, (uint16_t)data);

	mdio_bus_disable(cfg->mdio);

	return ret;
}

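/*
 * Read the 32-bit PHY identifier: PHYID1 provides the upper 16 bits, PHYID2 the
 * lower 16 bits. The result is compared against TJA1103_ID during init.
 */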
static int phy_tja1103_id(const struct device *dev, uint32_t *phy_id)
{
	uint16_t val;

	if (phy_tja1103_c22_read(dev, MII_PHYID1R, &val) < 0) {
		return -EIO;
	}

	*phy_id = (val & UINT16_MAX) << 16;

	if (phy_tja1103_c22_read(dev, MII_PHYID2R, &val) < 0) {
		return -EIO;
	}

	*phy_id |= (val & UINT16_MAX);

	return 0;
}

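/*
 * Refresh data->state.is_up from the vendor PHY status register. Returns 0 when
 * the link state changed, -EAGAIN when it is unchanged (so callers can skip the
 * link callback) and -EIO on MDIO errors. Callers hold data->sem.
 */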
static int update_link_state(const struct device *dev)
{
	struct phy_tja1103_data *const data = dev->data;
	bool link_up;
	uint16_t val;

	if (phy_tja1103_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_PHY_STATUS, &val) < 0) {
		return -EIO;
	}

	link_up = (val & TJA1103_PHY_STATUS_LINK_STAT) != 0;

	/* Let the workqueue reschedule and re-check if the
	 * link status is unchanged this time
	 */
	if (data->state.is_up == link_up) {
		return -EAGAIN;
	}

	data->state.is_up = link_up;

	return 0;
}

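/*
 * get_link handler of the PHY driver API. Returns a snapshot of the cached link
 * state; in interrupt mode the state is refreshed on demand since no polling
 * work is running.
 */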
static int phy_tja1103_get_link_state(const struct device *dev, struct phy_link_state *state)
{
	struct phy_tja1103_data *const data = dev->data;
	const struct phy_tja1103_config *const cfg = dev->config;
	int rc = 0;

	k_sem_take(&data->sem, K_FOREVER);

	/* If an interrupt is configured, the workqueue does not update
	 * the link state periodically, so do it explicitly here
	 */
	if (cfg->gpio_interrupt.port != NULL) {
		rc = update_link_state(dev);
	}

	memcpy(state, &data->state, sizeof(struct phy_link_state));

	k_sem_give(&data->sem);

	return rc;
}

static void invoke_link_cb(const struct device *dev)
{
	struct phy_tja1103_data *const data = dev->data;
	struct phy_link_state state;

	if (data->cb == NULL) {
		return;
	}

	/* Send callback only on link state change */
	if (phy_tja1103_get_link_state(dev, &state) != 0) {
		return;
	}

	data->cb(dev, &state, data->cb_data);
}

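/*
 * Polling fallback used when no interrupt GPIO is provided: periodically refresh
 * the link state, notify the registered callback on changes, and reschedule
 * itself every CONFIG_PHY_MONITOR_PERIOD milliseconds.
 */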
static void monitor_work_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct phy_tja1103_data *const data =
		CONTAINER_OF(dwork, struct phy_tja1103_data, monitor_work);
	const struct device *dev = data->dev;
	int rc;

	k_sem_take(&data->sem, K_FOREVER);

	rc = update_link_state(dev);

	k_sem_give(&data->sem);

	/* If link state has changed and a callback is set, invoke callback */
	if (rc == 0) {
		invoke_link_cb(dev);
	}

	/* Submit delayed work */
	k_work_reschedule(&data->monitor_work, K_MSEC(CONFIG_PHY_MONITOR_PERIOD));
}

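/*
 * Bottom-half thread for the PHY interrupt. The ISR only gives offload_sem; this
 * thread then reads the masked functional IRQ status over MDIO, invokes the link
 * callback on link events and acknowledges the asserted interrupts.
 */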
static void phy_tja1103_irq_offload_thread(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	const struct device *dev = p1;
	struct phy_tja1103_data *const data = dev->data;
	uint16_t irq;

	for (;;) {
		/* Await trigger from the ISR */
		k_sem_take(&data->offload_sem, K_FOREVER);

		if (phy_tja1103_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1,
					 TJA1103_PHY_FUNC_IRQ_MSTATUS, &irq) < 0) {
			return;
		}

		/* Handle link-related functional IRQs */
		if (irq & (TJA1103_PHY_FUNC_IRQ_LINK_EVENT | TJA1103_PHY_FUNC_IRQ_LINK_AVAIL)) {
			/* Send callback to the MAC on link status change */
			invoke_link_cb(dev);

			/* Ack the asserted link-related interrupts */
			phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1,
					      TJA1103_PHY_FUNC_IRQ_ACK, irq);
		}
	}
}

static void phy_tja1103_handle_irq(const struct device *port, struct gpio_callback *cb,
				   uint32_t pins)
{
	ARG_UNUSED(pins);
	ARG_UNUSED(port);

	struct phy_tja1103_data *const data =
		CONTAINER_OF(cb, struct phy_tja1103_data, phy_tja1103_int_callback);

	/* Trigger the bottom half before leaving the ISR */
	k_sem_give(&data->offload_sem);
}

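/*
 * Select the link-change reporting mechanism: when an interrupt GPIO is described
 * in devicetree, enable the link IRQs in the PHY and spawn the offload thread;
 * otherwise fall back to the delayed-work polling handler.
 */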
static void phy_tja1103_cfg_irq_poll(const struct device *dev)
{
	struct phy_tja1103_data *const data = dev->data;
	const struct phy_tja1103_config *const cfg = dev->config;
	int ret;

	if (cfg->gpio_interrupt.port != NULL) {
		if (!gpio_is_ready_dt(&cfg->gpio_interrupt)) {
			LOG_ERR("Interrupt GPIO device %s is not ready",
				cfg->gpio_interrupt.port->name);
			return;
		}

		ret = gpio_pin_configure_dt(&cfg->gpio_interrupt, GPIO_INPUT);
		if (ret < 0) {
			LOG_ERR("Failed to configure interrupt GPIO, %d", ret);
			return;
		}

		gpio_init_callback(&(data->phy_tja1103_int_callback), phy_tja1103_handle_irq,
				   BIT(cfg->gpio_interrupt.pin));

		/* Add the callback structure to the GPIO driver's callback list */
		ret = gpio_add_callback(cfg->gpio_interrupt.port, &data->phy_tja1103_int_callback);
		if (ret < 0) {
			LOG_ERR("Failed to add INT callback, %d", ret);
			return;
		}

		ret = phy_tja1103_c45_write(
			dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_PHY_FUNC_IRQ_EN,
			(TJA1103_PHY_FUNC_IRQ_LINK_EVENT_EN | TJA1103_PHY_FUNC_IRQ_LINK_AVAIL_EN));
		if (ret < 0) {
			LOG_ERR("Failed to enable link IRQs in the PHY, %d", ret);
			return;
		}

		ret = gpio_pin_interrupt_configure_dt(&cfg->gpio_interrupt, GPIO_INT_EDGE_FALLING);
		if (ret < 0) {
			LOG_ERR("Failed to enable INT, %d", ret);
			return;
		}

		/* PHY initialized and IRQ configured, now start the bottom-half handler */
		k_thread_create(&data->irq_thread, data->irq_thread_stack,
				CONFIG_PHY_TJA1103_IRQ_THREAD_STACK_SIZE,
				phy_tja1103_irq_offload_thread, (void *)dev, NULL, NULL,
				CONFIG_PHY_TJA1103_IRQ_THREAD_PRIO, K_ESSENTIAL, K_NO_WAIT);
		k_thread_name_set(&data->irq_thread, "phy_tja1103_irq_offload");

	} else {
		k_work_init_delayable(&data->monitor_work, monitor_work_handler);

		monitor_work_handler(&data->monitor_work.work);
	}
}

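/*
 * The TJA1103 is a single-speed 100BASE-T1 PHY, so only 100 Mbit/s full duplex
 * can be advertised; any other requested speed is rejected with -ENOTSUP.
 */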
static int phy_tja1103_cfg_link(const struct device *dev, enum phy_link_speed adv_speeds)
{
	ARG_UNUSED(dev);

	if (adv_speeds & LINK_FULL_100BASE_T) {
		return 0;
	}

	return -ENOTSUP;
}

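/*
 * Device init: wait for the PHY to come out of reset by polling for the expected
 * PHY ID, unlock the configuration registers, apply the master/slave role from
 * devicetree, acknowledge the FUSA startup self-test interrupt if it is pending,
 * and finally set up either interrupt- or poll-based link monitoring.
 */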
static int phy_tja1103_init(const struct device *dev)
{
	const struct phy_tja1103_config *const cfg = dev->config;
	struct phy_tja1103_data *const data = dev->data;
	uint32_t phy_id = 0;
	uint16_t val;
	int ret;

	data->dev = dev;
	data->cb = NULL;
	data->state.is_up = false;
	data->state.speed = LINK_FULL_100BASE_T;

	/* WAIT_FOR() evaluates to true once the PHY reports the expected ID */
	ret = WAIT_FOR(!phy_tja1103_id(dev, &phy_id) && phy_id == TJA1103_ID,
		       TJA1103_AWAIT_RETRY_COUNT * TJA1103_AWAIT_DELAY_POLL_US,
		       k_sleep(K_USEC(TJA1103_AWAIT_DELAY_POLL_US)));
	if (!ret) {
		LOG_ERR("Unable to obtain PHY ID for device 0x%x", cfg->phy_addr);
		return -ENODEV;
	}

	/* Enable the configuration registers */
	ret = phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_DEVICE_CONTROL,
				    TJA1103_DEVICE_CONTROL_GLOBAL_CFG_EN |
				    TJA1103_DEVICE_CONTROL_SUPER_CFG_EN);
	if (ret < 0) {
		return ret;
	}

	ret = phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_PHY_CONTROL,
				    TJA1103_PHY_CONTROL_CFG_EN);
	if (ret < 0) {
		return ret;
	}

	ret = phy_tja1103_c45_read(dev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, &val);
	if (ret < 0) {
		return ret;
	}

	/* Change master/slave mode if needed */
	if (cfg->master_slave == 1) {
		val |= MDIO_PMA_PMD_BT1_CTRL_CFG_MST;
	} else if (cfg->master_slave == 2) {
		val &= ~MDIO_PMA_PMD_BT1_CTRL_CFG_MST;
	}

	ret = phy_tja1103_c45_write(dev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, val);
	if (ret < 0) {
		return ret;
	}

	/* Check the always accessible register used for handling NMIs */
	ret = phy_tja1103_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_ALWAYS_ACCESSIBLE, &val);
	if (ret < 0) {
		return ret;
	}

	/* Ack the FUSA pass interrupt if the startup self-test passed */
	if (val & TJA1103_ALWAYS_ACCESSIBLE_FUSA_PASS_IRQ) {
		ret = phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1,
					    TJA1103_ALWAYS_ACCESSIBLE,
					    TJA1103_ALWAYS_ACCESSIBLE_FUSA_PASS_IRQ);
	}

	/* Configure interrupt or poll mode for reporting link changes */
	phy_tja1103_cfg_irq_poll(dev);

	return ret;
}

422
phy_tja1103_link_cb_set(const struct device * dev,phy_callback_t cb,void * user_data)423 static int phy_tja1103_link_cb_set(const struct device *dev, phy_callback_t cb, void *user_data)
424 {
425 struct phy_tja1103_data *const data = dev->data;
426
427 data->cb = cb;
428 data->cb_data = user_data;
429
430 /* Invoke the callback to notify the caller of the current
431 * link status.
432 */
433 invoke_link_cb(dev);
434
435 return 0;
436 }
437
static DEVICE_API(ethphy, phy_tja1103_api) = {
	.get_link = phy_tja1103_get_link_state,
	.cfg_link = phy_tja1103_cfg_link,
	.link_cb_set = phy_tja1103_link_cb_set,
	.read = phy_tja1103_reg_read,
	.write = phy_tja1103_reg_write,
};

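/*
 * Per-instance instantiation from devicetree: the PHY address comes from the reg
 * property, the MDIO bus from the parent node, the optional int-gpios property
 * selects interrupt mode, and the master-slave enum selects the link role.
 */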
#define TJA1103_INITIALIZE(n)                                                                      \
	static const struct phy_tja1103_config phy_tja1103_config_##n = {                         \
		.phy_addr = DT_INST_REG_ADDR(n),                                                   \
		.mdio = DEVICE_DT_GET(DT_INST_BUS(n)),                                             \
		.gpio_interrupt = GPIO_DT_SPEC_INST_GET_OR(n, int_gpios, {0}),                     \
		.master_slave = DT_INST_ENUM_IDX(n, master_slave),                                 \
	};                                                                                         \
	static struct phy_tja1103_data phy_tja1103_data_##n = {                                    \
		.sem = Z_SEM_INITIALIZER(phy_tja1103_data_##n.sem, 1, 1),                          \
		.offload_sem = Z_SEM_INITIALIZER(phy_tja1103_data_##n.offload_sem, 0, 1),          \
	};                                                                                         \
	DEVICE_DT_INST_DEFINE(n, &phy_tja1103_init, NULL, &phy_tja1103_data_##n,                   \
			      &phy_tja1103_config_##n, POST_KERNEL, CONFIG_PHY_INIT_PRIORITY,      \
			      &phy_tja1103_api);

DT_INST_FOREACH_STATUS_OKAY(TJA1103_INITIALIZE)