/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_ethernet_mgmt, CONFIG_NET_L2_ETHERNET_LOG_LEVEL);

#include <errno.h>

#include <zephyr/net/net_core.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet_mgmt.h>
15
is_hw_caps_supported(const struct device * dev,enum ethernet_hw_caps caps)16 static inline bool is_hw_caps_supported(const struct device *dev,
17 enum ethernet_hw_caps caps)
18 {
19 const struct ethernet_api *api = dev->api;
20
21 if (!api || !api->get_capabilities) {
22 return false;
23 }
24
25 return ((api->get_capabilities(dev) & caps) != 0);
26 }
27
ethernet_set_config(uint32_t mgmt_request,struct net_if * iface,void * data,size_t len)28 static int ethernet_set_config(uint32_t mgmt_request,
29 struct net_if *iface,
30 void *data, size_t len)
31 {
32 struct ethernet_req_params *params = (struct ethernet_req_params *)data;
33 const struct device *dev = net_if_get_device(iface);
34 const struct ethernet_api *api = dev->api;
35 struct ethernet_config config = { 0 };
36 enum ethernet_config_type type;
37
38 if (!api) {
39 return -ENOENT;
40 }
41
42 if (!api->set_config) {
43 return -ENOTSUP;
44 }
45
46 if (!data || (len != sizeof(struct ethernet_req_params))) {
47 return -EINVAL;
48 }
49
50 if (mgmt_request == NET_REQUEST_ETHERNET_SET_AUTO_NEGOTIATION) {
51 if (!is_hw_caps_supported(dev,
52 ETHERNET_AUTO_NEGOTIATION_SET)) {
53 return -ENOTSUP;
54 }
55
56 config.auto_negotiation = params->auto_negotiation;
57 type = ETHERNET_CONFIG_TYPE_AUTO_NEG;
58 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_LINK) {
59 if (params->l.link_10bt) {
60 if (!is_hw_caps_supported(dev,
61 ETHERNET_LINK_10BASE_T)) {
62 return -ENOTSUP;
63 }
64
65 config.l.link_10bt = true;
66 } else if (params->l.link_100bt) {
67 if (!is_hw_caps_supported(dev,
68 ETHERNET_LINK_100BASE_T)) {
69 return -ENOTSUP;
70 }
71
72 config.l.link_100bt = true;
73 } else if (params->l.link_1000bt) {
74 if (!is_hw_caps_supported(dev,
75 ETHERNET_LINK_1000BASE_T)) {
76 return -ENOTSUP;
77 }
78
79 config.l.link_1000bt = true;
80 } else {
81 return -EINVAL;
82 }
83
84 type = ETHERNET_CONFIG_TYPE_LINK;
85 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_DUPLEX) {
86 if (!is_hw_caps_supported(dev, ETHERNET_DUPLEX_SET)) {
87 return -ENOTSUP;
88 }
89
90 config.full_duplex = params->full_duplex;
91 type = ETHERNET_CONFIG_TYPE_DUPLEX;
92 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_MAC_ADDRESS) {
93 if (net_if_is_up(iface)) {
94 return -EACCES;
95 }
96
97 /* We need to remove the old IPv6 link layer address, that is
98 * generated from old MAC address, from network interface if
99 * needed.
100 */
101 if (IS_ENABLED(CONFIG_NET_NATIVE_IPV6) &&
102 IS_ENABLED(CONFIG_NET_IPV6_IID_EUI_64)) {
103 struct in6_addr iid;
104
105 net_ipv6_addr_create_iid(&iid,
106 net_if_get_link_addr(iface));
107
108 /* No need to check the return value in this case. It
109 * is not an error if the address is not found atm.
110 */
111 (void)net_if_ipv6_addr_rm(iface, &iid);
112 }
113
114 memcpy(&config.mac_address, ¶ms->mac_address,
115 sizeof(struct net_eth_addr));
116 type = ETHERNET_CONFIG_TYPE_MAC_ADDRESS;
117 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_QAV_PARAM) {
118 if (!is_hw_caps_supported(dev, ETHERNET_QAV)) {
119 return -ENOTSUP;
120 }
121
122 /* Validate params which need global validating */
123 switch (params->qav_param.type) {
124 case ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH:
125 if (params->qav_param.delta_bandwidth > 100) {
126 return -EINVAL;
127 }
128 break;
129 case ETHERNET_QAV_PARAM_TYPE_OPER_IDLE_SLOPE:
130 case ETHERNET_QAV_PARAM_TYPE_TRAFFIC_CLASS:
131 /* Read-only parameters */
132 return -EINVAL;
133 default:
134 /* No validation needed */
135 break;
136 }
137
138 memcpy(&config.qav_param, ¶ms->qav_param,
139 sizeof(struct ethernet_qav_param));
140 type = ETHERNET_CONFIG_TYPE_QAV_PARAM;
141 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_QBV_PARAM) {
142 if (!is_hw_caps_supported(dev, ETHERNET_QBV)) {
143 return -ENOTSUP;
144 }
145
146 /* Validate params which need global validating */
147 if (params->qbv_param.state == ETHERNET_QBV_STATE_TYPE_OPER) {
148 /* Read-only parameters */
149 return -EINVAL;
150 }
151
152 if (params->qbv_param.type == ETHERNET_QBV_PARAM_TYPE_TIME &&
153 (params->qbv_param.cycle_time.nanosecond >= 1000000000 ||
154 params->qbv_param.base_time.fract_nsecond >= 1000000000)) {
155 return -EINVAL;
156 }
157
158 memcpy(&config.qbv_param, ¶ms->qbv_param,
159 sizeof(struct ethernet_qbv_param));
160 type = ETHERNET_CONFIG_TYPE_QBV_PARAM;
161 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_QBU_PARAM) {
162 if (!is_hw_caps_supported(dev, ETHERNET_QBU)) {
163 return -ENOTSUP;
164 }
165
166 if (params->qbu_param.type ==
167 ETHERNET_QBR_PARAM_TYPE_LINK_PARTNER_STATUS) {
168 /* Read only parameter */
169 return -EINVAL;
170 }
171
172 /* All other fields are rw */
173
174 memcpy(&config.qbu_param, ¶ms->qbu_param,
175 sizeof(struct ethernet_qbu_param));
176 type = ETHERNET_CONFIG_TYPE_QBU_PARAM;
177 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_TXTIME_PARAM) {
178 if (!is_hw_caps_supported(dev, ETHERNET_TXTIME)) {
179 return -ENOTSUP;
180 }
181
182 if (net_if_is_up(iface)) {
183 return -EACCES;
184 }
185
186 memcpy(&config.txtime_param, ¶ms->txtime_param,
187 sizeof(struct ethernet_txtime_param));
188 type = ETHERNET_CONFIG_TYPE_TXTIME_PARAM;
189 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_PROMISC_MODE) {
190 if (!is_hw_caps_supported(dev, ETHERNET_PROMISC_MODE)) {
191 return -ENOTSUP;
192 }
193
194 config.promisc_mode = params->promisc_mode;
195 type = ETHERNET_CONFIG_TYPE_PROMISC_MODE;
196 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_T1S_PARAM) {
197 if (net_if_is_up(iface)) {
198 return -EACCES;
199 }
200
201 memcpy(&config.t1s_param, ¶ms->t1s_param,
202 sizeof(struct ethernet_t1s_param));
203 type = ETHERNET_CONFIG_TYPE_T1S_PARAM;
204 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_TXINJECTION_MODE) {
205 if (!is_hw_caps_supported(dev, ETHERNET_TXINJECTION_MODE)) {
206 return -ENOTSUP;
207 }
208
209 config.txinjection_mode = params->txinjection_mode;
210 type = ETHERNET_CONFIG_TYPE_TXINJECTION_MODE;
211 } else if (mgmt_request == NET_REQUEST_ETHERNET_SET_MAC_FILTER) {
212 if (!is_hw_caps_supported(dev, ETHERNET_HW_FILTERING)) {
213 return -ENOTSUP;
214 }
215
216 memcpy(&config.filter, ¶ms->filter, sizeof(struct ethernet_filter));
217 type = ETHERNET_CONFIG_TYPE_FILTER;
218 } else {
219 return -EINVAL;
220 }
221
222 return api->set_config(net_if_get_device(iface), type, &config);
223 }
224
/* Register ethernet_set_config() as the handler for every
 * NET_REQUEST_ETHERNET_SET_* management request.
 */
NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_AUTO_NEGOTIATION,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_LINK,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_DUPLEX,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_MAC_ADDRESS,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_QBV_PARAM,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_QBU_PARAM,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_TXTIME_PARAM,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_PROMISC_MODE,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_T1S_PARAM,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_TXINJECTION_MODE,
				  ethernet_set_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_SET_MAC_FILTER,
				  ethernet_set_config);
260
ethernet_get_config(uint32_t mgmt_request,struct net_if * iface,void * data,size_t len)261 static int ethernet_get_config(uint32_t mgmt_request,
262 struct net_if *iface,
263 void *data, size_t len)
264 {
265 struct ethernet_req_params *params = (struct ethernet_req_params *)data;
266 const struct device *dev = net_if_get_device(iface);
267 const struct ethernet_api *api = dev->api;
268 struct ethernet_config config = { 0 };
269 int ret = 0;
270 enum ethernet_config_type type;
271
272 if (!api) {
273 return -ENOENT;
274 }
275
276 if (!api->get_config) {
277 return -ENOTSUP;
278 }
279
280 if (!data || (len != sizeof(struct ethernet_req_params))) {
281 return -EINVAL;
282 }
283
284 if (mgmt_request == NET_REQUEST_ETHERNET_GET_PRIORITY_QUEUES_NUM) {
285 if (!is_hw_caps_supported(dev, ETHERNET_PRIORITY_QUEUES)) {
286 return -ENOTSUP;
287 }
288
289 type = ETHERNET_CONFIG_TYPE_PRIORITY_QUEUES_NUM;
290
291 ret = api->get_config(dev, type, &config);
292 if (ret) {
293 return ret;
294 }
295
296 params->priority_queues_num = config.priority_queues_num;
297 } else if (mgmt_request == NET_REQUEST_ETHERNET_GET_QAV_PARAM) {
298 if (!is_hw_caps_supported(dev, ETHERNET_QAV)) {
299 return -ENOTSUP;
300 }
301
302 config.qav_param.queue_id = params->qav_param.queue_id;
303 config.qav_param.type = params->qav_param.type;
304
305 type = ETHERNET_CONFIG_TYPE_QAV_PARAM;
306
307 ret = api->get_config(dev, type, &config);
308 if (ret) {
309 return ret;
310 }
311
312 switch (config.qav_param.type) {
313 case ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH:
314 params->qav_param.delta_bandwidth =
315 config.qav_param.delta_bandwidth;
316 break;
317 case ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE:
318 params->qav_param.idle_slope =
319 config.qav_param.idle_slope;
320 break;
321 case ETHERNET_QAV_PARAM_TYPE_OPER_IDLE_SLOPE:
322 params->qav_param.oper_idle_slope =
323 config.qav_param.oper_idle_slope;
324 break;
325 case ETHERNET_QAV_PARAM_TYPE_TRAFFIC_CLASS:
326 params->qav_param.traffic_class =
327 config.qav_param.traffic_class;
328 break;
329 case ETHERNET_QAV_PARAM_TYPE_STATUS:
330 params->qav_param.enabled = config.qav_param.enabled;
331 break;
332 }
333
334 } else if (mgmt_request == NET_REQUEST_ETHERNET_GET_PORTS_NUM) {
335 type = ETHERNET_CONFIG_TYPE_PORTS_NUM;
336
337 ret = api->get_config(dev, type, &config);
338 if (ret) {
339 return ret;
340 }
341
342 params->ports_num = config.ports_num;
343
344 } else if (mgmt_request == NET_REQUEST_ETHERNET_GET_QBV_PARAM) {
345 if (!is_hw_caps_supported(dev, ETHERNET_QBV)) {
346 return -ENOTSUP;
347 }
348
349 config.qbv_param.port_id = params->qbv_param.port_id;
350 config.qbv_param.type = params->qbv_param.type;
351 config.qbv_param.state = params->qbv_param.state;
352
353 if (config.qbv_param.type ==
354 ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST) {
355 config.qbv_param.gate_control.row =
356 params->qbv_param.gate_control.row;
357 }
358
359 type = ETHERNET_CONFIG_TYPE_QBV_PARAM;
360
361 ret = api->get_config(dev, type, &config);
362 if (ret) {
363 return ret;
364 }
365
366 switch (config.qbv_param.type) {
367 case ETHERNET_QBV_PARAM_TYPE_STATUS:
368 params->qbv_param.enabled = config.qbv_param.enabled;
369 break;
370 case ETHERNET_QBV_PARAM_TYPE_TIME:
371 memcpy(¶ms->qbv_param.cycle_time,
372 &config.qbv_param.cycle_time,
373 sizeof(params->qbv_param.cycle_time));
374 memcpy(¶ms->qbv_param.base_time,
375 &config.qbv_param.base_time,
376 sizeof(params->qbv_param.base_time));
377 params->qbv_param.extension_time =
378 config.qbv_param.extension_time;
379 break;
380 case ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST_LEN:
381 params->qbv_param.gate_control_list_len =
382 config.qbv_param.gate_control_list_len;
383 break;
384 case ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST:
385 memcpy(¶ms->qbv_param.gate_control,
386 &config.qbv_param.gate_control,
387 sizeof(params->qbv_param.gate_control));
388 break;
389 }
390
391 } else if (mgmt_request == NET_REQUEST_ETHERNET_GET_QBU_PARAM) {
392 if (!is_hw_caps_supported(dev, ETHERNET_QBU)) {
393 return -ENOTSUP;
394 }
395
396 config.qbu_param.port_id = params->qbu_param.port_id;
397 config.qbu_param.type = params->qbu_param.type;
398
399 type = ETHERNET_CONFIG_TYPE_QBU_PARAM;
400
401 ret = api->get_config(dev, type, &config);
402 if (ret) {
403 return ret;
404 }
405
406 switch (config.qbu_param.type) {
407 case ETHERNET_QBU_PARAM_TYPE_STATUS:
408 params->qbu_param.enabled = config.qbu_param.enabled;
409 break;
410 case ETHERNET_QBU_PARAM_TYPE_RELEASE_ADVANCE:
411 params->qbu_param.release_advance =
412 config.qbu_param.release_advance;
413 break;
414 case ETHERNET_QBU_PARAM_TYPE_HOLD_ADVANCE:
415 params->qbu_param.hold_advance =
416 config.qbu_param.hold_advance;
417 break;
418 case ETHERNET_QBR_PARAM_TYPE_LINK_PARTNER_STATUS:
419 params->qbu_param.link_partner_status =
420 config.qbu_param.link_partner_status;
421 break;
422 case ETHERNET_QBR_PARAM_TYPE_ADDITIONAL_FRAGMENT_SIZE:
423 params->qbu_param.additional_fragment_size =
424 config.qbu_param.additional_fragment_size;
425 break;
426 case ETHERNET_QBU_PARAM_TYPE_PREEMPTION_STATUS_TABLE:
427 memcpy(¶ms->qbu_param.frame_preempt_statuses,
428 &config.qbu_param.frame_preempt_statuses,
429 sizeof(params->qbu_param.frame_preempt_statuses));
430 break;
431 }
432
433 } else if (mgmt_request == NET_REQUEST_ETHERNET_GET_TXTIME_PARAM) {
434 if (!is_hw_caps_supported(dev, ETHERNET_TXTIME)) {
435 return -ENOTSUP;
436 }
437
438 config.txtime_param.queue_id = params->txtime_param.queue_id;
439 config.txtime_param.type = params->txtime_param.type;
440
441 type = ETHERNET_CONFIG_TYPE_TXTIME_PARAM;
442
443 ret = api->get_config(dev, type, &config);
444 if (ret) {
445 return ret;
446 }
447
448 switch (config.txtime_param.type) {
449 case ETHERNET_TXTIME_PARAM_TYPE_ENABLE_QUEUES:
450 params->txtime_param.enable_txtime =
451 config.txtime_param.enable_txtime;
452 break;
453 }
454 } else if (mgmt_request == NET_REQUEST_ETHERNET_GET_TXINJECTION_MODE) {
455 if (!is_hw_caps_supported(dev, ETHERNET_TXINJECTION_MODE)) {
456 return -ENOTSUP;
457 }
458
459 type = ETHERNET_CONFIG_TYPE_TXINJECTION_MODE;
460
461 ret = api->get_config(dev, type, &config);
462 if (ret) {
463 return ret;
464 }
465
466 params->txinjection_mode = config.txinjection_mode;
467 } else {
468 return -EINVAL;
469 }
470
471 return ret;
472 }
473
/* Register ethernet_get_config() as the handler for every
 * NET_REQUEST_ETHERNET_GET_* management request.
 */
NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_PRIORITY_QUEUES_NUM,
				  ethernet_get_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_QAV_PARAM,
				  ethernet_get_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_PORTS_NUM,
				  ethernet_get_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_QBV_PARAM,
				  ethernet_get_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_QBU_PARAM,
				  ethernet_get_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_TXTIME_PARAM,
				  ethernet_get_config);

NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_ETHERNET_GET_TXINJECTION_MODE,
				  ethernet_get_config);
494
ethernet_mgmt_raise_carrier_on_event(struct net_if * iface)495 void ethernet_mgmt_raise_carrier_on_event(struct net_if *iface)
496 {
497 net_mgmt_event_notify(NET_EVENT_ETHERNET_CARRIER_ON, iface);
498 }
499
ethernet_mgmt_raise_carrier_off_event(struct net_if * iface)500 void ethernet_mgmt_raise_carrier_off_event(struct net_if *iface)
501 {
502 net_mgmt_event_notify(NET_EVENT_ETHERNET_CARRIER_OFF, iface);
503 }
504
ethernet_mgmt_raise_vlan_enabled_event(struct net_if * iface,uint16_t tag)505 void ethernet_mgmt_raise_vlan_enabled_event(struct net_if *iface, uint16_t tag)
506 {
507 #if defined(CONFIG_NET_MGMT_EVENT_INFO)
508 net_mgmt_event_notify_with_info(NET_EVENT_ETHERNET_VLAN_TAG_ENABLED,
509 iface, &tag, sizeof(tag));
510 #else
511 net_mgmt_event_notify(NET_EVENT_ETHERNET_VLAN_TAG_ENABLED,
512 iface);
513 #endif
514 }
515
ethernet_mgmt_raise_vlan_disabled_event(struct net_if * iface,uint16_t tag)516 void ethernet_mgmt_raise_vlan_disabled_event(struct net_if *iface, uint16_t tag)
517 {
518 #if defined(CONFIG_NET_MGMT_EVENT_INFO)
519 net_mgmt_event_notify_with_info(NET_EVENT_ETHERNET_VLAN_TAG_DISABLED,
520 iface, &tag, sizeof(tag));
521 #else
522 net_mgmt_event_notify(NET_EVENT_ETHERNET_VLAN_TAG_DISABLED, iface);
523 #endif
524 }
525