/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define NET_LOG_LEVEL CONFIG_NET_L2_ETHERNET_LOG_LEVEL

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_test, NET_LOG_LEVEL);

#include <zephyr/kernel.h>

#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/ethernet_mgmt.h>

#include <zephyr/ztest.h>

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
#else
#define DBG(fmt, ...)
#endif
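
/*
 * This suite exercises the Ethernet L2 management API (net_mgmt requests)
 * against a fake Ethernet driver defined below. The fake driver only records
 * the configuration it is given, so every request can be verified by reading
 * the value back.
 */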

static struct net_if *default_iface;

static const uint8_t mac_addr_init[6] = { 0x01, 0x02, 0x03,
					   0x04, 0x05, 0x06 };

static const uint8_t mac_addr_change[6] = { 0x01, 0x02, 0x03,
					     0x04, 0x05, 0x07 };

struct eth_fake_context {
	struct net_if *iface;
	uint8_t mac_address[6];

	bool auto_negotiation;
	bool full_duplex;
	bool link_10bt;
	bool link_100bt;
	bool promisc_mode;
	struct {
		bool qav_enabled;
		int idle_slope;
		int delta_bandwidth;
	} priority_queues[2];

	struct {
		/* Qbv parameters */
		struct {
			bool gate_status[NET_TC_TX_COUNT];
			enum ethernet_gate_state_operation operation;
			uint32_t time_interval;
			uint16_t row;
		} gate_control;
		uint32_t gate_control_list_len;
		bool qbv_enabled;
		struct net_ptp_extended_time base_time;
		struct net_ptp_time cycle_time;
		uint32_t extension_time;

		/* Qbu parameters */
		uint32_t hold_advance;
		uint32_t release_advance;
		enum ethernet_qbu_preempt_status
				frame_preempt_statuses[NET_TC_TX_COUNT];
		bool qbu_enabled;
		bool link_partner_status;
		uint8_t additional_fragment_size : 2;
	} ports[2];

	/* TXTIME parameters */
	bool txtime_statuses[NET_TC_TX_COUNT];
};

static struct eth_fake_context eth_fake_data;

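/*
 * Fake Ethernet driver. It never transmits anything; set_config() simply
 * stores the requested values in eth_fake_context and get_config() returns
 * them, which is enough for the management API tests below.
 */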
static void eth_fake_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_fake_context *ctx = dev->data;

	ctx->iface = iface;

	net_if_set_link_addr(iface, ctx->mac_address,
			     sizeof(ctx->mac_address),
			     NET_LINK_ETHERNET);

	ethernet_init(iface);
}

static int eth_fake_send(const struct device *dev,
			 struct net_pkt *pkt)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(pkt);

	return 0;
}

static enum ethernet_hw_caps eth_fake_get_capabilities(const struct device *dev)
{
	return ETHERNET_AUTO_NEGOTIATION_SET | ETHERNET_LINK_10BASE_T |
		ETHERNET_LINK_100BASE_T | ETHERNET_DUPLEX_SET | ETHERNET_QAV |
		ETHERNET_PROMISC_MODE | ETHERNET_PRIORITY_QUEUES |
		ETHERNET_QBV | ETHERNET_QBU | ETHERNET_TXTIME;
}

static int eth_fake_get_total_bandwidth(struct eth_fake_context *ctx)
{
	if (ctx->link_100bt) {
		return 100 * 1000 * 1000 / 8;
	}

	if (ctx->link_10bt) {
		return 10 * 1000 * 1000 / 8;
	}

	/* No link */
	return 0;
}

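/* delta_bandwidth is a percentage of the link bandwidth and idle_slope is in
 * bytes per second, so the two are kept consistent with
 * delta_bandwidth = idle_slope * 100 / total_bandwidth.
 */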
static void eth_fake_recalc_qav_delta_bandwidth(struct eth_fake_context *ctx)
{
	int bw;
	int i;

	bw = eth_fake_get_total_bandwidth(ctx);

	for (i = 0; i < ARRAY_SIZE(ctx->priority_queues); ++i) {
		if (bw == 0) {
			ctx->priority_queues[i].delta_bandwidth = 0;
		} else {
			ctx->priority_queues[i].delta_bandwidth =
				(ctx->priority_queues[i].idle_slope * 100);

			ctx->priority_queues[i].delta_bandwidth /= bw;
		}
	}
}

static void eth_fake_recalc_qav_idle_slopes(struct eth_fake_context *ctx)
{
	int bw;
	int i;

	bw = eth_fake_get_total_bandwidth(ctx);

	for (i = 0; i < ARRAY_SIZE(ctx->priority_queues); ++i) {
		ctx->priority_queues[i].idle_slope =
			(ctx->priority_queues[i].delta_bandwidth * bw) / 100;
	}
}

static int eth_fake_set_config(const struct device *dev,
			       enum ethernet_config_type type,
			       const struct ethernet_config *config)
{
	struct eth_fake_context *ctx = dev->data;
	int priority_queues_num = ARRAY_SIZE(ctx->priority_queues);
	int ports_num = ARRAY_SIZE(ctx->ports);
	enum ethernet_qav_param_type qav_param_type;
	enum ethernet_qbv_param_type qbv_param_type;
	enum ethernet_qbu_param_type qbu_param_type;
	enum ethernet_txtime_param_type txtime_param_type;
	int queue_id, port_id;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_AUTO_NEG:
		if (config->auto_negotiation == ctx->auto_negotiation) {
			return -EALREADY;
		}

		ctx->auto_negotiation = config->auto_negotiation;

		break;
	case ETHERNET_CONFIG_TYPE_LINK:
		if ((config->l.link_10bt && ctx->link_10bt) ||
		    (config->l.link_100bt && ctx->link_100bt)) {
			return -EALREADY;
		}

		if (config->l.link_10bt) {
			ctx->link_10bt = true;
			ctx->link_100bt = false;
		} else {
			ctx->link_10bt = false;
			ctx->link_100bt = true;
		}

		eth_fake_recalc_qav_idle_slopes(ctx);

		break;
	case ETHERNET_CONFIG_TYPE_DUPLEX:
		if (config->full_duplex == ctx->full_duplex) {
			return -EALREADY;
		}

		ctx->full_duplex = config->full_duplex;

		break;
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		memcpy(ctx->mac_address, config->mac_address.addr, 6);

		net_if_set_link_addr(ctx->iface, ctx->mac_address,
				     sizeof(ctx->mac_address),
				     NET_LINK_ETHERNET);
		break;
	case ETHERNET_CONFIG_TYPE_QAV_PARAM:
		queue_id = config->qav_param.queue_id;
		qav_param_type = config->qav_param.type;

		if (queue_id < 0 || queue_id >= priority_queues_num) {
			return -EINVAL;
		}

		switch (qav_param_type) {
		case ETHERNET_QAV_PARAM_TYPE_STATUS:
			ctx->priority_queues[queue_id].qav_enabled =
				config->qav_param.enabled;
			break;
		case ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE:
			ctx->priority_queues[queue_id].idle_slope =
				config->qav_param.idle_slope;

			eth_fake_recalc_qav_delta_bandwidth(ctx);
			break;
		case ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH:
			ctx->priority_queues[queue_id].delta_bandwidth =
				config->qav_param.delta_bandwidth;

			eth_fake_recalc_qav_idle_slopes(ctx);
			break;
		default:
			return -ENOTSUP;
		}

		break;
	case ETHERNET_CONFIG_TYPE_QBV_PARAM:
		port_id = config->qbv_param.port_id;
		qbv_param_type = config->qbv_param.type;

		if (port_id < 0 || port_id >= ports_num) {
			return -EINVAL;
		}

		switch (qbv_param_type) {
		case ETHERNET_QBV_PARAM_TYPE_STATUS:
			ctx->ports[port_id].qbv_enabled =
				config->qbv_param.enabled;
			break;
		case ETHERNET_QBV_PARAM_TYPE_TIME:
			memcpy(&ctx->ports[port_id].cycle_time,
			       &config->qbv_param.cycle_time,
			       sizeof(ctx->ports[port_id].cycle_time));
			ctx->ports[port_id].extension_time =
				config->qbv_param.extension_time;
			memcpy(&ctx->ports[port_id].base_time,
			       &config->qbv_param.base_time,
			       sizeof(ctx->ports[port_id].base_time));
			break;
		case ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST:
			ctx->ports[port_id].gate_control.gate_status[0] =
			    config->qbv_param.gate_control.gate_status[0];
			ctx->ports[port_id].gate_control.gate_status[1] =
			    config->qbv_param.gate_control.gate_status[1];
			break;
		case ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST_LEN:
			ctx->ports[port_id].gate_control_list_len =
			    config->qbv_param.gate_control_list_len;
			break;
		default:
			return -ENOTSUP;
		}

		break;
	case ETHERNET_CONFIG_TYPE_QBU_PARAM:
		port_id = config->qbu_param.port_id;
		qbu_param_type = config->qbu_param.type;

		if (port_id < 0 || port_id >= ports_num) {
			return -EINVAL;
		}

		switch (qbu_param_type) {
		case ETHERNET_QBU_PARAM_TYPE_STATUS:
			ctx->ports[port_id].qbu_enabled =
				config->qbu_param.enabled;
			break;
		case ETHERNET_QBU_PARAM_TYPE_RELEASE_ADVANCE:
			ctx->ports[port_id].release_advance =
				config->qbu_param.release_advance;
			break;
		case ETHERNET_QBU_PARAM_TYPE_HOLD_ADVANCE:
			ctx->ports[port_id].hold_advance =
				config->qbu_param.hold_advance;
			break;
		case ETHERNET_QBR_PARAM_TYPE_LINK_PARTNER_STATUS:
			ctx->ports[port_id].link_partner_status =
				config->qbu_param.link_partner_status;
			break;
		case ETHERNET_QBR_PARAM_TYPE_ADDITIONAL_FRAGMENT_SIZE:
			ctx->ports[port_id].additional_fragment_size =
				config->qbu_param.additional_fragment_size;
			break;
		case ETHERNET_QBU_PARAM_TYPE_PREEMPTION_STATUS_TABLE:
			memcpy(&ctx->ports[port_id].frame_preempt_statuses,
			   &config->qbu_param.frame_preempt_statuses,
			   sizeof(ctx->ports[port_id].frame_preempt_statuses));
			break;
		default:
			return -ENOTSUP;
		}

		break;
	case ETHERNET_CONFIG_TYPE_TXTIME_PARAM:
		queue_id = config->txtime_param.queue_id;
		txtime_param_type = config->txtime_param.type;

		if (queue_id < 0 || queue_id >= priority_queues_num) {
			return -EINVAL;
		}

		switch (txtime_param_type) {
		case ETHERNET_TXTIME_PARAM_TYPE_ENABLE_QUEUES:
			ctx->txtime_statuses[queue_id] =
				config->txtime_param.enable_txtime;
			break;
		default:
			return -ENOTSUP;
		}

		break;
	case ETHERNET_CONFIG_TYPE_PROMISC_MODE:
		if (config->promisc_mode == ctx->promisc_mode) {
			return -EALREADY;
		}

		ctx->promisc_mode = config->promisc_mode;

		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

static int eth_fake_get_config(const struct device *dev,
			       enum ethernet_config_type type,
			       struct ethernet_config *config)
{
	struct eth_fake_context *ctx = dev->data;
	int priority_queues_num = ARRAY_SIZE(ctx->priority_queues);
	int ports_num = ARRAY_SIZE(ctx->ports);
	enum ethernet_qav_param_type qav_param_type;
	enum ethernet_qbv_param_type qbv_param_type;
	enum ethernet_qbu_param_type qbu_param_type;
	enum ethernet_txtime_param_type txtime_param_type;
	int queue_id, port_id;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_PRIORITY_QUEUES_NUM:
		config->priority_queues_num = ARRAY_SIZE(ctx->priority_queues);
		break;
	case ETHERNET_CONFIG_TYPE_PORTS_NUM:
		config->ports_num = ARRAY_SIZE(ctx->ports);
		break;
	case ETHERNET_CONFIG_TYPE_QAV_PARAM:
		queue_id = config->qav_param.queue_id;
		qav_param_type = config->qav_param.type;

		if (queue_id < 0 || queue_id >= priority_queues_num) {
			return -EINVAL;
		}

		switch (qav_param_type) {
		case ETHERNET_QAV_PARAM_TYPE_STATUS:
			config->qav_param.enabled =
				ctx->priority_queues[queue_id].qav_enabled;
			break;
		case ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE:
		case ETHERNET_QAV_PARAM_TYPE_OPER_IDLE_SLOPE:
			/* No distinction between idle slopes for fake eth */
			config->qav_param.idle_slope =
				ctx->priority_queues[queue_id].idle_slope;
			break;
		case ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH:
			config->qav_param.delta_bandwidth =
				ctx->priority_queues[queue_id].delta_bandwidth;
			break;
		case ETHERNET_QAV_PARAM_TYPE_TRAFFIC_CLASS:
			/* Default TC for BE - it doesn't really matter here */
			config->qav_param.traffic_class =
				net_tx_priority2tc(NET_PRIORITY_BE);
			break;
		default:
			return -ENOTSUP;
		}

		break;
	case ETHERNET_CONFIG_TYPE_QBV_PARAM:
		port_id = config->qbv_param.port_id;
		qbv_param_type = config->qbv_param.type;

		if (port_id < 0 || port_id >= ports_num) {
			return -EINVAL;
		}

		switch (qbv_param_type) {
		case ETHERNET_QBV_PARAM_TYPE_STATUS:
			config->qbv_param.enabled =
				ctx->ports[port_id].qbv_enabled;
			break;
		case ETHERNET_QBV_PARAM_TYPE_TIME:
			memcpy(&config->qbv_param.base_time,
			       &ctx->ports[port_id].base_time,
			       sizeof(config->qbv_param.base_time));
			memcpy(&config->qbv_param.cycle_time,
			       &ctx->ports[port_id].cycle_time,
			       sizeof(config->qbv_param.cycle_time));
			config->qbv_param.extension_time =
				ctx->ports[port_id].extension_time;
			break;
		case ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST_LEN:
			config->qbv_param.gate_control_list_len =
				ctx->ports[port_id].gate_control_list_len;
			break;
		case ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST:
			memcpy(&config->qbv_param.gate_control,
			       &ctx->ports[port_id].gate_control,
			       sizeof(config->qbv_param.gate_control));
			break;
		default:
			return -ENOTSUP;
		}

		break;
	case ETHERNET_CONFIG_TYPE_QBU_PARAM:
		port_id = config->qbu_param.port_id;
		qbu_param_type = config->qbu_param.type;

		if (port_id < 0 || port_id >= ports_num) {
			return -EINVAL;
		}

		switch (qbu_param_type) {
		case ETHERNET_QBU_PARAM_TYPE_STATUS:
			config->qbu_param.enabled =
				ctx->ports[port_id].qbu_enabled;
			break;
		case ETHERNET_QBU_PARAM_TYPE_RELEASE_ADVANCE:
			config->qbu_param.release_advance =
				ctx->ports[port_id].release_advance;
			break;
		case ETHERNET_QBU_PARAM_TYPE_HOLD_ADVANCE:
			config->qbu_param.hold_advance =
				ctx->ports[port_id].hold_advance;
			break;
		case ETHERNET_QBR_PARAM_TYPE_LINK_PARTNER_STATUS:
			config->qbu_param.link_partner_status =
				ctx->ports[port_id].link_partner_status;
			break;
		case ETHERNET_QBR_PARAM_TYPE_ADDITIONAL_FRAGMENT_SIZE:
			config->qbu_param.additional_fragment_size =
				ctx->ports[port_id].additional_fragment_size;
			break;
		case ETHERNET_QBU_PARAM_TYPE_PREEMPTION_STATUS_TABLE:
			memcpy(&config->qbu_param.frame_preempt_statuses,
			     &ctx->ports[port_id].frame_preempt_statuses,
			     sizeof(config->qbu_param.frame_preempt_statuses));
			break;
		default:
			return -ENOTSUP;
		}

		break;
	case ETHERNET_CONFIG_TYPE_TXTIME_PARAM:
		queue_id = config->txtime_param.queue_id;
		txtime_param_type = config->txtime_param.type;

		if (queue_id < 0 || queue_id >= priority_queues_num) {
			return -EINVAL;
		}

		switch (txtime_param_type) {
		case ETHERNET_TXTIME_PARAM_TYPE_ENABLE_QUEUES:
			config->txtime_param.enable_txtime =
				ctx->txtime_statuses[queue_id];
			break;
		default:
			return -ENOTSUP;
		}

		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

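/* Driver API hooks registered with the Ethernet L2 */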
static struct ethernet_api eth_fake_api_funcs = {
	.iface_api.init = eth_fake_iface_init,

	.get_capabilities = eth_fake_get_capabilities,
	.set_config = eth_fake_set_config,
	.get_config = eth_fake_get_config,
	.send = eth_fake_send,
};

static int eth_fake_init(const struct device *dev)
{
	struct eth_fake_context *ctx = dev->data;
	int i;

	ctx->auto_negotiation = true;
	ctx->full_duplex = true;
	ctx->link_10bt = true;
	ctx->link_100bt = false;

	memcpy(ctx->mac_address, mac_addr_init, 6);

	/* Initialize priority queues */
	for (i = 0; i < ARRAY_SIZE(ctx->priority_queues); ++i) {
		ctx->priority_queues[i].qav_enabled = true;
		if (i + 1 == ARRAY_SIZE(ctx->priority_queues)) {
			/* 75% for the last priority queue */
			ctx->priority_queues[i].delta_bandwidth = 75;
		} else {
			/* 0% for the rest */
			ctx->priority_queues[i].delta_bandwidth = 0;
		}
	}

	eth_fake_recalc_qav_idle_slopes(ctx);

	return 0;
}

ETH_NET_DEVICE_INIT(eth_fake, "eth_fake", eth_fake_init, NULL,
		    &eth_fake_data, NULL, CONFIG_ETH_INIT_PRIORITY,
		    &eth_fake_api_funcs, NET_ETH_MTU);

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
static const char *iface2str(struct net_if *iface)
{
#ifdef CONFIG_NET_L2_ETHERNET
	if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
		return "Ethernet";
	}
#endif

#ifdef CONFIG_NET_L2_DUMMY
	if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
		return "Dummy";
	}
#endif

	return "<unknown type>";
}
#endif

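/* Locate the fake Ethernet interface registered above; the suite setup stores
 * it in default_iface so every test can run against it.
 */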
static void iface_cb(struct net_if *iface, void *user_data)
{
	struct net_if **my_iface = user_data;

	DBG("Interface %p (%s) [%d]\n", iface, iface2str(iface),
	    net_if_get_by_iface(iface));

	if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
		if (PART_OF_ARRAY(NET_IF_GET_NAME(eth_fake, 0), iface)) {
			*my_iface = iface;
		}
	}
}

static void *ethernet_mgmt_setup(void)
{
	net_if_foreach(iface_cb, &default_iface);

	zassert_not_null(default_iface, "Cannot find test interface");

	return NULL;
}

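/* The MAC address may only be changed while the interface is down, so the
 * request is expected to fail here while the interface is up.
 */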
static void change_mac_when_up(void)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params;
	int ret;

	memcpy(params.mac_address.addr, mac_addr_change, 6);

	net_if_up(iface);

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_MAC_ADDRESS, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0,
			  "mac address change should not be possible");
}

static void change_mac_when_down(void)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params;
	int ret;

	memcpy(params.mac_address.addr, mac_addr_change, 6);

	net_if_down(iface);

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_MAC_ADDRESS, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, 0, "unable to change mac address");

	ret = memcmp(net_if_get_link_addr(iface)->addr, mac_addr_change,
		     sizeof(mac_addr_change));

	zassert_equal(ret, 0, "invalid mac address change");

	net_if_up(iface);
}

ZTEST(net_ethernet_mgmt, test_change_mac)
{
	change_mac_when_up();
	change_mac_when_down();
}

static void change_auto_neg(bool is_auto_neg)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params;
	int ret;

	params.auto_negotiation = is_auto_neg;

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_AUTO_NEGOTIATION, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, 0, "invalid auto negotiation change");
}

static void change_to_same_auto_neg(bool is_auto_neg)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params;
	int ret;

	params.auto_negotiation = is_auto_neg;

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_AUTO_NEGOTIATION, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0,
			  "invalid change to already auto negotiation");
}

ZTEST(net_ethernet_mgmt, test_change_auto_neg)
{
	change_auto_neg(false);
	change_to_same_auto_neg(false);
	change_auto_neg(true);
}

static void change_link_10bt(void)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params = { 0 };
	int ret;

	params.l.link_10bt = true;

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_LINK, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, 0, "invalid link change");
}

static void change_link_100bt(void)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params = { 0 };
	int ret;

	params.l.link_100bt = true;

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_LINK, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, 0, "invalid link change");
}

static void change_same_link_100bt(void)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params = { 0 };
	int ret;

	params.l.link_100bt = true;

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_LINK, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0, "invalid same link change");
}

static void change_unsupported_link_1000bt(void)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params = { 0 };
	int ret;

	params.l.link_1000bt = true;

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_LINK, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0, "invalid change to unsupported link");
}

ZTEST(net_ethernet_mgmt, test_change_link)
{
	change_link_100bt();
	change_same_link_100bt();
	change_unsupported_link_1000bt();
	change_link_10bt();
}

static void change_duplex(bool is_full_duplex)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params;
	int ret;

	params.full_duplex = is_full_duplex;

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_DUPLEX, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, 0, "invalid duplex change");
}

static void change_same_duplex(bool is_full_duplex)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params;
	int ret;

	params.full_duplex = is_full_duplex;

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_DUPLEX, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0, "invalid change to already set duplex");
}

ZTEST(net_ethernet_mgmt, test_change_duplex)
{
	change_duplex(false);
	change_same_duplex(false);
	change_duplex(true);
}

ZTEST(net_ethernet_mgmt, test_change_qav_params)
{
	struct net_if *iface = default_iface;
	const struct device *dev = net_if_get_device(iface);
	struct eth_fake_context *ctx = dev->data;
	struct ethernet_req_params params;
	int available_priority_queues;
	int i;
	int ret;

	/* Try to get the number of the priority queues */
	ret = net_mgmt(NET_REQUEST_ETHERNET_GET_PRIORITY_QUEUES_NUM,
		       iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, 0, "could not get the number of priority queues");

	available_priority_queues = params.priority_queues_num;

	zassert_not_equal(available_priority_queues, 0,
			  "returned no priority queues");
	zassert_equal(available_priority_queues,
		      ARRAY_SIZE(ctx->priority_queues),
		      "an invalid number of priority queues returned");

	for (i = 0; i < available_priority_queues; ++i) {
		/* Try to set correct params to a correct queue id */
		params.qav_param.queue_id = i;

		/* Disable Qav for queue */
		params.qav_param.type = ETHERNET_QAV_PARAM_TYPE_STATUS;
		params.qav_param.enabled = false;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not disable qav");

		/* Invert it to make sure the read-back value is proper */
		params.qav_param.enabled = true;

		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read qav status");

		zassert_equal(false, params.qav_param.enabled,
			      "qav should be disabled");

		/* Re-enable Qav for queue */
		params.qav_param.enabled = true;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not enable qav");

		/* Invert it to make sure the read-back value is proper */
		params.qav_param.enabled = false;

		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read qav status");

		zassert_equal(true, params.qav_param.enabled,
			      "qav should be enabled");

		/* Starting with delta bandwidth */
		params.qav_param.type =
			ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH;
		params.qav_param.delta_bandwidth = 10U;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not set delta bandwidth");

		/* Reset local value - read-back and verify it */
		params.qav_param.delta_bandwidth = 0U;
		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read delta bandwidth");
		zassert_equal(params.qav_param.delta_bandwidth, 10,
			      "delta bandwidth did not change");

		/* And then the idle slope */
		params.qav_param.type = ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE;
		params.qav_param.idle_slope = 10U;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not set idle slope");

		/* Reset local value - read-back and verify it */
		params.qav_param.idle_slope = 0U;
		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read idle slope");
		zassert_equal(params.qav_param.idle_slope, 10,
			      "idle slope did not change");

		/* Oper idle slope should also be the same */
		params.qav_param.type =
			ETHERNET_QAV_PARAM_TYPE_OPER_IDLE_SLOPE;
		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read oper idle slope");
		zassert_equal(params.qav_param.oper_idle_slope, 10,
			      "oper idle slope should equal admin idle slope");

		/* Now try to set incorrect params to a correct queue */
		params.qav_param.type =
			ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH;
		params.qav_param.delta_bandwidth = -10;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_not_equal(ret, 0,
				  "allowed to set invalid delta bandwidth");

		params.qav_param.delta_bandwidth = 101U;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_not_equal(ret, 0,
				  "allowed to set invalid delta bandwidth");
	}

	/* Try to set read-only parameters */
	params.qav_param.queue_id = 0;
	params.qav_param.type = ETHERNET_QAV_PARAM_TYPE_OPER_IDLE_SLOPE;
	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
		       iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0, "should not be able to set oper idle slope");

	params.qav_param.queue_id = 0;
	params.qav_param.type = ETHERNET_QAV_PARAM_TYPE_TRAFFIC_CLASS;
	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
		       iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0, "should not be able to set traffic class");

	/* Now try to set valid parameters to an invalid queue id */
	params.qav_param.type = ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH;
	params.qav_param.queue_id = available_priority_queues;
	params.qav_param.delta_bandwidth = 10U;
	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
		       iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0, "should not be able to set delta bandwidth");

	params.qav_param.type = ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE;
	params.qav_param.idle_slope = 10U;
	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QAV_PARAM,
		       iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0, "should not be able to set idle slope");
}

ZTEST(net_ethernet_mgmt, test_change_qbv_params)
{
	struct net_if *iface = default_iface;
	const struct device *dev = net_if_get_device(iface);
	struct eth_fake_context *ctx = dev->data;
	struct ethernet_req_params params;
	struct net_ptp_time cycle_time;
	int available_ports;
	int i;
	int ret;

	/* Try to get the number of the ports */
	ret = net_mgmt(NET_REQUEST_ETHERNET_GET_PORTS_NUM,
		       iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, 0, "could not get the number of ports (%d)", ret);

	available_ports = params.ports_num;

	zassert_not_equal(available_ports, 0, "returned no ports");
	zassert_equal(available_ports,
		      ARRAY_SIZE(ctx->ports),
		      "an invalid number of ports returned");

	for (i = 0; i < available_ports; ++i) {
		/* Try to set correct params to a correct queue id */
		params.qbv_param.port_id = i;

		/* Disable Qbv for port */
		params.qbv_param.type = ETHERNET_QBV_PARAM_TYPE_STATUS;
		params.qbv_param.state = ETHERNET_QBV_STATE_TYPE_ADMIN;
		params.qbv_param.enabled = false;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not disable qbv for port %d", i);

		/* Invert it to make sure the read-back value is proper */
		params.qbv_param.enabled = true;

		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read qbv status (%d)", ret);

		zassert_equal(false, params.qbv_param.enabled,
			      "qbv should be disabled");

		/* Re-enable Qbv for queue */
		params.qbv_param.enabled = true;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not enable qbv (%d)", ret);

		/* Invert it to make sure the read-back value is proper */
		params.qbv_param.enabled = false;

		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read qbv status (%d)", ret);

		zassert_equal(true, params.qbv_param.enabled,
			      "qbv should be enabled");

		/* Then the Qbv parameter checks */

		params.qbv_param.type = ETHERNET_QBV_PARAM_TYPE_TIME;

		params.qbv_param.base_time.second = 10ULL;
		params.qbv_param.base_time.fract_nsecond = 20ULL;
		params.qbv_param.cycle_time.second = 30ULL;
		params.qbv_param.cycle_time.nanosecond = 20;
		params.qbv_param.extension_time = 40;

		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not set base time (%d)", ret);

		/* Reset local value - read-back and verify it */
		params.qbv_param.base_time.second = 0ULL;
		params.qbv_param.base_time.fract_nsecond = 0ULL;
		params.qbv_param.cycle_time.second = 0ULL;
		params.qbv_param.cycle_time.nanosecond = 0;
		params.qbv_param.extension_time = 0;

		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read times (%d)", ret);
		zassert_equal(params.qbv_param.base_time.second, 10ULL,
			      "base_time.second did not change");
		zassert_equal(params.qbv_param.base_time.fract_nsecond, 20ULL,
			      "base_time.fract_nsecond did not change");

		cycle_time.second = 30ULL;
		cycle_time.nanosecond = 20;
		zassert_true(params.qbv_param.cycle_time.second == cycle_time.second &&
			     params.qbv_param.cycle_time.nanosecond == cycle_time.nanosecond,
			     "cycle time did not change");

		zassert_equal(params.qbv_param.extension_time, 40,
			      "extension time did not change");

		params.qbv_param.type =
			ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST;
		params.qbv_param.gate_control.gate_status[0] = true;
		params.qbv_param.gate_control.gate_status[1] = false;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not set gate control list (%d)",
			      ret);

		/* Reset local value - read-back and verify it */
		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read gate control (%d)", ret);

		params.qbv_param.type =
			ETHERNET_QBV_PARAM_TYPE_GATE_CONTROL_LIST_LEN;
		params.qbv_param.gate_control_list_len = 1;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0,
			      "could not set gate control list len (%d)", ret);

		/* Reset local value - read-back and verify it */
		params.qbv_param.gate_control_list_len = 0;
		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBV_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0,
			      "could not read gate control list len (%d)",
			      ret);
		zassert_equal(params.qbv_param.gate_control_list_len, 1,
			      "gate control list len did not change");
	}

	/* Try to set read-only parameters */
	params.qbv_param.state = ETHERNET_QBV_STATE_TYPE_OPER;
	params.qbv_param.type = ETHERNET_QBV_PARAM_TYPE_TIME;
	params.qbv_param.extension_time = 50;
	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBV_PARAM,
		       iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0, "allowed to set oper status parameter (%d)",
			  ret);

	params.qbv_param.state = ETHERNET_QBV_STATE_TYPE_ADMIN;
	params.qbv_param.type = ETHERNET_QBV_PARAM_TYPE_TIME;
	params.qbv_param.base_time.fract_nsecond = 1000000000;
	params.qbv_param.cycle_time.nanosecond = 1000000000;
	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBV_PARAM,
		       iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_not_equal(ret, 0, "allowed to set base_time parameter (%d)",
			  ret);
}

ZTEST(net_ethernet_mgmt, test_change_qbu_params)
{
	struct net_if *iface = default_iface;
	const struct device *dev = net_if_get_device(iface);
	struct eth_fake_context *ctx = dev->data;
	struct ethernet_req_params params;
	int available_ports;
	int i, j;
	int ret;

	/* Try to get the number of the ports */
	ret = net_mgmt(NET_REQUEST_ETHERNET_GET_PORTS_NUM,
		       iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, 0, "could not get the number of ports (%d)", ret);

	available_ports = params.ports_num;

	zassert_not_equal(available_ports, 0, "returned no ports");
	zassert_equal(available_ports,
		      ARRAY_SIZE(ctx->ports),
		      "an invalid number of ports returned");

	for (i = 0; i < available_ports; ++i) {
		/* Try to set correct params to a correct queue id */
		params.qbu_param.port_id = i;

		/* Disable Qbu for port */
		params.qbu_param.type = ETHERNET_QBU_PARAM_TYPE_STATUS;
		params.qbu_param.enabled = false;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not disable qbu for port %d (%d)",
			      i, ret);

		/* Invert it to make sure the read-back value is proper */
		params.qbu_param.enabled = true;

		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read qbu status (%d)", ret);

		zassert_equal(false, params.qbu_param.enabled,
			      "qbu should be disabled");

		/* Re-enable Qbu for queue */
		params.qbu_param.enabled = true;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not enable qbu (%d)", ret);

		/* Invert it to make sure the read-back value is proper */
		params.qbu_param.enabled = false;

		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read qbu status (%d)", ret);

		zassert_equal(true, params.qbu_param.enabled,
			      "qbu should be enabled");

		/* Then the Qbu parameter checks */

		params.qbu_param.type = ETHERNET_QBU_PARAM_TYPE_RELEASE_ADVANCE;
		params.qbu_param.release_advance = 10;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not set release advance (%d)",
			      ret);

		/* Reset local value - read-back and verify it */
		params.qbu_param.release_advance = 0;
		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read release advance (%d)",
			      ret);
		zassert_equal(params.qbu_param.release_advance, 10,
			      "release_advance did not change");

		/* And then the hold advance */
		params.qbu_param.type = ETHERNET_QBU_PARAM_TYPE_HOLD_ADVANCE;
		params.qbu_param.hold_advance = 20;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not set hold advance (%d)", ret);

		/* Reset local value - read-back and verify it */
		params.qbu_param.hold_advance = 0;
		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read hold advance (%d)", ret);
		zassert_equal(params.qbu_param.hold_advance, 20,
			      "hold advance did not change");

		params.qbu_param.type =
			ETHERNET_QBR_PARAM_TYPE_LINK_PARTNER_STATUS;
		params.qbu_param.link_partner_status = true;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, -EINVAL,
			      "could set link partner status (%d)", ret);

		/* Reset local value - read-back and verify it */
		params.qbu_param.link_partner_status = false;
		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0,
			      "could not read link partner status (%d)", ret);
		zassert_equal(params.qbu_param.link_partner_status, false,
			      "link partner status changed");

		params.qbu_param.type =
			ETHERNET_QBR_PARAM_TYPE_ADDITIONAL_FRAGMENT_SIZE;
		params.qbu_param.additional_fragment_size = 2;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0,
			      "could not set additional frag size (%d)", ret);

		/* Reset local value - read-back and verify it */
		params.qbu_param.additional_fragment_size = 1;
		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0,
			      "could not read additional frag size (%d)", ret);
		zassert_equal(params.qbu_param.additional_fragment_size, 2,
			      "additional fragment size did not change");

		params.qbu_param.type =
			ETHERNET_QBU_PARAM_TYPE_PREEMPTION_STATUS_TABLE;

		for (j = 0;
		     j < ARRAY_SIZE(params.qbu_param.frame_preempt_statuses);
		     j++) {
			/* Set the preempt status for different priorities. */
			params.qbu_param.frame_preempt_statuses[j] =
				j % 2 ? ETHERNET_QBU_STATUS_EXPRESS :
					ETHERNET_QBU_STATUS_PREEMPTABLE;
		}

		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0,
			      "could not set frame preempt status (%d)", ret);

		/* Reset local value - read-back and verify it */
		for (j = 0;
		     j < ARRAY_SIZE(params.qbu_param.frame_preempt_statuses);
		     j++) {
			params.qbu_param.frame_preempt_statuses[j] =
				j % 2 ? ETHERNET_QBU_STATUS_PREEMPTABLE :
					ETHERNET_QBU_STATUS_EXPRESS;
		}

		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_QBU_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0,
			      "could not read frame preempt status (%d)", ret);

		for (j = 0;
		     j < ARRAY_SIZE(params.qbu_param.frame_preempt_statuses);
		     j++) {
			zassert_equal(
				params.qbu_param.frame_preempt_statuses[j],
				j % 2 ? ETHERNET_QBU_STATUS_EXPRESS :
					ETHERNET_QBU_STATUS_PREEMPTABLE,
				"frame preempt status did not change");
		}
	}
}

ZTEST(net_ethernet_mgmt, test_change_txtime_params)
{
	struct net_if *iface = default_iface;
	const struct device *dev = net_if_get_device(iface);
	struct eth_fake_context *ctx = dev->data;
	struct ethernet_req_params params;
	int available_priority_queues;
	int ret;
	int i;

	/* Try to get the number of the priority queues */
	ret = net_mgmt(NET_REQUEST_ETHERNET_GET_PRIORITY_QUEUES_NUM,
		       iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, 0, "could not get the number of priority queues");

	available_priority_queues = params.priority_queues_num;

	zassert_not_equal(available_priority_queues, 0,
			  "returned no priority queues");
	zassert_equal(available_priority_queues,
		      ARRAY_SIZE(ctx->priority_queues),
		      "an invalid number of priority queues returned");

	net_if_up(iface);

	/* Make sure we cannot change TXTIME parameters while the interface is up */
	params.txtime_param.queue_id = 0;
	params.txtime_param.type = ETHERNET_TXTIME_PARAM_TYPE_ENABLE_QUEUES;
	params.txtime_param.enable_txtime = false;
	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_TXTIME_PARAM, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, -EACCES, "could disable TXTIME for queue 0 (%d)",
		      ret);

	net_if_down(iface);

	for (i = 0; i < available_priority_queues; ++i) {
		/* Try to set correct params to a correct queue id */
		params.txtime_param.queue_id = i;

		/* Disable TXTIME for queue */
		params.txtime_param.type = ETHERNET_TXTIME_PARAM_TYPE_ENABLE_QUEUES;
		params.txtime_param.enable_txtime = false;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_TXTIME_PARAM,
			       iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not disable TXTIME for queue %d (%d)",
			      i, ret);

		/* Invert it to make sure the read-back value is proper */
		params.txtime_param.enable_txtime = true;

		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_TXTIME_PARAM, iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read txtime status (%d)", ret);

		zassert_equal(false, params.txtime_param.enable_txtime,
			      "txtime should be disabled");

		/* Re-enable TXTIME for queue */
		params.txtime_param.enable_txtime = true;
		ret = net_mgmt(NET_REQUEST_ETHERNET_SET_TXTIME_PARAM, iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not enable txtime (%d)", ret);

		/* Invert it to make sure the read-back value is proper */
		params.txtime_param.enable_txtime = false;

		ret = net_mgmt(NET_REQUEST_ETHERNET_GET_TXTIME_PARAM, iface,
			       &params, sizeof(struct ethernet_req_params));

		zassert_equal(ret, 0, "could not read txtime status (%d)", ret);

		zassert_equal(true, params.txtime_param.enable_txtime,
			      "txtime should be enabled");
	}
}

static void change_promisc_mode(bool mode)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params;
	int ret;

	params.promisc_mode = mode;

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_PROMISC_MODE, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, 0, "invalid promisc mode change");
}

static void change_promisc_mode_on(void)
{
	change_promisc_mode(true);
}

static void change_promisc_mode_off(void)
{
	change_promisc_mode(false);
}

static void change_to_same_promisc_mode(void)
{
	struct net_if *iface = default_iface;
	struct ethernet_req_params params;
	int ret;

	params.promisc_mode = true;

	ret = net_mgmt(NET_REQUEST_ETHERNET_SET_PROMISC_MODE, iface,
		       &params, sizeof(struct ethernet_req_params));

	zassert_equal(ret, -EALREADY,
		      "invalid change to already set promisc mode");
}

ZTEST(net_ethernet_mgmt, test_change_to_promisc_mode)
{
	change_promisc_mode_on();
	change_to_same_promisc_mode();
	change_promisc_mode_off();
}

ZTEST_SUITE(net_ethernet_mgmt, NULL, ethernet_mgmt_setup, NULL, NULL, NULL);