/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_tc, CONFIG_NET_TC_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <string.h>

#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_stats.h>

#include "net_private.h"
#include "net_stats.h"
#include "net_tc_mapping.h"

/* Template for thread name. The "xx" is either "TX" denoting transmit thread,
 * or "RX" denoting receive thread. The "q[y]" denotes the traffic class queue
 * where y indicates the traffic class id. The value of y can be from 0 to 7.
 */
#define MAX_NAME_LEN sizeof("xx_q[y]")

/* Stacks for TX work queue */
K_KERNEL_STACK_ARRAY_DEFINE(tx_stack, NET_TC_TX_COUNT,
			    CONFIG_NET_TX_STACK_SIZE);

/* Stacks for RX work queue */
K_KERNEL_STACK_ARRAY_DEFINE(rx_stack, NET_TC_RX_COUNT,
			    CONFIG_NET_RX_STACK_SIZE);

#if NET_TC_TX_COUNT > 0
static struct net_traffic_class tx_classes[NET_TC_TX_COUNT];
#endif

#if NET_TC_RX_COUNT > 0
static struct net_traffic_class rx_classes[NET_TC_RX_COUNT];
#endif

#if NET_TC_RX_COUNT > 0 || NET_TC_TX_COUNT > 0
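/* Common helper used by both the TX and RX submit paths to push a packet
 * into a traffic class FIFO.
 */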
static void submit_to_queue(struct k_fifo *queue, struct net_pkt *pkt)
{
	k_fifo_put(queue, pkt);
}
#endif

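/* Queue an outgoing packet to the handler thread of the given TX traffic
 * class. The current cycle count is recorded in the packet for TX statistics
 * before it is pushed to the class FIFO. Always returns true in this
 * implementation.
 */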
bool net_tc_submit_to_tx_queue(uint8_t tc, struct net_pkt *pkt)
{
#if NET_TC_TX_COUNT > 0
	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());

	submit_to_queue(&tx_classes[tc].fifo, pkt);
#else
	ARG_UNUSED(tc);
	ARG_UNUSED(pkt);
#endif
	return true;
}

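/* Queue a received packet to the handler thread of the given RX traffic
 * class. The current cycle count is recorded in the packet for RX statistics
 * before it is pushed to the class FIFO.
 */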
void net_tc_submit_to_rx_queue(uint8_t tc, struct net_pkt *pkt)
{
#if NET_TC_RX_COUNT > 0
	net_pkt_set_rx_stats_tick(pkt, k_cycle_get_32());

	submit_to_queue(&rx_classes[tc].fifo, pkt);
#else
	ARG_UNUSED(tc);
	ARG_UNUSED(pkt);
#endif
}

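/* Map an IEEE 802.1Q packet priority (0-7) to a TX traffic class. Out of
 * range priorities fall back to the best effort priority before the lookup.
 */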
int net_tx_priority2tc(enum net_priority prio)
{
#if NET_TC_TX_COUNT > 0
	if (prio > NET_PRIORITY_NC) {
		/* Use default value suggested in 802.1Q */
		prio = NET_PRIORITY_BE;
	}

	return tx_prio2tc_map[prio];
#else
	ARG_UNUSED(prio);

	return 0;
#endif
}

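/* Map an IEEE 802.1Q packet priority (0-7) to an RX traffic class. Out of
 * range priorities fall back to the best effort priority before the lookup.
 */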
int net_rx_priority2tc(enum net_priority prio)
{
#if NET_TC_RX_COUNT > 0
	if (prio > NET_PRIORITY_NC) {
		/* Use default value suggested in 802.1Q */
		prio = NET_PRIORITY_BE;
	}

	return rx_prio2tc_map[prio];
#else
	ARG_UNUSED(prio);

	return 0;
#endif
}

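/* Base values used to derive the per-class thread priorities below. A higher
 * traffic class index gives a numerically smaller value, which becomes a
 * higher thread priority once converted with K_PRIO_COOP() or
 * K_PRIO_PREEMPT().
 */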
#if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
#define BASE_PRIO_TX (CONFIG_NET_TC_NUM_PRIORITIES - 1)
#else
#define BASE_PRIO_TX (CONFIG_NET_TC_TX_COUNT - 1)
#endif

#define PRIO_TX(i, _) (BASE_PRIO_TX - i)

#if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
#define BASE_PRIO_RX (CONFIG_NET_TC_NUM_PRIORITIES - 1)
#else
#define BASE_PRIO_RX (CONFIG_NET_TC_RX_COUNT - 1)
#endif

#define PRIO_RX(i, _) (BASE_PRIO_RX - i)

#if NET_TC_TX_COUNT > 0
/* Convert traffic class to thread priority */
static uint8_t tx_tc2thread(uint8_t tc)
{
	/* The initial implementation just maps each traffic class to a
	 * certain queue. If there are fewer queues than classes, then several
	 * classes are mapped into the same queue.
	 *
	 * A lower value in this table means a higher thread priority. The
	 * value is used as a parameter to K_PRIO_COOP() or K_PRIO_PREEMPT(),
	 * which converts it to the actual thread priority.
	 *
	 * A higher traffic class value means a higher priority queue. This
	 * means that thread_priorities[7] should contain the highest priority
	 * for the TX queue handling thread.
	 *
	 * For example, if NET_TC_TX_COUNT = 8, which is the maximum number of
	 * traffic classes, then this priority array contains the following
	 * values if preemptive priorities are used:
	 *      7, 6, 5, 4, 3, 2, 1, 0
	 * and
	 *      14, 13, 12, 11, 10, 9, 8, 7
	 * if cooperative priorities are used.
	 *
	 * These are then converted to the following thread priorities if
	 * CONFIG_NET_TC_THREAD_COOPERATIVE is enabled:
	 *      -1, -2, -3, -4, -5, -6, -7, -8
	 *
	 * and to the following thread priorities if
	 * CONFIG_NET_TC_THREAD_PREEMPTIVE is enabled:
	 *       7, 6, 5, 4, 3, 2, 1, 0
	 *
	 * This means that the lowest traffic class gets the lowest thread
	 * priority: -1 with cooperative priorities and 7 with preemptive
	 * priorities.
	 */
	static const uint8_t thread_priorities[] = {
		LISTIFY(NET_TC_TX_COUNT, PRIO_TX, (,))
	};

	BUILD_ASSERT(NET_TC_TX_COUNT <= CONFIG_NUM_COOP_PRIORITIES,
		     "Too many traffic classes");

	NET_ASSERT(tc < ARRAY_SIZE(thread_priorities));

	return thread_priorities[tc];
}
#endif

#if NET_TC_RX_COUNT > 0
/* Convert traffic class to thread priority */
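/* Same mapping scheme as tx_tc2thread() above; see that function for a
 * detailed description.
 */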
static uint8_t rx_tc2thread(uint8_t tc)
{
	static const uint8_t thread_priorities[] = {
		LISTIFY(NET_TC_RX_COUNT, PRIO_RX, (,))
	};

	BUILD_ASSERT(NET_TC_RX_COUNT <= CONFIG_NUM_COOP_PRIORITIES,
		     "Too many traffic classes");

	NET_ASSERT(tc < ARRAY_SIZE(thread_priorities));

	return thread_priorities[tc];
}
#endif

#if defined(CONFIG_NET_STATISTICS)
/* Fix up the traffic class statistics so that the "net stats" shell command
 * prints its output correctly.
 */
#if NET_TC_TX_COUNT > 0
static void tc_tx_stats_priority_setup(struct net_if *iface)
{
	int i;

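	/* Record the traffic class that each of the 8 IEEE 802.1Q priority
	 * values (0-7) maps to, so that the per-class statistics show the
	 * correct priorities.
	 */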
	for (i = 0; i < 8; i++) {
		net_stats_update_tc_sent_priority(iface, net_tx_priority2tc(i),
						  i);
	}
}
#endif

#if NET_TC_RX_COUNT > 0
static void tc_rx_stats_priority_setup(struct net_if *iface)
{
	int i;

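	/* Same as above, but recording the RX statistics priorities. */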
	for (i = 0; i < 8; i++) {
		net_stats_update_tc_recv_priority(iface, net_rx_priority2tc(i),
						  i);
	}
}
#endif

#if NET_TC_TX_COUNT > 0
static void net_tc_tx_stats_priority_setup(struct net_if *iface,
					   void *user_data)
{
	ARG_UNUSED(user_data);

	tc_tx_stats_priority_setup(iface);
}
#endif

#if NET_TC_RX_COUNT > 0
static void net_tc_rx_stats_priority_setup(struct net_if *iface,
					   void *user_data)
{
	ARG_UNUSED(user_data);

	tc_rx_stats_priority_setup(iface);
}
#endif
#endif

#if NET_TC_RX_COUNT > 0
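/* Handler thread for one RX traffic class: block on the class FIFO and feed
 * each queued packet to the RX processing path.
 */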
static void tc_rx_handler(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	struct k_fifo *fifo = p1;
	struct net_pkt *pkt;

	while (1) {
		pkt = k_fifo_get(fifo, K_FOREVER);
		if (pkt == NULL) {
			continue;
		}

		net_process_rx_packet(pkt);
	}
}
#endif

#if NET_TC_TX_COUNT > 0
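/* Handler thread for one TX traffic class: block on the class FIFO and pass
 * each queued packet to the TX processing path.
 */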
static void tc_tx_handler(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	struct k_fifo *fifo = p1;
	struct net_pkt *pkt;

	while (1) {
		pkt = k_fifo_get(fifo, K_FOREVER);
		if (pkt == NULL) {
			continue;
		}

		net_process_tx_packet(pkt);
	}
}
#endif

/* Create a fifo for each traffic class we are using. All the network
 * traffic goes through these classes.
 */
void net_tc_tx_init(void)
{
#if NET_TC_TX_COUNT == 0
	NET_DBG("No %s thread created", "TX");
	return;
#else
	int i;

	BUILD_ASSERT(NET_TC_TX_COUNT >= 0);

#if defined(CONFIG_NET_STATISTICS)
	net_if_foreach(net_tc_tx_stats_priority_setup, NULL);
#endif

	for (i = 0; i < NET_TC_TX_COUNT; i++) {
		uint8_t thread_priority;
		int priority;
		k_tid_t tid;

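		/* Look up the per-class priority index and convert it to an
		 * absolute cooperative or preemptive thread priority,
		 * depending on the Kconfig selection.
		 */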
		thread_priority = tx_tc2thread(i);

		priority = IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
			K_PRIO_COOP(thread_priority) :
			K_PRIO_PREEMPT(thread_priority);

		NET_DBG("[%d] Starting TX handler %p stack size %zd "
			"prio %d %s(%d)", i,
			&tx_classes[i].handler,
			K_KERNEL_STACK_SIZEOF(tx_stack[i]),
			thread_priority,
			IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
							"coop" : "preempt",
			priority);

		k_fifo_init(&tx_classes[i].fifo);

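		/* Create the handler thread but do not schedule it yet
		 * (K_FOREVER start delay); it is started explicitly below once
		 * the optional thread name has been set.
		 */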
		tid = k_thread_create(&tx_classes[i].handler, tx_stack[i],
				      K_KERNEL_STACK_SIZEOF(tx_stack[i]),
				      tc_tx_handler,
				      &tx_classes[i].fifo, NULL, NULL,
				      priority, 0, K_FOREVER);
		if (!tid) {
			NET_ERR("Cannot create TC handler thread %d", i);
			continue;
		}

		if (IS_ENABLED(CONFIG_THREAD_NAME)) {
			char name[MAX_NAME_LEN];

			snprintk(name, sizeof(name), "tx_q[%d]", i);
			k_thread_name_set(tid, name);
		}

		k_thread_start(tid);
	}
#endif
}

void net_tc_rx_init(void)
{
#if NET_TC_RX_COUNT == 0
	NET_DBG("No %s thread created", "RX");
	return;
#else
	int i;

	BUILD_ASSERT(NET_TC_RX_COUNT >= 0);

#if defined(CONFIG_NET_STATISTICS)
	net_if_foreach(net_tc_rx_stats_priority_setup, NULL);
#endif

	for (i = 0; i < NET_TC_RX_COUNT; i++) {
		uint8_t thread_priority;
		int priority;
		k_tid_t tid;

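		/* Look up the per-class priority index and convert it to an
		 * absolute cooperative or preemptive thread priority,
		 * depending on the Kconfig selection.
		 */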
		thread_priority = rx_tc2thread(i);

		priority = IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
			K_PRIO_COOP(thread_priority) :
			K_PRIO_PREEMPT(thread_priority);

		NET_DBG("[%d] Starting RX handler %p stack size %zd "
			"prio %d %s(%d)", i,
			&rx_classes[i].handler,
			K_KERNEL_STACK_SIZEOF(rx_stack[i]),
			thread_priority,
			IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
							"coop" : "preempt",
			priority);

		k_fifo_init(&rx_classes[i].fifo);

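		/* Create the handler thread but do not schedule it yet
		 * (K_FOREVER start delay); it is started explicitly below once
		 * the optional thread name has been set.
		 */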
		tid = k_thread_create(&rx_classes[i].handler, rx_stack[i],
				      K_KERNEL_STACK_SIZEOF(rx_stack[i]),
				      tc_rx_handler,
				      &rx_classes[i].fifo, NULL, NULL,
				      priority, 0, K_FOREVER);
		if (!tid) {
			NET_ERR("Cannot create TC handler thread %d", i);
			continue;
		}

		if (IS_ENABLED(CONFIG_THREAD_NAME)) {
			char name[MAX_NAME_LEN];

			snprintk(name, sizeof(name), "rx_q[%d]", i);
			k_thread_name_set(tid, name);
		}

		k_thread_start(tid);
	}
#endif
}