/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_tc, CONFIG_NET_TC_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <string.h>

#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_stats.h>

#include "net_private.h"
#include "net_stats.h"
#include "net_tc_mapping.h"

/* Template for the thread names. The "xx" is either "tx" denoting a transmit
 * thread, or "rx" denoting a receive thread. The "q[y]" denotes the traffic
 * class queue, where y is the traffic class id (0 to 7).
 */
#define MAX_NAME_LEN sizeof("xx_q[y]")
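
/* For example, the thread serving TX traffic class 0 ends up named "tx_q[0]";
 * see the snprintk() calls in net_tc_tx_init() and net_tc_rx_init() below.
 */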

/* Stacks for the TX traffic class handler threads */
K_KERNEL_STACK_ARRAY_DEFINE(tx_stack, NET_TC_TX_COUNT,
			    CONFIG_NET_TX_STACK_SIZE);

/* Stacks for the RX traffic class handler threads */
K_KERNEL_STACK_ARRAY_DEFINE(rx_stack, NET_TC_RX_COUNT,
			    CONFIG_NET_RX_STACK_SIZE);

#if NET_TC_TX_COUNT > 0
static struct net_traffic_class tx_classes[NET_TC_TX_COUNT];
#endif

#if NET_TC_RX_COUNT > 0
static struct net_traffic_class rx_classes[NET_TC_RX_COUNT];
#endif

#if NET_TC_RX_COUNT > 0 || NET_TC_TX_COUNT > 0
static void submit_to_queue(struct k_fifo *queue, struct net_pkt *pkt)
{
	k_fifo_put(queue, pkt);
}
#endif

bool net_tc_submit_to_tx_queue(uint8_t tc, struct net_pkt *pkt)
{
#if NET_TC_TX_COUNT > 0
	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());

	submit_to_queue(&tx_classes[tc].fifo, pkt);
#else
	ARG_UNUSED(tc);
	ARG_UNUSED(pkt);
#endif
	return true;
}

void net_tc_submit_to_rx_queue(uint8_t tc, struct net_pkt *pkt)
{
#if NET_TC_RX_COUNT > 0
	net_pkt_set_rx_stats_tick(pkt, k_cycle_get_32());

	submit_to_queue(&rx_classes[tc].fifo, pkt);
#else
	ARG_UNUSED(tc);
	ARG_UNUSED(pkt);
#endif
}

int net_tx_priority2tc(enum net_priority prio)
{
#if NET_TC_TX_COUNT > 0
	if (prio > NET_PRIORITY_NC) {
		/* Use the default value suggested in 802.1Q */
		prio = NET_PRIORITY_BE;
	}

	return tx_prio2tc_map[prio];
#else
	ARG_UNUSED(prio);

	return 0;
#endif
}

int net_rx_priority2tc(enum net_priority prio)
{
#if NET_TC_RX_COUNT > 0
	if (prio > NET_PRIORITY_NC) {
		/* Use the default value suggested in 802.1Q */
		prio = NET_PRIORITY_BE;
	}

	return rx_prio2tc_map[prio];
#else
	ARG_UNUSED(prio);

	return 0;
#endif
}
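
/* Example (illustrative): with a single TX traffic class
 * (NET_TC_TX_COUNT == 1), tx_prio2tc_map[] can only map every priority to
 * class 0, so net_tx_priority2tc(NET_PRIORITY_CA) returns 0. With more
 * classes, the tables in net_tc_mapping.h distribute the eight 802.1Q
 * priorities across the available classes.
 */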

#if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
#define BASE_PRIO_TX (CONFIG_NET_TC_NUM_PRIORITIES - 1)
#else
#define BASE_PRIO_TX (CONFIG_NET_TC_TX_COUNT - 1)
#endif

#define PRIO_TX(i, _) (BASE_PRIO_TX - i)

#if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
#define BASE_PRIO_RX (CONFIG_NET_TC_NUM_PRIORITIES - 1)
#else
#define BASE_PRIO_RX (CONFIG_NET_TC_RX_COUNT - 1)
#endif

#define PRIO_RX(i, _) (BASE_PRIO_RX - i)
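
/* Example expansion (illustrative, assuming NET_TC_TX_COUNT == 4 and
 * CONFIG_NET_TC_THREAD_PREEMPTIVE): BASE_PRIO_TX is 3, so
 * LISTIFY(NET_TC_TX_COUNT, PRIO_TX, (,)) yields 3, 2, 1, 0, and traffic
 * class 3 gets the numerically lowest, i.e. highest, thread priority.
 */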

#if NET_TC_TX_COUNT > 0
/* Convert traffic class to thread priority */
static uint8_t tx_tc2thread(uint8_t tc)
{
	/* The initial implementation just maps each traffic class to a
	 * certain queue. If there are fewer queues than classes, then several
	 * classes are mapped into the same queue.
	 *
	 * A lower value in this table means a higher thread priority. The
	 * value is used as a parameter to K_PRIO_COOP() or K_PRIO_PREEMPT(),
	 * which converts it to an actual thread priority.
	 *
	 * A higher traffic class value means a higher priority queue. This
	 * means that thread_priorities[7] should contain the highest priority
	 * for the TX queue handling thread.
	 *
	 * For example, if NET_TC_TX_COUNT = 8, which is the maximum number of
	 * traffic classes, then this priority array will contain the
	 * following values if preemptive priorities are used:
	 *     7, 6, 5, 4, 3, 2, 1, 0
	 * and
	 *     14, 13, 12, 11, 10, 9, 8, 7
	 * if cooperative priorities are used (these example values assume
	 * CONFIG_NET_TC_NUM_PRIORITIES is 15).
	 *
	 * These are then converted to the following thread priorities if
	 * CONFIG_NET_TC_THREAD_COOPERATIVE is enabled:
	 *     -1, -2, -3, -4, -5, -6, -7, -8
	 *
	 * and if CONFIG_NET_TC_THREAD_PREEMPTIVE is enabled, the following
	 * thread priorities are used:
	 *     7, 6, 5, 4, 3, 2, 1, 0
	 *
	 * This means that the lowest traffic class (0) gets the lowest thread
	 * priority: -1 when cooperative priorities are used and 7 when
	 * preemptive priorities are used.
	 */
	static const uint8_t thread_priorities[] = {
		LISTIFY(NET_TC_TX_COUNT, PRIO_TX, (,))
	};

	BUILD_ASSERT(NET_TC_TX_COUNT <= CONFIG_NUM_COOP_PRIORITIES,
		     "Too many traffic classes");

	NET_ASSERT(tc < ARRAY_SIZE(thread_priorities));

	return thread_priorities[tc];
}
#endif

#if NET_TC_RX_COUNT > 0
/* Convert traffic class to thread priority */
static uint8_t rx_tc2thread(uint8_t tc)
{
	static const uint8_t thread_priorities[] = {
		LISTIFY(NET_TC_RX_COUNT, PRIO_RX, (,))
	};

	BUILD_ASSERT(NET_TC_RX_COUNT <= CONFIG_NUM_COOP_PRIORITIES,
		     "Too many traffic classes");

	NET_ASSERT(tc < ARRAY_SIZE(thread_priorities));

	return thread_priorities[tc];
}
#endif

#if defined(CONFIG_NET_STATISTICS)
/* Fix up the traffic class statistics so that the "net stats" shell command
 * prints its output correctly.
 */
#if NET_TC_TX_COUNT > 0
static void tc_tx_stats_priority_setup(struct net_if *iface)
{
	int i;

	for (i = 0; i < 8; i++) {
		net_stats_update_tc_sent_priority(iface, net_tx_priority2tc(i),
						  i);
	}
}
#endif

#if NET_TC_RX_COUNT > 0
static void tc_rx_stats_priority_setup(struct net_if *iface)
{
	int i;

	for (i = 0; i < 8; i++) {
		net_stats_update_tc_recv_priority(iface, net_rx_priority2tc(i),
						  i);
	}
}
#endif

#if NET_TC_TX_COUNT > 0
static void net_tc_tx_stats_priority_setup(struct net_if *iface,
					   void *user_data)
{
	ARG_UNUSED(user_data);

	tc_tx_stats_priority_setup(iface);
}
#endif

#if NET_TC_RX_COUNT > 0
static void net_tc_rx_stats_priority_setup(struct net_if *iface,
					   void *user_data)
{
	ARG_UNUSED(user_data);

	tc_rx_stats_priority_setup(iface);
}
#endif
#endif

#if NET_TC_RX_COUNT > 0
static void tc_rx_handler(void *p1, void *p2, void *p3)
{
	struct k_fifo *fifo = p1;
	struct net_pkt *pkt;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		pkt = k_fifo_get(fifo, K_FOREVER);
		if (pkt == NULL) {
			continue;
		}

		net_process_rx_packet(pkt);
	}
}
#endif

#if NET_TC_TX_COUNT > 0
static void tc_tx_handler(void *p1, void *p2, void *p3)
{
	struct k_fifo *fifo = p1;
	struct net_pkt *pkt;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		pkt = k_fifo_get(fifo, K_FOREVER);
		if (pkt == NULL) {
			continue;
		}

		net_process_tx_packet(pkt);
	}
}
#endif

/* Create a FIFO for each traffic class we are using. All the network
 * traffic goes through these classes.
 */
void net_tc_tx_init(void)
{
#if NET_TC_TX_COUNT == 0
	NET_DBG("No %s thread created", "TX");
	return;
#else
	int i;

	BUILD_ASSERT(NET_TC_TX_COUNT >= 0);

#if defined(CONFIG_NET_STATISTICS)
	net_if_foreach(net_tc_tx_stats_priority_setup, NULL);
#endif

	for (i = 0; i < NET_TC_TX_COUNT; i++) {
		uint8_t thread_priority;
		int priority;
		k_tid_t tid;

		thread_priority = tx_tc2thread(i);

		priority = IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
			K_PRIO_COOP(thread_priority) :
			K_PRIO_PREEMPT(thread_priority);

		NET_DBG("[%d] Starting TX handler %p stack size %zd "
			"prio %d %s(%d)", i,
			&tx_classes[i].handler,
			K_KERNEL_STACK_SIZEOF(tx_stack[i]),
			thread_priority,
			IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
			"coop" : "preempt",
			priority);

		k_fifo_init(&tx_classes[i].fifo);

		tid = k_thread_create(&tx_classes[i].handler, tx_stack[i],
				      K_KERNEL_STACK_SIZEOF(tx_stack[i]),
				      tc_tx_handler,
				      &tx_classes[i].fifo, NULL, NULL,
				      priority, 0, K_FOREVER);
		if (!tid) {
			NET_ERR("Cannot create TC handler thread %d", i);
			continue;
		}

		if (IS_ENABLED(CONFIG_THREAD_NAME)) {
			char name[MAX_NAME_LEN];

			snprintk(name, sizeof(name), "tx_q[%d]", i);
			k_thread_name_set(tid, name);
		}

		k_thread_start(tid);
	}
#endif
}

void net_tc_rx_init(void)
{
#if NET_TC_RX_COUNT == 0
	NET_DBG("No %s thread created", "RX");
	return;
#else
	int i;

	BUILD_ASSERT(NET_TC_RX_COUNT >= 0);

#if defined(CONFIG_NET_STATISTICS)
	net_if_foreach(net_tc_rx_stats_priority_setup, NULL);
#endif

	for (i = 0; i < NET_TC_RX_COUNT; i++) {
		uint8_t thread_priority;
		int priority;
		k_tid_t tid;

		thread_priority = rx_tc2thread(i);

		priority = IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
			K_PRIO_COOP(thread_priority) :
			K_PRIO_PREEMPT(thread_priority);

		NET_DBG("[%d] Starting RX handler %p stack size %zd "
			"prio %d %s(%d)", i,
			&rx_classes[i].handler,
			K_KERNEL_STACK_SIZEOF(rx_stack[i]),
			thread_priority,
			IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE) ?
			"coop" : "preempt",
			priority);

		k_fifo_init(&rx_classes[i].fifo);

		tid = k_thread_create(&rx_classes[i].handler, rx_stack[i],
				      K_KERNEL_STACK_SIZEOF(rx_stack[i]),
				      tc_rx_handler,
				      &rx_classes[i].fifo, NULL, NULL,
				      priority, 0, K_FOREVER);
		if (!tid) {
			NET_ERR("Cannot create TC handler thread %d", i);
			continue;
		}

		if (IS_ENABLED(CONFIG_THREAD_NAME)) {
			char name[MAX_NAME_LEN];

			snprintk(name, sizeof(name), "rx_q[%d]", i);
			k_thread_name_set(tid, name);
		}

		k_thread_start(tid);
	}
#endif
}