/*
 * Copyright (c) 2016 Intel Corporation
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(net_shell);

#include "net_shell_private.h"

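/* Tracks external (per-context) TX slabs and data pools that have
 * already been printed, so pools shared by several contexts are
 * listed only once.
 */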
struct ctx_info {
	int pos;
	bool are_external_pools;
	struct k_mem_slab *tx_slabs[CONFIG_NET_MAX_CONTEXTS];
	struct net_buf_pool *data_pools[CONFIG_NET_MAX_CONTEXTS];
};

#if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_NATIVE)
#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
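/* Return true if the given slab (or pool, when slab is NULL) has
 * already been recorded in the context info, i.e. already printed.
 */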
static bool slab_pool_found_already(struct ctx_info *info,
				    struct k_mem_slab *slab,
				    struct net_buf_pool *pool)
{
	int i;

	for (i = 0; i < CONFIG_NET_MAX_CONTEXTS; i++) {
		if (slab) {
			if (info->tx_slabs[i] == slab) {
				return true;
			}
		} else {
			if (info->data_pools[i] == pool) {
				return true;
			}
		}
	}

	return false;
}
#endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */

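/* net_context_foreach() callback: print any external TX slab or data
 * pool attached to an in-use network context.
 */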
static void context_info(struct net_context *context, void *user_data)
45 {
46 #if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
47 struct net_shell_user_data *data = user_data;
48 const struct shell *sh = data->sh;
49 struct ctx_info *info = data->user_data;
50 struct k_mem_slab *slab;
51 struct net_buf_pool *pool;
52
53 if (!net_context_is_used(context)) {
54 return;
55 }
56
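	/* A context may provide its own TX packet slab; print it once. */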
	if (context->tx_slab) {
		slab = context->tx_slab();

		if (slab_pool_found_already(info, slab, NULL)) {
			return;
		}

#if defined(CONFIG_NET_BUF_POOL_USAGE)
		PR("%p\t%u\t%u\tETX\n",
		   slab, slab->info.num_blocks, k_mem_slab_num_free_get(slab));
#else
		PR("%p\t%u\tETX\n", slab, slab->info.num_blocks);
#endif
		info->are_external_pools = true;
		info->tx_slabs[info->pos] = slab;
	}

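	/* Likewise print an external data buffer pool if the context has one. */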
	if (context->data_pool) {
		pool = context->data_pool();

		if (slab_pool_found_already(info, NULL, pool)) {
			return;
		}

#if defined(CONFIG_NET_BUF_POOL_USAGE)
		PR("%p\t%d\t%ld\tEDATA (%s)\n", pool, pool->buf_count,
		   atomic_get(&pool->avail_count), pool->name);
#else
		PR("%p\t%d\tEDATA\n", pool, pool->buf_count);
#endif
		info->are_external_pools = true;
		info->data_pools[info->pos] = pool;
	}

	info->pos++;
#endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
}
#endif /* CONFIG_NET_OFFLOAD || CONFIG_NET_NATIVE */

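/* Shell handler for "net mem": print kernel slab and net_buf pool usage
 * for the networking stack, plus any per-context pools.
 */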
static int cmd_net_mem(const struct shell *sh, size_t argc, char *argv[])
{
	ARG_UNUSED(argc);
	ARG_UNUSED(argv);

#if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_NATIVE)
	struct k_mem_slab *rx, *tx;
	struct net_buf_pool *rx_data, *tx_data;

	net_pkt_get_info(&rx, &tx, &rx_data, &tx_data);

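	/* With fixed-size data buffers every fragment has the same length;
	 * otherwise the RX/TX data pools are dimensioned in bytes.
	 */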
#if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
	PR("Fragment length %d bytes\n", CONFIG_NET_BUF_DATA_SIZE);
#else
	PR("Fragment RX data pool size %d bytes\n", CONFIG_NET_PKT_BUF_RX_DATA_POOL_SIZE);
	PR("Fragment TX data pool size %d bytes\n", CONFIG_NET_PKT_BUF_TX_DATA_POOL_SIZE);
#endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */

	PR("Network buffer pools:\n");

#if defined(CONFIG_NET_BUF_POOL_USAGE)
	PR("Address\t\tTotal\tAvail\tMaxUsed\tName\n");
#if defined(CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION)
	PR("%p\t%u\t%u\t%u\tRX\n", rx, rx->info.num_blocks,
	   k_mem_slab_num_free_get(rx), rx->info.max_used);

	PR("%p\t%u\t%u\t%u\tTX\n", tx, tx->info.num_blocks,
	   k_mem_slab_num_free_get(tx), tx->info.max_used);
#else
	PR("%p\t%u\t%u\t-\tRX\n",
	   rx, rx->info.num_blocks, k_mem_slab_num_free_get(rx));

	PR("%p\t%u\t%u\t-\tTX\n",
	   tx, tx->info.num_blocks, k_mem_slab_num_free_get(tx));
#endif
	PR("%p\t%d\t%ld\t%d\tRX DATA (%s)\n", rx_data, rx_data->buf_count,
	   atomic_get(&rx_data->avail_count), rx_data->max_used, rx_data->name);

	PR("%p\t%d\t%ld\t%d\tTX DATA (%s)\n", tx_data, tx_data->buf_count,
	   atomic_get(&tx_data->avail_count), tx_data->max_used, tx_data->name);
#else
	PR("Address\t\tTotal\tName\n");

	PR("%p\t%u\tRX\n", rx, rx->info.num_blocks);
	PR("%p\t%u\tTX\n", tx, tx->info.num_blocks);
	PR("%p\t%d\tRX DATA\n", rx_data, rx_data->buf_count);
	PR("%p\t%d\tTX DATA\n", tx_data, tx_data->buf_count);
	PR_INFO("Set %s to enable %s support.\n",
		"CONFIG_NET_BUF_POOL_USAGE", "net_buf allocation");
#endif /* CONFIG_NET_BUF_POOL_USAGE */

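	/* List any per-context (external) pools in addition to the global
	 * RX/TX pools printed above.
	 */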
	if (IS_ENABLED(CONFIG_NET_CONTEXT_NET_PKT_POOL)) {
		struct net_shell_user_data user_data;
		struct ctx_info info;

		(void)memset(&info, 0, sizeof(info));

		user_data.sh = sh;
		user_data.user_data = &info;

		net_context_foreach(context_info, &user_data);

		if (!info.are_external_pools) {
			PR("No external memory pools found.\n");
		}
	}

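	/* Per-slab allocation statistics: successful and failed allocations
	 * with average allocation size and average allocation time.
	 */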
#if defined(CONFIG_NET_PKT_ALLOC_STATS)
	PR("\n");
	PR("Slab\t\tStatus\tAllocs\tAvg size\tAvg time (usec)\n");

	STRUCT_SECTION_FOREACH(net_pkt_alloc_stats_slab, stats) {
		if (stats->ok.count) {
			PR("%p\tOK \t%u\t%llu\t\t%llu\n", stats->slab, stats->ok.count,
			   stats->ok.alloc_sum / (uint64_t)stats->ok.count,
			   k_cyc_to_us_ceil64(stats->ok.time_sum /
					      (uint64_t)stats->ok.count));
		}

		if (stats->fail.count) {
			PR("%p\tFAIL\t%u\t%llu\t\t%llu\n", stats->slab, stats->fail.count,
			   stats->fail.alloc_sum / (uint64_t)stats->fail.count,
			   k_cyc_to_us_ceil64(stats->fail.time_sum /
					      (uint64_t)stats->fail.count));
		}
	}
#endif /* CONFIG_NET_PKT_ALLOC_STATS */

#else
	PR_INFO("Set %s to enable %s support.\n",
		"CONFIG_NET_OFFLOAD or CONFIG_NET_NATIVE", "memory usage");
#endif /* CONFIG_NET_OFFLOAD || CONFIG_NET_NATIVE */

	return 0;
}

SHELL_SUBCMD_ADD((net), mem, NULL,
		 "Print information about network memory usage.",
		 cmd_net_mem, 1, 0);