1 /*
2 * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <stdio.h>
7 #include <string.h>
8 #include "esp_log.h"
9 #include "os/os.h"
10 #include "os/os_mempool.h"
11 #include "esp_hci_driver.h"
12 #include "esp_hci_internal.h"
13 #include "common/hci_driver_util.h"
14
15 #define TAG "HCI_UTIL"
16 #define HCI_DRIVER_UTIL_TX_POOL_NUM \
17 (CONFIG_BT_LE_ACL_BUF_COUNT + CONFIG_BT_LE_HCI_EVT_HI_BUF_COUNT + CONFIG_BT_LE_HCI_EVT_LO_BUF_COUNT)
18
19 #ifndef min
20 #define min(a, b) ((a) < (b) ? (a) : (b))
21 #endif
22
/**
 * @brief Structure representing HCI TX data.
 *
 * One queued item of host-bound traffic: a typed, length-tagged buffer
 * waiting to be handed to the transport in FIFO order.
 */
typedef struct hci_driver_util_tx_entry {
    hci_driver_data_type_t data_type;            ///< Type of the HCI TX data (ACL or event).
    uint8_t *data;                               ///< Pointer to the TX data (an os_mbuf chain for ACL, a flat buffer for events).
    uint32_t length;                             ///< Length of the TX data in bytes.
    STAILQ_ENTRY(hci_driver_util_tx_entry) next; ///< Next element in the linked list.
} hci_driver_util_tx_entry_t;

/* The list for hci_driver_util_tx_entry */
STAILQ_HEAD(hci_driver_util_tx_list, hci_driver_util_tx_entry);

typedef struct {
    struct hci_driver_util_tx_list tx_head;      ///< FIFO of entries waiting to be transmitted.
    struct hci_driver_util_tx_entry *cur_tx_entry; ///< Entry currently being sent, NULL when idle.
    uint32_t cur_tx_off;                         ///< Byte offset already consumed from cur_tx_entry.
    struct os_mempool *tx_entry_pool;            ///< Pool header for tx entries (heap-allocated).
    uint8_t *tx_entry_mem;                       ///< Backing storage for tx_entry_pool (heap-allocated).
} hci_driver_util_env_t;

/* Module-wide state; zeroed on init/deinit. Shared between the controller
 * task (enqueue) and the HCI task (dequeue), hence the critical sections
 * around list manipulation below. */
static hci_driver_util_env_t s_hci_driver_util_env;
45
46 static void
hci_driver_util_memory_deinit(void)47 hci_driver_util_memory_deinit(void)
48 {
49 if (s_hci_driver_util_env.tx_entry_pool) {
50 free(s_hci_driver_util_env.tx_entry_pool);
51 s_hci_driver_util_env.tx_entry_pool = NULL;
52 }
53 if (s_hci_driver_util_env.tx_entry_mem) {
54 free(s_hci_driver_util_env.tx_entry_mem);
55 s_hci_driver_util_env.tx_entry_mem = NULL;
56 }
57 }
58
59 static int
hci_driver_util_memory_init(void)60 hci_driver_util_memory_init(void)
61 {
62 int rc;
63
64 s_hci_driver_util_env.tx_entry_pool = (struct os_mempool *)malloc(sizeof(struct os_mempool));
65 if (!s_hci_driver_util_env.tx_entry_pool) {
66 ESP_LOGE(TAG, "No memory for tx pool");
67 goto init_err;
68 }
69
70 s_hci_driver_util_env.tx_entry_mem = malloc(OS_MEMPOOL_SIZE(HCI_DRIVER_UTIL_TX_POOL_NUM,
71 sizeof(hci_driver_util_tx_entry_t)) * sizeof(os_membuf_t));
72 if (!s_hci_driver_util_env.tx_entry_mem) {
73 ESP_LOGE(TAG, "No memory for tx pool buffer");
74 goto init_err;
75 }
76
77 rc = os_mempool_init(s_hci_driver_util_env.tx_entry_pool, HCI_DRIVER_UTIL_TX_POOL_NUM,
78 sizeof(hci_driver_util_tx_entry_t), s_hci_driver_util_env.tx_entry_mem,
79 "hci_tx_entry_pool");
80 if (rc) {
81 ESP_LOGE(TAG, "Failed to initialize tx pool");
82 goto init_err;
83 }
84
85 return 0;
86
87 init_err:
88 hci_driver_util_memory_deinit();
89 return -1;
90 }
91
92 void
hci_driver_util_tx_list_enqueue(hci_driver_data_type_t type,uint8_t * data,uint32_t len)93 hci_driver_util_tx_list_enqueue(hci_driver_data_type_t type, uint8_t *data, uint32_t len)
94 {
95 os_sr_t sr;
96 hci_driver_util_tx_entry_t *tx_entry;
97
98 tx_entry = os_memblock_get(s_hci_driver_util_env.tx_entry_pool);
99 assert(tx_entry != NULL);
100 tx_entry->data_type = type;
101 tx_entry->data = data;
102 tx_entry->length = len;
103 /* If the txbuf is command status event or command complete event, we should send firstly.
104 * The tx list maybe used in the controller task and hci task. Therefore, enter critical area.
105 */
106 if ((type == HCI_DRIVER_TYPE_EVT) && ((data[0] == 0x0E) || (data[0] == 0x0F))) {
107 OS_ENTER_CRITICAL(sr);
108 STAILQ_INSERT_HEAD(&s_hci_driver_util_env.tx_head, tx_entry, next);
109 OS_EXIT_CRITICAL(sr);
110 } else {
111 OS_ENTER_CRITICAL(sr);
112 STAILQ_INSERT_TAIL(&s_hci_driver_util_env.tx_head, tx_entry, next);
113 OS_EXIT_CRITICAL(sr);
114 }
115 }
116
/**
 * @brief Hand the transport the next chunk of pending HCI TX data.
 *
 * Drains exactly one entry at a time. The very first call for an entry
 * returns a single byte taken from the entry's data_type field (see note
 * below); subsequent calls return payload chunks of at most @p max_tx_len
 * bytes until the entry is fully consumed, at which point its buffer is
 * freed and the entry is returned to the pool.
 *
 * @param max_tx_len  Maximum number of bytes the caller can accept this call.
 * @param[out] tx_data     Set to the address of the bytes to transmit
 *                         (only valid when the return value is non-zero).
 * @param[out] last_frame  Set to true when the returned chunk completes the
 *                         current entry; the caller must not call this API
 *                         again until that chunk has been fully sent.
 * @return Number of bytes available at *tx_data; 0 when nothing is pending.
 */
uint32_t
hci_driver_util_tx_list_dequeue(uint32_t max_tx_len, void **tx_data, bool *last_frame)
{
    os_sr_t sr;
    uint32_t tx_len;
    uint32_t data_len;
    uint16_t out_off;
    struct os_mbuf *om;
    hci_driver_util_tx_entry_t *tx_entry;

    /* Check if there is any remaining data that hasn't been sent completely. If it has been completed,
     * free the corresponding memory. Therefore, the HCI TX entry needs to be sent one by one; multiple
     * entries cannot be sent together.
     */
    tx_len = 0;
    tx_entry = s_hci_driver_util_env.cur_tx_entry;
    if (tx_entry) {
        data_len = tx_entry->length;
        if (tx_entry->data_type == HCI_DRIVER_TYPE_ACL) {
            om = (struct os_mbuf *)tx_entry->data;
            if (s_hci_driver_util_env.cur_tx_off >= data_len) {
                /* Entire chain already sent; release the controller's mbufs. */
                os_mbuf_free_chain(om);
            } else {
                /* Seek to the mbuf containing the current offset; at most the
                 * remainder of that single mbuf is handed out per call. */
                om = os_mbuf_off(om, s_hci_driver_util_env.cur_tx_off, &out_off);
                tx_len = min(max_tx_len, om->om_len - out_off);
                *tx_data = (void *)&om->om_data[out_off];
            }
        } else if (tx_entry->data_type == HCI_DRIVER_TYPE_EVT) {
            if (s_hci_driver_util_env.cur_tx_off >= data_len) {
                /* Event fully sent; return the flat buffer to the controller. */
                r_ble_hci_trans_buf_free(tx_entry->data);
            } else {
                tx_len = min(max_tx_len, data_len - s_hci_driver_util_env.cur_tx_off);
                *tx_data = &tx_entry->data[s_hci_driver_util_env.cur_tx_off];
            }
        } else {
            /* Only ACL and EVT entries are ever enqueued. */
            assert(0);
        }
        /* If this is the last frame, inform the invoker not to call this API until the current data
         * has been completely sent.
         */
        if (tx_len) {
            s_hci_driver_util_env.cur_tx_off += tx_len;

            if (s_hci_driver_util_env.cur_tx_off >= data_len) {
                *last_frame = true;
            } else {
                *last_frame = false;
            }
        } else {
            /* Entry fully consumed and its buffer freed above; recycle it. */
            os_memblock_put(s_hci_driver_util_env.tx_entry_pool, (void *)tx_entry);
            s_hci_driver_util_env.cur_tx_entry = NULL;
        }
    }

    /* Find a new entry. */
    /* NOTE(review): STAILQ_EMPTY is read outside the critical section; a
     * concurrent enqueue between the check and the lock is merely picked up
     * on the next call, which looks intentional — confirm this function has
     * a single consumer. */
    if (!tx_len && !STAILQ_EMPTY(&s_hci_driver_util_env.tx_head)) {
        OS_ENTER_CRITICAL(sr);
        tx_entry = STAILQ_FIRST(&s_hci_driver_util_env.tx_head);
        STAILQ_REMOVE_HEAD(&s_hci_driver_util_env.tx_head, next);
        OS_EXIT_CRITICAL(sr);

        /* Expose one byte of the data_type field as the first frame —
         * presumably the HCI packet type indicator sent ahead of the payload
         * (relies on data_type being the first struct member); confirm
         * against the transport's framing in esp_hci_driver.h. */
        *tx_data = &tx_entry->data_type;
        s_hci_driver_util_env.cur_tx_entry = tx_entry;
        s_hci_driver_util_env.cur_tx_off = 0;
        tx_len = 1;
        *last_frame = false;
    }

    return tx_len;
}
187
188 int
hci_driver_util_init(void)189 hci_driver_util_init(void)
190 {
191 memset(&s_hci_driver_util_env, 0, sizeof(hci_driver_util_env_t));
192
193 if (hci_driver_util_memory_init()) {
194 return -1;
195 }
196
197 STAILQ_INIT(&s_hci_driver_util_env.tx_head);
198
199 return 0;
200 }
201
202 void
hci_driver_util_deinit(void)203 hci_driver_util_deinit(void)
204 {
205 hci_driver_util_tx_entry_t *tx_entry;
206 hci_driver_util_tx_entry_t *next_entry;
207
208 /* Free all of controller buffers which haven't been sent yet. The whole mempool will be freed.
209 * Therefore, it's unnecessary to put the tx_entry into mempool.
210 */
211 tx_entry = STAILQ_FIRST(&s_hci_driver_util_env.tx_head);
212 while (tx_entry) {
213 next_entry = STAILQ_NEXT(tx_entry, next);
214 if (tx_entry->data_type == HCI_DRIVER_TYPE_ACL) {
215 os_mbuf_free_chain((struct os_mbuf *)tx_entry->data);
216 } else if (tx_entry->data_type == HCI_DRIVER_TYPE_EVT) {
217 r_ble_hci_trans_buf_free(tx_entry->data);
218 }
219 tx_entry = next_entry;
220 }
221
222 hci_driver_util_memory_deinit();
223
224 memset(&s_hci_driver_util_env, 0, sizeof(hci_driver_util_env_t));
225 }
226