1 /*
2 * Copyright (c) 2017 Nordic Semiconductor ASA
3 * Copyright (c) 2015 Intel Corporation
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <stddef.h>
9 #include <stdint.h>
10 #include <string.h>
11
12 #include <zephyr/autoconf.h>
13 #include <zephyr/bluetooth/hci.h>
14 #include <zephyr/bluetooth/buf.h>
15 #include <zephyr/bluetooth/hci_types.h>
16 #include <zephyr/bluetooth/l2cap.h>
17 #include <zephyr/kernel.h>
18 #include <zephyr/logging/log.h>
19 #include <zephyr/net_buf.h>
20 #include <zephyr/sys/__assert.h>
21 #include <zephyr/sys/atomic.h>
22 #include <zephyr/sys/util_macro.h>
23 #include <zephyr/sys_clock.h>
24
25 #include "buf_view.h"
26 #include "common/hci_common_internal.h"
27 #include "conn_internal.h"
28 #include "hci_core.h"
29 #include "iso_internal.h"
30
31 LOG_MODULE_REGISTER(bt_buf, CONFIG_BT_LOG_LEVEL);
32
33 /* Events have a length field of 1 byte. This size fits all events.
34 *
35 * It's true that we don't put all kinds of events there (yet). However, the
36 * command complete event has an arbitrary payload, depending on opcode.
37 */
38 #define SYNC_EVT_SIZE (BT_BUF_RESERVE + BT_HCI_EVT_HDR_SIZE + 255)
39
40 static atomic_ptr_t buf_rx_freed_cb;
41
buf_rx_freed_notify(enum bt_buf_type mask)42 static void buf_rx_freed_notify(enum bt_buf_type mask)
43 {
44 bt_buf_rx_freed_cb_t cb;
45 bool in_isr = k_is_in_isr();
46
47 if (!in_isr) {
48 k_sched_lock();
49 }
50
51 cb = (bt_buf_rx_freed_cb_t)atomic_ptr_get(&buf_rx_freed_cb);
52
53 if (cb != NULL) {
54 cb(mask);
55 }
56
57 if (!in_isr) {
58 k_sched_unlock();
59 }
60 }
61
#if defined(CONFIG_BT_ISO_RX)
/* Trampoline registered with the ISO layer (see bt_buf_rx_freed_cb_set):
 * translates an ISO RX buffer-freed event into the generic notification,
 * tagged with BT_BUF_ISO_IN.
 */
static void iso_rx_freed_cb(void)
{
	buf_rx_freed_notify(BT_BUF_ISO_IN);
}
#endif
68
/* Pool for RX HCI buffers that are always freed by `bt_recv`
 * before it returns.
 *
 * A singleton buffer shall be sufficient for correct operation.
 * The buffer count may be increased as an optimization to allow
 * the HCI transport to fill buffers in parallel with `bt_recv`
 * consuming them.
 */
NET_BUF_POOL_FIXED_DEFINE(sync_evt_pool, 1, SYNC_EVT_SIZE, 0, NULL);

/* Pool for events the Host is allowed to drop under memory pressure
 * (e.g. advertising reports); allocated from bt_buf_get_evt() when the
 * transport marks the event discardable.
 */
NET_BUF_POOL_FIXED_DEFINE(discardable_pool, CONFIG_BT_BUF_EVT_DISCARDABLE_COUNT,
			  BT_BUF_EVT_SIZE(CONFIG_BT_BUF_EVT_DISCARDABLE_SIZE),
			  0, NULL);
82
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
/* Destroy callback for inbound ACL buffers when Host flow control is on.
 *
 * NOTE(review): net_buf_destroy() is intentionally not called here; the
 * buffer appears to be handed over to the Host Number Of Completed Packets
 * machinery, which is presumed to return it to the pool once the credit has
 * been reported to the Controller — confirm against
 * bt_hci_host_num_completed_packets().
 */
static void acl_in_pool_destroy(struct net_buf *buf)
{
	bt_hci_host_num_completed_packets(buf);
	buf_rx_freed_notify(BT_BUF_ACL_IN);
}

/* Destroy callback for event buffers: return the buffer to its pool, then
 * tell any registered listener that an event buffer is available again.
 */
static void evt_pool_destroy(struct net_buf *buf)
{
	net_buf_destroy(buf);
	buf_rx_freed_notify(BT_BUF_EVT);
}

/* Dedicated ACL-in pool; user-data holds per-buffer RX metadata. */
NET_BUF_POOL_DEFINE(acl_in_pool, (BT_BUF_ACL_RX_COUNT_EXTRA + BT_BUF_HCI_ACL_RX_COUNT),
		    BT_BUF_ACL_SIZE(CONFIG_BT_BUF_ACL_RX_SIZE), sizeof(struct bt_conn_rx),
		    acl_in_pool_destroy);

NET_BUF_POOL_FIXED_DEFINE(evt_pool, CONFIG_BT_BUF_EVT_RX_COUNT, BT_BUF_EVT_RX_SIZE, 0,
			  evt_pool_destroy);
#else
/* Destroy callback for the shared RX pool used when flow control is off. */
static void hci_rx_pool_destroy(struct net_buf *buf)
{
	net_buf_destroy(buf);

	/* When ACL Flow Control is disabled, a single pool is used for events and acl data.
	 * Therefore the callback will always notify about both types of buffers, BT_BUF_EVT and
	 * BT_BUF_ACL_IN.
	 */
	buf_rx_freed_notify(BT_BUF_EVT | BT_BUF_ACL_IN);
}

NET_BUF_POOL_FIXED_DEFINE(hci_rx_pool, BT_BUF_RX_COUNT, BT_BUF_RX_SIZE,
			  sizeof(struct bt_conn_rx), hci_rx_pool_destroy);
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
117
/* Allocate an RX buffer of the requested type (event, ACL-in or ISO-in),
 * waiting up to `timeout`. On success the buffer already carries its H:4
 * packet-type indicator as the first byte. Returns NULL on timeout.
 */
struct net_buf *bt_buf_get_rx(enum bt_buf_type type, k_timeout_t timeout)
{
	struct net_buf *buf;

	__ASSERT(type == BT_BUF_EVT || type == BT_BUF_ACL_IN ||
		 type == BT_BUF_ISO_IN, "Invalid buffer type requested");

	/* ISO data is served from its own dedicated pool. */
	if (IS_ENABLED(CONFIG_BT_ISO_RX) && type == BT_BUF_ISO_IN) {
		return bt_iso_get_rx(timeout);
	}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	/* With Host flow control, events and ACL data use separate pools. */
	buf = net_buf_alloc((type == BT_BUF_EVT) ? &evt_pool : &acl_in_pool,
			    timeout);
#else
	buf = net_buf_alloc(&hci_rx_pool, timeout);
#endif

	if (buf != NULL) {
		/* Prefix with the H:4 type byte so the receive path can
		 * demultiplex the packet.
		 */
		net_buf_add_u8(buf, bt_buf_type_to_h4(type));
	}

	return buf;
}
144
/* Register (or clear, with cb == NULL) the callback invoked whenever an RX
 * buffer is returned to a pool. The ISO layer's registration is kept in
 * sync: set while a callback is installed, cleared otherwise.
 */
void bt_buf_rx_freed_cb_set(bt_buf_rx_freed_cb_t cb)
{
	atomic_ptr_set(&buf_rx_freed_cb, (void *)cb);

#if defined(CONFIG_BT_ISO_RX)
	if (cb != NULL) {
		bt_iso_buf_rx_freed_cb_set(iso_rx_freed_cb);
	} else {
		bt_iso_buf_rx_freed_cb_set(NULL);
	}
#endif
}
153
/* Allocate a buffer for an incoming HCI event.
 *
 * Events the Host must always be able to receive (command status/complete
 * and, with connections/ISO enabled, number-of-completed-packets) come from
 * the reserved sync_evt_pool; for those the `discardable` flag is ignored.
 * Other events use the discardable pool when the transport marks them
 * discardable, otherwise the regular event RX path.
 */
struct net_buf *bt_buf_get_evt(uint8_t evt, bool discardable,
			       k_timeout_t timeout)
{
	struct net_buf *buf;
	bool is_sync = (evt == BT_HCI_EVT_CMD_STATUS ||
			evt == BT_HCI_EVT_CMD_COMPLETE);

#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
	is_sync = is_sync || (evt == BT_HCI_EVT_NUM_COMPLETED_PACKETS);
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */

	if (is_sync) {
		buf = net_buf_alloc(&sync_evt_pool, timeout);
	} else if (discardable) {
		/* Discardable, decided in Host-side HCI Transport driver. */
		buf = net_buf_alloc(&discardable_pool, timeout);
	} else {
		return bt_buf_get_rx(BT_BUF_EVT, timeout);
	}

	if (buf) {
		net_buf_add_u8(buf, BT_HCI_H4_EVT);
	}

	return buf;
}
182
#ifdef ZTEST_UNITTEST
/* Test-only accessors exposing the static pools to unit tests. */
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
struct net_buf_pool *bt_buf_get_evt_pool(void)
{
	return &evt_pool;
}

struct net_buf_pool *bt_buf_get_acl_in_pool(void)
{
	return &acl_in_pool;
}
#else
struct net_buf_pool *bt_buf_get_hci_rx_pool(void)
{
	return &hci_rx_pool;
}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
struct net_buf_pool *bt_buf_get_num_complete_pool(void)
{
	return &sync_evt_pool;
}
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
#endif /* ZTEST_UNITTEST */
208
/* Turn `view` into a zero-copy window over the first `len` bytes of
 * `parent`'s payload (plus all of parent's headroom, so data can still be
 * prepended to the view). The consumed bytes are pulled from `parent`, and
 * `parent` is "clipped" (data = NULL, size = 0) — with the original values
 * saved in `meta` — so that bt_buf_has_view() reports an outstanding view
 * and a second view cannot be created before bt_buf_destroy_view() runs.
 *
 * Ownership: the caller's reference to `parent` is moved into `meta` and is
 * released by bt_buf_destroy_view(). Returns `view`.
 */
struct net_buf *bt_buf_make_view(struct net_buf *view,
				 struct net_buf *parent,
				 size_t len,
				 struct bt_buf_view_meta *meta)
{
	__ASSERT_NO_MSG(len);
	__ASSERT_NO_MSG(view);
	/* The whole point of this API is to allow prepending data. If the
	 * headroom is 0, that will not happen.
	 */
	__ASSERT_NO_MSG(net_buf_headroom(parent) > 0);

	__ASSERT_NO_MSG(!bt_buf_has_view(parent));

	LOG_DBG("make-view %p viewsize %zu meta %p", view, len, meta);

	/* Share parent's data pointer/len/size with the view, then shrink the
	 * view to exactly headroom + len bytes of capacity and len of payload.
	 */
	net_buf_simple_clone(&parent->b, &view->b);
	view->size = net_buf_headroom(parent) + len;
	view->len = len;
	view->flags = NET_BUF_EXTERNAL_DATA;

	/* we have a view, eat `len`'s worth of data from the parent */
	(void)net_buf_pull(parent, len);

	meta->backup.data = parent->data;
	parent->data = NULL;

	meta->backup.size = parent->size;
	parent->size = 0;

	/* The ref to `parent` is moved in by passing `parent` as argument. */
	/* save backup & "clip" the buffer so the next `make_view` will fail */
	meta->parent = parent;
	parent = NULL;

	return view;
}
246
/* Tear down a view created by bt_buf_make_view(): restore the parent's
 * clipped data/size from the backup in `meta`, drop the parent reference
 * that was moved into the view, reset `meta` for reuse, and return `view`
 * itself to its pool via net_buf_destroy().
 */
void bt_buf_destroy_view(struct net_buf *view, struct bt_buf_view_meta *meta)
{
	LOG_DBG("destroy-view %p meta %p", view, meta);
	__ASSERT_NO_MSG(meta->parent);

	/* "unclip" the parent buf */
	meta->parent->data = meta->backup.data;
	meta->parent->size = meta->backup.size;

	/* Release the parent reference taken over by bt_buf_make_view(). */
	net_buf_unref(meta->parent);

	memset(meta, 0, sizeof(*meta));
	net_buf_destroy(view);
}
261
bt_buf_has_view(const struct net_buf * parent)262 bool bt_buf_has_view(const struct net_buf *parent)
263 {
264 /* This is enforced by `make_view`. see comment there. */
265 return parent->size == 0 && parent->data == NULL;
266 }
267