/*
 * Copyright (c) 2017 Nordic Semiconductor ASA
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/buf.h>
#include <zephyr/bluetooth/l2cap.h>

#include "common/hci_common_internal.h"

#include "buf_view.h"
#include "hci_core.h"
#include "conn_internal.h"
#include "iso_internal.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_buf, CONFIG_BT_LOG_LEVEL);

/* Events have a length field of 1 byte. This size fits all events.
 *
 * It's true that we don't put all kinds of events there (yet). However, the
 * command complete event has an arbitrary payload, depending on opcode.
 */
#define SYNC_EVT_SIZE (BT_BUF_RESERVE + BT_HCI_EVT_HDR_SIZE + 255)
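/* Note on the arithmetic: BT_HCI_EVT_HDR_SIZE covers the event code plus the
 * one-byte parameter-length field, and 255 is the largest payload that a
 * one-byte length field can describe. BT_BUF_RESERVE is transport headroom.
 */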

static bt_buf_rx_freed_cb_t buf_rx_freed_cb;

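/* Invoke the registered "RX buffer freed" callback, if any. The scheduler
 * lock pairs with the one in bt_buf_rx_freed_cb_set() so the callback
 * pointer cannot change while it is being read and called.
 */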
static void buf_rx_freed_notify(enum bt_buf_type mask)
{
	k_sched_lock();

	if (buf_rx_freed_cb) {
		buf_rx_freed_cb(mask);
	}

	k_sched_unlock();
}

#if defined(CONFIG_BT_ISO_RX)
static void iso_rx_freed_cb(void)
{
	buf_rx_freed_notify(BT_BUF_ISO_IN);
}
#endif

/* Pool for RX HCI buffers that are always freed by `bt_recv`
 * before it returns.
 *
 * A single buffer is sufficient for correct operation. The buffer count may
 * be increased as an optimization to allow the HCI transport to fill buffers
 * in parallel with `bt_recv` consuming them.
 */
NET_BUF_POOL_FIXED_DEFINE(sync_evt_pool, 1, SYNC_EVT_SIZE, sizeof(struct bt_buf_data), NULL);

NET_BUF_POOL_FIXED_DEFINE(discardable_pool, CONFIG_BT_BUF_EVT_DISCARDABLE_COUNT,
			  BT_BUF_EVT_SIZE(CONFIG_BT_BUF_EVT_DISCARDABLE_SIZE),
			  sizeof(struct bt_buf_data), NULL);

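/* With HCI ACL flow control enabled, the host reports each freed ACL RX
 * buffer back to the controller (Host Number of Completed Packets), so the
 * ACL pool needs a destroy callback distinct from the event pool's.
 */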
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
static void acl_in_pool_destroy(struct net_buf *buf)
{
	bt_hci_host_num_completed_packets(buf);
	buf_rx_freed_notify(BT_BUF_ACL_IN);
}

static void evt_pool_destroy(struct net_buf *buf)
{
	net_buf_destroy(buf);
	buf_rx_freed_notify(BT_BUF_EVT);
}

NET_BUF_POOL_DEFINE(acl_in_pool, (BT_BUF_ACL_RX_COUNT_EXTRA + BT_BUF_HCI_ACL_RX_COUNT),
		    BT_BUF_ACL_SIZE(CONFIG_BT_BUF_ACL_RX_SIZE), sizeof(struct acl_data),
		    acl_in_pool_destroy);

NET_BUF_POOL_FIXED_DEFINE(evt_pool, CONFIG_BT_BUF_EVT_RX_COUNT, BT_BUF_EVT_RX_SIZE,
			  sizeof(struct bt_buf_data), evt_pool_destroy);
#else
static void hci_rx_pool_destroy(struct net_buf *buf)
{
	net_buf_destroy(buf);

	/* When ACL flow control is disabled, a single pool is used for both
	 * events and ACL data. The callback therefore always notifies about
	 * both buffer types, BT_BUF_EVT and BT_BUF_ACL_IN.
	 */
	buf_rx_freed_notify(BT_BUF_EVT | BT_BUF_ACL_IN);
}

NET_BUF_POOL_FIXED_DEFINE(hci_rx_pool, BT_BUF_RX_COUNT, BT_BUF_RX_SIZE, sizeof(struct acl_data),
			  hci_rx_pool_destroy);
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

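/* Allocate an RX buffer of the given type and reserve transport headroom.
 * Illustrative driver-side usage (a sketch, not code from this file):
 *
 *	struct net_buf *buf = bt_buf_get_rx(BT_BUF_ACL_IN, K_FOREVER);
 *
 *	net_buf_add_mem(buf, data, len);
 *	bt_recv(buf);
 */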
struct net_buf *bt_buf_get_rx(enum bt_buf_type type, k_timeout_t timeout)
{
	struct net_buf *buf;

	__ASSERT(type == BT_BUF_EVT || type == BT_BUF_ACL_IN ||
		 type == BT_BUF_ISO_IN, "Invalid buffer type requested");

	if (IS_ENABLED(CONFIG_BT_ISO_RX) && type == BT_BUF_ISO_IN) {
		return bt_iso_get_rx(timeout);
	}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	if (type == BT_BUF_EVT) {
		buf = net_buf_alloc(&evt_pool, timeout);
	} else {
		buf = net_buf_alloc(&acl_in_pool, timeout);
	}
#else
	buf = net_buf_alloc(&hci_rx_pool, timeout);
#endif

	if (buf) {
		net_buf_reserve(buf, BT_BUF_RESERVE);
		bt_buf_set_type(buf, type);
	}

	return buf;
}

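/* Register (or clear, with NULL) the callback invoked when an RX buffer is
 * freed. For ISO, the notification is forwarded through iso_rx_freed_cb so
 * that freed ISO buffers are reported as BT_BUF_ISO_IN.
 */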
void bt_buf_rx_freed_cb_set(bt_buf_rx_freed_cb_t cb)
{
	k_sched_lock();

	buf_rx_freed_cb = cb;

#if defined(CONFIG_BT_ISO_RX)
	bt_iso_buf_rx_freed_cb_set(cb != NULL ? iso_rx_freed_cb : NULL);
#endif

	k_sched_unlock();
}

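/* Command Status, Command Complete and Number of Completed Packets events are
 * allocated from the dedicated sync_evt_pool, which is safe because such
 * buffers are always freed by `bt_recv` before it returns (see the pool's
 * definition above). Other events use the regular event pool, or the
 * discardable pool when the caller marks the event as discardable.
 */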
struct net_buf *bt_buf_get_evt(uint8_t evt, bool discardable,
			       k_timeout_t timeout)
{
	struct net_buf *buf;

	switch (evt) {
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
	case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
	case BT_HCI_EVT_CMD_STATUS:
	case BT_HCI_EVT_CMD_COMPLETE:
		buf = net_buf_alloc(&sync_evt_pool, timeout);
		break;
	default:
		if (discardable) {
			buf = net_buf_alloc(&discardable_pool, timeout);
		} else {
			return bt_buf_get_rx(BT_BUF_EVT, timeout);
		}
	}

	if (buf) {
		net_buf_reserve(buf, BT_BUF_RESERVE);
		bt_buf_set_type(buf, BT_BUF_EVT);
	}

	return buf;
}

#ifdef ZTEST_UNITTEST
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
struct net_buf_pool *bt_buf_get_evt_pool(void)
{
	return &evt_pool;
}

struct net_buf_pool *bt_buf_get_acl_in_pool(void)
{
	return &acl_in_pool;
}
#else
struct net_buf_pool *bt_buf_get_hci_rx_pool(void)
{
	return &hci_rx_pool;
}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

#if defined(CONFIG_BT_BUF_EVT_DISCARDABLE_COUNT)
struct net_buf_pool *bt_buf_get_discardable_pool(void)
{
	return &discardable_pool;
}
#endif /* CONFIG_BT_BUF_EVT_DISCARDABLE_COUNT */

#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
struct net_buf_pool *bt_buf_get_num_complete_pool(void)
{
	return &sync_evt_pool;
}
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
#endif /* ZTEST_UNITTEST */

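/* Create a zero-copy "view" into the start of `parent`. The view's data
 * pointer aliases the parent's current data; the parent is advanced past
 * `len` bytes and "clipped" (data = NULL, size = 0) so that only one view
 * can exist at a time. bt_buf_destroy_view() restores the parent from `meta`.
 */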
struct net_buf *bt_buf_make_view(struct net_buf *view,
				 struct net_buf *parent,
				 size_t len,
				 struct bt_buf_view_meta *meta)
{
	__ASSERT_NO_MSG(len);
	__ASSERT_NO_MSG(view);
	/* The whole point of this API is to allow prepending data. If the
	 * headroom is 0, that will not happen.
	 */
	__ASSERT_NO_MSG(net_buf_headroom(parent) > 0);

	__ASSERT_NO_MSG(!bt_buf_has_view(parent));

	LOG_DBG("make-view %p viewsize %zu meta %p", view, len, meta);

	net_buf_simple_clone(&parent->b, &view->b);
	view->size = net_buf_headroom(parent) + len;
	view->len = len;
	view->flags = NET_BUF_EXTERNAL_DATA;

	/* We have a view; eat `len`'s worth of data from the parent. */
	(void)net_buf_pull(parent, len);

	meta->backup.data = parent->data;
	parent->data = NULL;

	meta->backup.size = parent->size;
	parent->size = 0;

	/* The ref to `parent` is moved in by passing `parent` as argument. */
	/* Save the backup & "clip" the buffer so the next `make_view` will fail. */
	meta->parent = parent;
	parent = NULL;

	return view;
}

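/* Tear down a view created by bt_buf_make_view(): restore the parent's data
 * pointer and size from the backup, drop the reference that was moved into
 * `meta`, and return the view's memory to its pool.
 */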
void bt_buf_destroy_view(struct net_buf *view, struct bt_buf_view_meta *meta)
{
	LOG_DBG("destroy-view %p meta %p", view, meta);
	__ASSERT_NO_MSG(meta->parent);

	/* "Unclip" the parent buf. */
	meta->parent->data = meta->backup.data;
	meta->parent->size = meta->backup.size;

	net_buf_unref(meta->parent);

	memset(meta, 0, sizeof(*meta));
	net_buf_destroy(view);
}

bool bt_buf_has_view(const struct net_buf *parent)
{
	/* This is enforced by `make_view`; see the comment there. */
	return parent->size == 0 && parent->data == NULL;
}