1 /*
2  * Copyright (c) 2017 Nordic Semiconductor ASA
3  * Copyright (c) 2015 Intel Corporation
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/bluetooth/buf.h>
9 #include <zephyr/bluetooth/l2cap.h>
10 
11 #include "buf_view.h"
12 #include "hci_core.h"
13 #include "conn_internal.h"
14 #include "iso_internal.h"
15 
16 #include <zephyr/bluetooth/hci.h>
17 
18 #include <zephyr/logging/log.h>
19 LOG_MODULE_REGISTER(bt_buf, CONFIG_BT_LOG_LEVEL);
20 
/* Events have a length field of 1 byte. This size fits all events.
 *
 * It's true that we don't put all kinds of events there (yet). However, the
 * command complete event has an arbitrary payload, depending on opcode.
 */
#define SYNC_EVT_SIZE (BT_BUF_RESERVE + BT_HCI_EVT_HDR_SIZE + 255)

/* Optional callback invoked (under the scheduler lock, see
 * buf_rx_freed_notify()) whenever an RX buffer is returned to its pool.
 * NULL when no listener is registered via bt_buf_rx_freed_cb_set().
 */
static bt_buf_rx_freed_cb_t buf_rx_freed_cb;
29 
buf_rx_freed_notify(enum bt_buf_type mask)30 static void buf_rx_freed_notify(enum bt_buf_type mask)
31 {
32 	k_sched_lock();
33 
34 	if (buf_rx_freed_cb) {
35 		buf_rx_freed_cb(mask);
36 	}
37 
38 	k_sched_unlock();
39 }
40 
#if defined(CONFIG_BT_ISO_RX)
/* Trampoline registered with bt_iso_buf_rx_freed_cb_set(): translates an
 * ISO RX buffer free event into the generic buf_rx_freed_cb notification
 * with the BT_BUF_ISO_IN mask.
 */
static void iso_rx_freed_cb(void)
{
	buf_rx_freed_notify(BT_BUF_ISO_IN);
}
#endif
47 
/* Pool for RX HCI buffers that are always freed by `bt_recv`
 * before it returns.
 *
 * A singleton buffer shall be sufficient for correct operation.
 * The buffer count may be increased as an optimization to allow
 * the HCI transport to fill buffers in parallel with `bt_recv`
 * consuming them.
 */
NET_BUF_POOL_FIXED_DEFINE(sync_evt_pool, 1, SYNC_EVT_SIZE, sizeof(struct bt_buf_data), NULL);

/* Pool for events the host may drop under memory pressure (see the
 * `discardable` path in bt_buf_get_evt()). Sized by Kconfig.
 */
NET_BUF_POOL_FIXED_DEFINE(discardable_pool, CONFIG_BT_BUF_EVT_DISCARDABLE_COUNT,
			  BT_BUF_EVT_SIZE(CONFIG_BT_BUF_EVT_DISCARDABLE_SIZE),
			  sizeof(struct bt_buf_data), NULL);
61 
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
/* Destroy callback for ACL RX buffers when host flow control is enabled.
 *
 * NOTE(review): unlike evt_pool_destroy() this does not call
 * net_buf_destroy() itself — presumably bt_hci_host_num_completed_packets()
 * takes over the buffer to report Host Number Of Completed Packets and
 * recycles it; confirm against its implementation in hci_core.c.
 */
static void acl_in_pool_destroy(struct net_buf *buf)
{
	bt_hci_host_num_completed_packets(buf);
	buf_rx_freed_notify(BT_BUF_ACL_IN);
}

/* Destroy callback for event buffers: return the buffer to its pool, then
 * notify any listener that an event buffer became available.
 */
static void evt_pool_destroy(struct net_buf *buf)
{
	net_buf_destroy(buf);
	buf_rx_freed_notify(BT_BUF_EVT);
}

NET_BUF_POOL_DEFINE(acl_in_pool, BT_BUF_ACL_RX_COUNT, BT_BUF_ACL_SIZE(CONFIG_BT_BUF_ACL_RX_SIZE),
		    sizeof(struct acl_data), acl_in_pool_destroy);

NET_BUF_POOL_FIXED_DEFINE(evt_pool, CONFIG_BT_BUF_EVT_RX_COUNT, BT_BUF_EVT_RX_SIZE,
			  sizeof(struct bt_buf_data), evt_pool_destroy);
#else
/* Destroy callback for the combined event/ACL RX pool used when host flow
 * control is disabled.
 */
static void hci_rx_pool_destroy(struct net_buf *buf)
{
	net_buf_destroy(buf);

	/* When ACL Flow Control is disabled, a single pool is used for events and acl data.
	 * Therefore the callback will always notify about both types of buffers, BT_BUF_EVT and
	 * BT_BUF_ACL_IN.
	 */
	buf_rx_freed_notify(BT_BUF_EVT | BT_BUF_ACL_IN);
}

NET_BUF_POOL_FIXED_DEFINE(hci_rx_pool, BT_BUF_RX_COUNT, BT_BUF_RX_SIZE, sizeof(struct acl_data),
			  hci_rx_pool_destroy);
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
95 
/* Allocate an RX buffer of the requested @p type from the matching pool.
 *
 * ISO buffers are delegated to the ISO subsystem. Otherwise the pool is
 * chosen according to whether host ACL flow control is compiled in.
 *
 * @param type    One of BT_BUF_EVT, BT_BUF_ACL_IN or BT_BUF_ISO_IN.
 * @param timeout How long to wait for a free buffer.
 *
 * @return Buffer with headroom reserved and type tagged, or NULL on timeout.
 */
struct net_buf *bt_buf_get_rx(enum bt_buf_type type, k_timeout_t timeout)
{
	struct net_buf *rx_buf;

	__ASSERT(type == BT_BUF_EVT || type == BT_BUF_ACL_IN ||
		 type == BT_BUF_ISO_IN, "Invalid buffer type requested");

	if (IS_ENABLED(CONFIG_BT_ISO_RX) && type == BT_BUF_ISO_IN) {
		return bt_iso_get_rx(timeout);
	}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	rx_buf = net_buf_alloc((type == BT_BUF_EVT) ? &evt_pool : &acl_in_pool,
			       timeout);
#else
	rx_buf = net_buf_alloc(&hci_rx_pool, timeout);
#endif

	if (rx_buf == NULL) {
		return NULL;
	}

	net_buf_reserve(rx_buf, BT_BUF_RESERVE);
	bt_buf_set_type(rx_buf, type);

	return rx_buf;
}
124 
/* Register (or clear, with NULL) the callback notified when RX buffers are
 * freed. The scheduler lock pairs with buf_rx_freed_notify() so the update
 * cannot race an in-progress notification.
 */
void bt_buf_rx_freed_cb_set(bt_buf_rx_freed_cb_t cb)
{
	k_sched_lock();

	buf_rx_freed_cb = cb;

#if defined(CONFIG_BT_ISO_RX)
	/* Mirror registration state into the ISO subsystem. */
	if (cb != NULL) {
		bt_iso_buf_rx_freed_cb_set(iso_rx_freed_cb);
	} else {
		bt_iso_buf_rx_freed_cb_set(NULL);
	}
#endif

	k_sched_unlock();
}
137 
/* Allocate a buffer for an incoming HCI event.
 *
 * Synchronous events (command status/complete, and number-of-completed-
 * packets when connections are supported) come from the dedicated
 * sync_evt_pool so they can always be serviced. Discardable events use
 * their own pool; everything else goes through bt_buf_get_rx().
 *
 * @param evt         HCI event code.
 * @param discardable Whether the event may be dropped under pressure.
 * @param timeout     How long to wait for a free buffer.
 *
 * @return Buffer tagged BT_BUF_EVT with headroom reserved, or NULL.
 */
struct net_buf *bt_buf_get_evt(uint8_t evt, bool discardable,
			       k_timeout_t timeout)
{
	struct net_buf *evt_buf;

	switch (evt) {
	case BT_HCI_EVT_CMD_STATUS:
	case BT_HCI_EVT_CMD_COMPLETE:
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
	case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
		evt_buf = net_buf_alloc(&sync_evt_pool, timeout);
		break;
	default:
		if (!discardable) {
			return bt_buf_get_rx(BT_BUF_EVT, timeout);
		}
		evt_buf = net_buf_alloc(&discardable_pool, timeout);
		break;
	}

	if (evt_buf != NULL) {
		net_buf_reserve(evt_buf, BT_BUF_RESERVE);
		bt_buf_set_type(evt_buf, BT_BUF_EVT);
	}

	return evt_buf;
}
166 
#ifdef ZTEST_UNITTEST
/* Test-only accessors exposing the internal buffer pools to unit tests.
 * Compiled in only for ZTEST builds; not part of the public API.
 */
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
struct net_buf_pool *bt_buf_get_evt_pool(void)
{
	return &evt_pool;
}

struct net_buf_pool *bt_buf_get_acl_in_pool(void)
{
	return &acl_in_pool;
}
#else
struct net_buf_pool *bt_buf_get_hci_rx_pool(void)
{
	return &hci_rx_pool;
}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

#if defined(CONFIG_BT_BUF_EVT_DISCARDABLE_COUNT)
struct net_buf_pool *bt_buf_get_discardable_pool(void)
{
	return &discardable_pool;
}
#endif /* CONFIG_BT_BUF_EVT_DISCARDABLE_COUNT */

#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
struct net_buf_pool *bt_buf_get_num_complete_pool(void)
{
	return &sync_evt_pool;
}
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
#endif /* ZTEST_UNITTEST */
199 
/* Create a "view" covering the first @p len bytes of @p parent, so callers
 * can prepend headers to that fragment without copying.
 *
 * The view shares the parent's data pointer (cloned via
 * net_buf_simple_clone) and is marked NET_BUF_EXTERNAL_DATA so freeing it
 * does not free the parent's storage. The parent is "clipped"
 * (data = NULL, size = 0) until bt_buf_destroy_view() restores it from
 * @p meta — this is also what bt_buf_has_view() tests for, and what the
 * assert on bt_buf_has_view(parent) below relies on to forbid nesting.
 *
 * @param view   Pre-allocated buffer to turn into the view. Must be
 *               non-NULL.
 * @param parent Buffer to take @p len bytes from. Ownership of the caller's
 *               reference moves into @p meta->parent.
 * @param len    Number of bytes the view exposes. Must be non-zero.
 * @param meta   Storage for the parent's saved data/size so the view can be
 *               undone later.
 *
 * @return @p view, configured as described.
 */
struct net_buf *bt_buf_make_view(struct net_buf *view,
				 struct net_buf *parent,
				 size_t len,
				 struct bt_buf_view_meta *meta)
{
	__ASSERT_NO_MSG(len);
	__ASSERT_NO_MSG(view);
	/* The whole point of this API is to allow prepending data. If the
	 * headroom is 0, that will not happen.
	 */
	__ASSERT_NO_MSG(net_buf_headroom(parent) > 0);

	/* Only one view at a time per parent is supported. */
	__ASSERT_NO_MSG(!bt_buf_has_view(parent));

	LOG_DBG("make-view %p viewsize %zu meta %p", view, len, meta);

	net_buf_simple_clone(&parent->b, &view->b);
	/* View capacity = the parent's headroom (for prepending) + payload. */
	view->size = net_buf_headroom(parent) + len;
	view->len = len;
	view->flags = NET_BUF_EXTERNAL_DATA;

	/* we have a view, eat `len`'s worth of data from the parent */
	(void)net_buf_pull(parent, len);

	meta->backup.data = parent->data;
	parent->data = NULL;

	meta->backup.size = parent->size;
	parent->size = 0;

	/* The ref to `parent` is moved in by passing `parent` as argument. */
	/* save backup & "clip" the buffer so the next `make_view` will fail */
	meta->parent = parent;
	parent = NULL;

	return view;
}
237 
/* Tear down a view created by bt_buf_make_view(): restore the parent's
 * clipped data/size from @p meta, drop the parent reference that
 * make_view moved into @p meta, clear @p meta for reuse, and return
 * @p view's net_buf structure to its pool via net_buf_destroy().
 */
void bt_buf_destroy_view(struct net_buf *view, struct bt_buf_view_meta *meta)
{
	LOG_DBG("destroy-view %p meta %p", view, meta);
	__ASSERT_NO_MSG(meta->parent);

	/* "unclip" the parent buf */
	meta->parent->data = meta->backup.data;
	meta->parent->size = meta->backup.size;

	net_buf_unref(meta->parent);

	memset(meta, 0, sizeof(*meta));
	net_buf_destroy(view);
}
252 
bt_buf_has_view(const struct net_buf * parent)253 bool bt_buf_has_view(const struct net_buf *parent)
254 {
255 	/* This is enforced by `make_view`. see comment there. */
256 	return parent->size == 0 && parent->data == NULL;
257 }
258