/* att.c - Attribute protocol handling */

/*
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/bluetooth/addr.h>
#include <zephyr/bluetooth/conn.h>
#include <zephyr/kernel.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>

#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/uuid.h>
#include <zephyr/bluetooth/att.h>
#include <zephyr/bluetooth/gatt.h>

#include "common/bt_str.h"

#include "hci_core.h"
#include "conn_internal.h"
#include "l2cap_internal.h"
#include "smp.h"
#include "att_internal.h"
#include "gatt_internal.h"

#define LOG_LEVEL CONFIG_BT_ATT_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_att);

#define ATT_CHAN(_ch) CONTAINER_OF(_ch, struct bt_att_chan, chan.chan)
#define ATT_REQ(_node) CONTAINER_OF(_node, struct bt_att_req, node)

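/* Bit 6 of an ATT opcode is the Command Flag (Core Spec Vol 3, Part F,
 * 3.3.1): opcodes with this bit set are commands and never solicit a
 * response from the peer.
 */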
#define ATT_CMD_MASK 0x40

#if defined(CONFIG_BT_EATT)
#define ATT_CHAN_MAX (CONFIG_BT_EATT_MAX + 1)
#else
#define ATT_CHAN_MAX 1
#endif /* CONFIG_BT_EATT */

typedef enum __packed {
	ATT_COMMAND,
	ATT_REQUEST,
	ATT_RESPONSE,
	ATT_NOTIFICATION,
	ATT_CONFIRMATION,
	ATT_INDICATION,
	ATT_UNKNOWN,
} att_type_t;

static att_type_t att_op_get_type(uint8_t op);

#if CONFIG_BT_ATT_PREPARE_COUNT > 0
struct bt_attr_data {
	uint16_t handle;
	uint16_t offset;
};

/* Pool for incoming ATT packets */
NET_BUF_POOL_DEFINE(prep_pool, CONFIG_BT_ATT_PREPARE_COUNT, BT_ATT_BUF_SIZE,
		    sizeof(struct bt_attr_data), NULL);
#endif /* CONFIG_BT_ATT_PREPARE_COUNT */

K_MEM_SLAB_DEFINE(req_slab, sizeof(struct bt_att_req),
		  CONFIG_BT_ATT_TX_COUNT, __alignof__(struct bt_att_req));

enum {
	ATT_CONNECTED,
	ATT_ENHANCED,
	ATT_PENDING_SENT,
	ATT_OUT_OF_SYNC_SENT,

	/* Total number of flags - must be at the end of the enum */
	ATT_NUM_FLAGS,
};

struct bt_att_tx_meta_data;
typedef void (*bt_att_tx_cb_t)(struct bt_conn *conn,
			       struct bt_att_tx_meta_data *user_data);

struct bt_att_tx_meta_data {
	int err;
	uint8_t opcode;
	uint16_t attr_count;
	struct bt_att_chan *att_chan;
	bt_gatt_complete_func_t func;
	void *user_data;
	enum bt_att_chan_opt chan_opt;
};

struct bt_att_tx_meta {
	struct bt_att_tx_meta_data *data;
};

/* ATT channel specific data */
struct bt_att_chan {
	/* Connection this channel is associated with */
	struct bt_att *att;
	struct bt_l2cap_le_chan chan;
	ATOMIC_DEFINE(flags, ATT_NUM_FLAGS);
	struct bt_att_req *req;
	struct k_fifo tx_queue;
	struct k_work_delayable timeout_work;
	sys_snode_t node;
};

static bool bt_att_is_enhanced(struct bt_att_chan *chan)
{
	/* Optimization. */
	if (!IS_ENABLED(CONFIG_BT_EATT)) {
		return false;
	}

	return atomic_test_bit(chan->flags, ATT_ENHANCED);
}

static uint16_t bt_att_mtu(struct bt_att_chan *chan)
{
	/* Core v5.3 Vol 3 Part F 3.4.2:
	 *
	 * The server and client shall set ATT_MTU to the minimum of the
	 * Client Rx MTU and the Server Rx MTU.
	 */
	return MIN(chan->chan.rx.mtu, chan->chan.tx.mtu);
}
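
/* Illustration with hypothetical numbers: if our Rx MTU is 247 and the
 * peer's Rx MTU is 185, both sides end up using
 * ATT_MTU = MIN(247, 185) = 185.
 */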

/* Descriptor of application-specific authorization callbacks that are used
 * with the CONFIG_BT_GATT_AUTHORIZATION_CUSTOM Kconfig enabled.
 */
static const struct bt_gatt_authorization_cb *authorization_cb;

/* ATT connection specific data */
struct bt_att {
	struct bt_conn *conn;
	/* Shared request queue */
	sys_slist_t reqs;
	struct k_fifo tx_queue;
#if CONFIG_BT_ATT_PREPARE_COUNT > 0
	sys_slist_t prep_queue;
#endif
	/* Contains bt_att_chan instance(s) */
	sys_slist_t chans;
#if defined(CONFIG_BT_EATT)
	struct {
		struct k_work_delayable connection_work;
		uint8_t chans_to_connect;

		uint16_t prev_conn_rsp_result;
		uint16_t prev_conn_req_result;
		uint8_t prev_conn_req_missing_chans;
	} eatt;
#endif /* CONFIG_BT_EATT */
};

K_MEM_SLAB_DEFINE(att_slab, sizeof(struct bt_att),
		  CONFIG_BT_MAX_CONN, __alignof__(struct bt_att));
K_MEM_SLAB_DEFINE(chan_slab, sizeof(struct bt_att_chan),
		  CONFIG_BT_MAX_CONN * ATT_CHAN_MAX,
		  __alignof__(struct bt_att_chan));
static struct bt_att_req cancel;

/** The thread ATT response handlers likely run on.
 *
 * Blocking this thread while waiting for an ATT request to resolve can cause a
 * deadlock.
 *
 * This can happen if the application queues ATT requests in the context of a
 * callback from the Bluetooth stack. This is because queuing an ATT request
 * will block until a request-resource is available, and the callbacks run on
 * the same thread as the ATT response handler that frees request-resources.
 *
 * The intended use of this value is to detect the above situation.
 */
static k_tid_t att_handle_rsp_thread;

static struct bt_att_tx_meta_data tx_meta_data_storage[CONFIG_BT_ATT_TX_COUNT];

struct bt_att_tx_meta_data *bt_att_get_tx_meta_data(const struct net_buf *buf);
static void att_on_sent_cb(struct bt_att_tx_meta_data *meta);

#if defined(CONFIG_BT_ATT_ERR_TO_STR)
const char *bt_att_err_to_str(uint8_t att_err)
{
	/* Two mapping tables are used to avoid a big gap of NULL entries. */
#define ATT_ERR(err) [err] = #err
#define ATT_ERR_SECOND(err) [err - BT_ATT_ERR_WRITE_REQ_REJECTED] = #err

	const char * const first_mapping_table[] = {
		ATT_ERR(BT_ATT_ERR_SUCCESS),
		ATT_ERR(BT_ATT_ERR_INVALID_HANDLE),
		ATT_ERR(BT_ATT_ERR_READ_NOT_PERMITTED),
		ATT_ERR(BT_ATT_ERR_WRITE_NOT_PERMITTED),
		ATT_ERR(BT_ATT_ERR_INVALID_PDU),
		ATT_ERR(BT_ATT_ERR_AUTHENTICATION),
		ATT_ERR(BT_ATT_ERR_NOT_SUPPORTED),
		ATT_ERR(BT_ATT_ERR_INVALID_OFFSET),
		ATT_ERR(BT_ATT_ERR_AUTHORIZATION),
		ATT_ERR(BT_ATT_ERR_PREPARE_QUEUE_FULL),
		ATT_ERR(BT_ATT_ERR_ATTRIBUTE_NOT_FOUND),
		ATT_ERR(BT_ATT_ERR_ATTRIBUTE_NOT_LONG),
		ATT_ERR(BT_ATT_ERR_ENCRYPTION_KEY_SIZE),
		ATT_ERR(BT_ATT_ERR_INVALID_ATTRIBUTE_LEN),
		ATT_ERR(BT_ATT_ERR_UNLIKELY),
		ATT_ERR(BT_ATT_ERR_INSUFFICIENT_ENCRYPTION),
		ATT_ERR(BT_ATT_ERR_UNSUPPORTED_GROUP_TYPE),
		ATT_ERR(BT_ATT_ERR_INSUFFICIENT_RESOURCES),
		ATT_ERR(BT_ATT_ERR_DB_OUT_OF_SYNC),
		ATT_ERR(BT_ATT_ERR_VALUE_NOT_ALLOWED),
	};

	const char * const second_mapping_table[] = {
		ATT_ERR_SECOND(BT_ATT_ERR_WRITE_REQ_REJECTED),
		ATT_ERR_SECOND(BT_ATT_ERR_CCC_IMPROPER_CONF),
		ATT_ERR_SECOND(BT_ATT_ERR_PROCEDURE_IN_PROGRESS),
		ATT_ERR_SECOND(BT_ATT_ERR_OUT_OF_RANGE),
	};

	if (att_err < ARRAY_SIZE(first_mapping_table) && first_mapping_table[att_err]) {
		return first_mapping_table[att_err];
	} else if (att_err >= BT_ATT_ERR_WRITE_REQ_REJECTED) {
		const uint8_t second_index = att_err - BT_ATT_ERR_WRITE_REQ_REJECTED;

		if (second_index < ARRAY_SIZE(second_mapping_table) &&
		    second_mapping_table[second_index]) {
			return second_mapping_table[second_index];
		}
	}

	return "(unknown)";

#undef ATT_ERR
#undef ATT_ERR_SECOND
}
#endif /* CONFIG_BT_ATT_ERR_TO_STR */

static void att_tx_destroy(struct net_buf *buf)
{
	struct bt_att_tx_meta_data *p_meta = bt_att_get_tx_meta_data(buf);
	struct bt_att_tx_meta_data meta;

	LOG_DBG("%p", buf);

	/* Destroy the buffer first, as the callback may attempt to allocate a
	 * new one for another operation.
	 */
	meta = *p_meta;

	/* Clear the meta storage. This might help catch illegal
	 * "use-after-free"s. An initial memset is not necessary, as the
	 * metadata storage array is `static`.
	 */
	memset(p_meta, 0x00, sizeof(*p_meta));

	/* After this point, p_meta doesn't belong to us.
	 * The user data will be memset to 0 on allocation.
	 */
	net_buf_destroy(buf);

	/* ATT opcode 0 is invalid. If we get here, that means the buffer got
	 * destroyed before it was ready to be sent. Hopefully nobody sets the
	 * opcode and then destroys the buffer without sending it. :'(
	 */
	if (meta.opcode != 0) {
		att_on_sent_cb(&meta);
	}
}

NET_BUF_POOL_DEFINE(att_pool, CONFIG_BT_ATT_TX_COUNT,
		    BT_L2CAP_SDU_BUF_SIZE(BT_ATT_BUF_SIZE),
		    CONFIG_BT_CONN_TX_USER_DATA_SIZE, att_tx_destroy);

struct bt_att_tx_meta_data *bt_att_get_tx_meta_data(const struct net_buf *buf)
{
	__ASSERT_NO_MSG(net_buf_pool_get(buf->pool_id) == &att_pool);

	/* Metadata lifetime is implicitly tied to the buffer lifetime.
	 * Treat it as part of the buffer itself.
	 */
	return &tx_meta_data_storage[net_buf_id((struct net_buf *)buf)];
}
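
/* Both `att_pool` and `tx_meta_data_storage` have CONFIG_BT_ATT_TX_COUNT
 * entries, so a buffer's pool index doubles as its metadata index. A
 * minimal sketch of the association:
 *
 *   struct net_buf *buf = net_buf_alloc(&att_pool, K_NO_WAIT);
 *   struct bt_att_tx_meta_data *meta =
 *           &tx_meta_data_storage[net_buf_id(buf)];
 */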

static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf);

static void att_chan_mtu_updated(struct bt_att_chan *updated_chan);
static void bt_att_disconnected(struct bt_l2cap_chan *chan);

struct net_buf *bt_att_create_rsp_pdu(struct bt_att_chan *chan, uint8_t op);

static void bt_att_sent(struct bt_l2cap_chan *ch);

static void att_disconnect(struct bt_att_chan *chan)
{
	char addr[BT_ADDR_LE_STR_LEN];
	int err;

	/* In rare circumstances we are "forced" to disconnect the ATT bearer and the ACL.
	 * Examples of when this is the right course of action are an ATT timeout, receiving
	 * an unexpected response from the server, or receiving an invalid response from the
	 * server.
	 */

	bt_addr_le_to_str(bt_conn_get_dst(chan->att->conn), addr, sizeof(addr));
	LOG_DBG("ATT disconnecting device %s", addr);

	bt_att_disconnected(&chan->chan.chan);

	err = bt_conn_disconnect(chan->chan.chan.conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
	if (err) {
		LOG_ERR("Disconnecting failed (err %d)", err);
	}
}

static void att_sent(void *user_data)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *att_chan = data->att_chan;
	struct bt_conn *conn = att_chan->att->conn;
	struct bt_l2cap_chan *chan = &att_chan->chan.chan;

	__ASSERT_NO_MSG(!bt_att_is_enhanced(att_chan));

	LOG_DBG("conn %p chan %p", conn, chan);

	/* For EATT, `bt_att_sent` is assigned to the `.sent` L2 callback.
	 * L2CAP will then call it once the SDU has finished sending.
	 *
	 * For UATT, this won't happen, as static LE l2cap channels don't have
	 * SDUs. Call it manually instead.
	 */
	bt_att_sent(chan);
}

/* In case of success the ownership of the buffer is transferred to the stack
 * which takes care of releasing it when it completes transmitting to the
 * controller.
 *
 * In case bt_l2cap_send_cb fails the buffer state and ownership are retained
 * so the buffer can be safely pushed back to the queue to be processed later.
 */
static int chan_send(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_hdr *hdr;
	struct net_buf_simple_state state;
	int err;
	struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);
	struct bt_att_chan *prev_chan = data->att_chan;

	hdr = (void *)buf->data;

	LOG_DBG("code 0x%02x", hdr->code);

	if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
		LOG_ERR("ATT channel not connected");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_BT_EATT) && hdr->code == BT_ATT_OP_MTU_REQ &&
	    chan->chan.tx.cid != BT_L2CAP_CID_ATT) {
		/* The Exchange MTU sub-procedure shall only be supported on
		 * the LE Fixed Channel Unenhanced ATT bearer
		 */
		return -ENOTSUP;
	}

	__ASSERT_NO_MSG(buf->len >= sizeof(struct bt_att_hdr));
	data->opcode = buf->data[0];
	data->err = 0;

	if (IS_ENABLED(CONFIG_BT_EATT) && bt_att_is_enhanced(chan)) {
		/* Check if a `sent` callback is already pending. If it is, it
		 * cannot be modified, so the operation needs to be queued.
		 */
		if (atomic_test_bit(chan->flags, ATT_PENDING_SENT)) {
			return -EAGAIN;
		}

		if (hdr->code == BT_ATT_OP_SIGNED_WRITE_CMD) {
			return -ENOTSUP;
		}

		/* Check if the channel is ready to send in case of a request */
		if (att_op_get_type(hdr->code) == ATT_REQUEST &&
		    !atomic_test_bit(chan->chan.chan.status,
				     BT_L2CAP_STATUS_OUT)) {
			return -EAGAIN;
		}

		atomic_set_bit(chan->flags, ATT_PENDING_SENT);
		data->att_chan = chan;

		/* bt_l2cap_chan_send does actually return the number of bytes
		 * that could be sent immediately.
		 */
		err = bt_l2cap_chan_send(&chan->chan.chan, buf);
		if (err < 0) {
			data->att_chan = prev_chan;
			atomic_clear_bit(chan->flags, ATT_PENDING_SENT);
			data->err = err;

			return err;
		} else {
			/* On success, the almighty scheduler might already have
			 * run the destroy cb on the buffer. In that case, buf
			 * and its metadata are dangling pointers.
			 */
			buf = NULL;
			data = NULL;
		}

		return 0;
	}

	if (hdr->code == BT_ATT_OP_SIGNED_WRITE_CMD) {
		err = bt_smp_sign(chan->att->conn, buf);
		if (err) {
			LOG_ERR("Error signing data");
			net_buf_unref(buf);
			return err;
		}
	}

	net_buf_simple_save(&buf->b, &state);

	data->att_chan = chan;

	err = bt_l2cap_send_pdu(&chan->chan, buf, NULL, NULL);
	if (err) {
		if (err == -ENOBUFS) {
			LOG_ERR("Ran out of TX buffers or contexts.");
		}
		/* If an error has occurred, restore the buffer state */
		net_buf_simple_restore(&buf->b, &state);
		data->att_chan = prev_chan;
		data->err = err;
	}

	return err;
}

static bool att_chan_matches_chan_opt(struct bt_att_chan *chan, enum bt_att_chan_opt chan_opt)
{
	__ASSERT_NO_MSG(chan_opt <= BT_ATT_CHAN_OPT_ENHANCED_ONLY);

	if (chan_opt == BT_ATT_CHAN_OPT_NONE) {
		return true;
	}

	if (bt_att_is_enhanced(chan)) {
		return (chan_opt & BT_ATT_CHAN_OPT_ENHANCED_ONLY);
	} else {
		return (chan_opt & BT_ATT_CHAN_OPT_UNENHANCED_ONLY);
	}
}
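
/* Matching summary: BT_ATT_CHAN_OPT_NONE matches any bearer; otherwise an
 * enhanced bearer matches only if ENHANCED_ONLY is set in chan_opt, and an
 * unenhanced bearer only if UNENHANCED_ONLY is set.
 */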

static struct net_buf *get_first_buf_matching_chan(struct k_fifo *fifo, struct bt_att_chan *chan)
{
	if (IS_ENABLED(CONFIG_BT_EATT)) {
		struct k_fifo skipped;
		struct net_buf *buf;
		struct net_buf *ret = NULL;
		struct bt_att_tx_meta_data *meta;

		k_fifo_init(&skipped);

		while ((buf = k_fifo_get(fifo, K_NO_WAIT))) {
			meta = bt_att_get_tx_meta_data(buf);
			if (!ret &&
			    att_chan_matches_chan_opt(chan, meta->chan_opt)) {
				ret = buf;
			} else {
				k_fifo_put(&skipped, buf);
			}
		}

		__ASSERT_NO_MSG(k_fifo_is_empty(fifo));

		while ((buf = k_fifo_get(&skipped, K_NO_WAIT))) {
			k_fifo_put(fifo, buf);
		}

		__ASSERT_NO_MSG(k_fifo_is_empty(&skipped));

		return ret;
	} else {
		return k_fifo_get(fifo, K_NO_WAIT);
	}
}
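
/* Note: the drain-and-requeue above preserves the relative order of the
 * skipped buffers, so traffic destined for other bearers is not reordered.
 */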

static struct bt_att_req *get_first_req_matching_chan(sys_slist_t *reqs, struct bt_att_chan *chan)
{
	if (IS_ENABLED(CONFIG_BT_EATT)) {
		sys_snode_t *curr, *prev = NULL;
		struct bt_att_tx_meta_data *meta = NULL;

		SYS_SLIST_FOR_EACH_NODE(reqs, curr) {
			meta = bt_att_get_tx_meta_data(ATT_REQ(curr)->buf);
			if (att_chan_matches_chan_opt(chan, meta->chan_opt)) {
				break;
			}

			prev = curr;
		}

		if (curr) {
			sys_slist_remove(reqs, prev, curr);

			return ATT_REQ(curr);
		}

		return NULL;
	}

	sys_snode_t *node = sys_slist_get(reqs);

	if (node) {
		return ATT_REQ(node);
	} else {
		return NULL;
	}
}

static int process_queue(struct bt_att_chan *chan, struct k_fifo *queue)
{
	struct net_buf *buf;
	int err;

	buf = get_first_buf_matching_chan(queue, chan);
	if (buf) {
		err = bt_att_chan_send(chan, buf);
		if (err) {
			/* Push it back if it could not be sent */
			k_queue_prepend(&queue->_queue, buf);
			return err;
		}

		return 0;
	}

	return -ENOENT;
}

/* Send requests without taking tx_sem */
static int chan_req_send(struct bt_att_chan *chan, struct bt_att_req *req)
{
	struct net_buf *buf;
	int err;

	if (bt_att_mtu(chan) < net_buf_frags_len(req->buf)) {
		return -EMSGSIZE;
	}

	LOG_DBG("chan %p req %p len %zu", chan, req, net_buf_frags_len(req->buf));

	chan->req = req;

	/* Release since bt_l2cap_send_cb takes ownership of the buffer */
	buf = req->buf;
	req->buf = NULL;

	/* This lock makes sure the value of `bt_att_mtu(chan)` does not
	 * change.
	 */
	k_sched_lock();
	err = bt_att_chan_send(chan, buf);
	if (err) {
		/* We still have the ownership of the buffer */
		req->buf = buf;
		chan->req = NULL;
	} else {
		bt_gatt_req_set_mtu(req, bt_att_mtu(chan));
	}
	k_sched_unlock();

	return err;
}

static void bt_att_sent(struct bt_l2cap_chan *ch)
{
	struct bt_att_chan *chan = ATT_CHAN(ch);
	struct bt_att *att = chan->att;
	int err;

	LOG_DBG("chan %p", chan);

	atomic_clear_bit(chan->flags, ATT_PENDING_SENT);

	if (!att) {
		LOG_DBG("Ignore sent on detached ATT chan");
		return;
	}

	/* Process pending requests first: since they require a response, they
	 * can only be processed one at a time. If the other queues were
	 * processed first, they might always contain a buffer, starving the
	 * request queue.
	 */
	if (!chan->req && !sys_slist_is_empty(&att->reqs)) {
		sys_snode_t *node = sys_slist_get(&att->reqs);

		if (chan_req_send(chan, ATT_REQ(node)) >= 0) {
			return;
		}

		/* Prepend back to the list as it could not be sent */
		sys_slist_prepend(&att->reqs, node);
	}

	/* Process channel queue */
	err = process_queue(chan, &chan->tx_queue);
	if (!err) {
		return;
	}

	/* Process global queue */
	(void)process_queue(chan, &att->tx_queue);
}

static void chan_rebegin_att_timeout(struct bt_att_tx_meta_data *user_data)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *chan = data->att_chan;

	LOG_DBG("chan %p chan->req %p", chan, chan->req);

	if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
		LOG_ERR("ATT channel not connected");
		return;
	}

	/* Start timeout work. Only if we are sure that the request is really
	 * in-flight.
	 */
	if (chan->req) {
		k_work_reschedule(&chan->timeout_work, BT_ATT_TIMEOUT);
	}
}

static void chan_req_notif_sent(struct bt_att_tx_meta_data *user_data)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *chan = data->att_chan;
	struct bt_conn *conn = chan->att->conn;
	bt_gatt_complete_func_t func = data->func;
	uint16_t attr_count = data->attr_count;
	void *ud = data->user_data;

	LOG_DBG("chan %p CID 0x%04X", chan, chan->chan.tx.cid);

	if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
		LOG_ERR("ATT channel not connected");
		return;
	}

	if (func) {
		for (uint16_t i = 0; i < attr_count; i++) {
			func(conn, ud);
		}
	}
}

static void att_on_sent_cb(struct bt_att_tx_meta_data *meta)
{
	const att_type_t op_type = att_op_get_type(meta->opcode);

	LOG_DBG("opcode 0x%x", meta->opcode);

	if (!meta->att_chan ||
	    !meta->att_chan->att ||
	    !meta->att_chan->att->conn) {
		LOG_DBG("Bearer not connected, dropping ATT cb");
		return;
	}

	if (meta->err) {
		LOG_ERR("Got err %d, not calling ATT cb", meta->err);
		return;
	}

	if (!bt_att_is_enhanced(meta->att_chan)) {
		/* For EATT, L2CAP will call it after the SDU is fully sent. */
		LOG_DBG("UATT bearer, calling att_sent");
		att_sent(meta);
	}

	switch (op_type) {
	case ATT_RESPONSE:
		return;
	case ATT_CONFIRMATION:
		return;
	case ATT_REQUEST:
	case ATT_INDICATION:
		chan_rebegin_att_timeout(meta);
		return;
	case ATT_COMMAND:
	case ATT_NOTIFICATION:
		chan_req_notif_sent(meta);
		return;
	default:
		__ASSERT(false, "Unknown op type 0x%02X", op_type);
		return;
	}
}

static struct net_buf *bt_att_chan_create_pdu(struct bt_att_chan *chan, uint8_t op, size_t len)
{
	struct bt_att_hdr *hdr;
	struct net_buf *buf;
	struct bt_att_tx_meta_data *data;
	k_timeout_t timeout;

	if (len + sizeof(op) > bt_att_mtu(chan)) {
		LOG_WRN("ATT MTU exceeded, max %u, wanted %zu", bt_att_mtu(chan),
			len + sizeof(op));
		return NULL;
	}

	switch (att_op_get_type(op)) {
	case ATT_RESPONSE:
	case ATT_CONFIRMATION:
		/* Use a timeout only when responding/confirming */
		timeout = BT_ATT_TIMEOUT;
		break;
	default: {
		k_tid_t current_thread = k_current_get();

		if (current_thread == k_work_queue_thread_get(&k_sys_work_q)) {
			/* No blocking in the sysqueue. */
			timeout = K_NO_WAIT;
		} else if (current_thread == att_handle_rsp_thread) {
			/* Blocking would cause deadlock. */
			timeout = K_NO_WAIT;
		} else {
			timeout = K_FOREVER;
		}
	}
	}

	/* This will reserve headroom for lower layers */
	buf = bt_l2cap_create_pdu_timeout(&att_pool, 0, timeout);
	if (!buf) {
		LOG_ERR("Unable to allocate buffer for op 0x%02x", op);
		return NULL;
	}

	/* If we got a buf from `att_pool`, then the metadata slot at its index
	 * is officially ours to use.
	 */
	data = bt_att_get_tx_meta_data(buf);

	if (IS_ENABLED(CONFIG_BT_EATT)) {
		net_buf_reserve(buf, BT_L2CAP_SDU_BUF_SIZE(0));
	}

	data->att_chan = chan;

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = op;

	return buf;
}

static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf)
{
	LOG_DBG("chan %p flags %lu code 0x%02x", chan, atomic_get(chan->flags),
		((struct bt_att_hdr *)buf->data)->code);

	if (IS_ENABLED(CONFIG_BT_EATT) &&
	    !att_chan_matches_chan_opt(chan, bt_att_get_tx_meta_data(buf)->chan_opt)) {
		return -EINVAL;
	}

	return chan_send(chan, buf);
}

static void att_send_process(struct bt_att *att)
{
	struct bt_att_chan *chan, *tmp, *prev = NULL;
	int err = 0;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (err == -ENOENT && prev &&
		    (bt_att_is_enhanced(chan) == bt_att_is_enhanced(prev))) {
			/* If there was nothing to send for the previous channel and the current
			 * channel has the same "enhancedness", there will be nothing to send for
			 * this channel either.
			 */
			continue;
		}

		err = process_queue(chan, &att->tx_queue);
		if (!err) {
			/* Success */
			return;
		}

		prev = chan;
	}
}

static void bt_att_chan_send_rsp(struct bt_att_chan *chan, struct net_buf *buf)
{
	int err;

	err = chan_send(chan, buf);
	if (err) {
		/* Responses need to be sent back using the same channel */
		k_fifo_put(&chan->tx_queue, buf);
	}
}

static void send_err_rsp(struct bt_att_chan *chan, uint8_t req, uint16_t handle,
			 uint8_t err)
{
	struct bt_att_error_rsp *rsp;
	struct net_buf *buf;

	/* Ignore opcode 0x00 */
	if (!req) {
		return;
	}

	buf = bt_att_chan_create_pdu(chan, BT_ATT_OP_ERROR_RSP, sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->request = req;
	rsp->handle = sys_cpu_to_le16(handle);
	rsp->error = err;

	bt_att_chan_send_rsp(chan, buf);
}

static uint8_t att_mtu_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_exchange_mtu_req *req;
	struct bt_att_exchange_mtu_rsp *rsp;
	struct net_buf *pdu;
	uint16_t mtu_client, mtu_server;

	/* Exchange MTU sub-procedure shall only be supported on the
	 * LE Fixed Channel Unenhanced ATT bearer.
	 */
	if (bt_att_is_enhanced(chan)) {
		return BT_ATT_ERR_NOT_SUPPORTED;
	}

	req = (void *)buf->data;

	mtu_client = sys_le16_to_cpu(req->mtu);

	LOG_DBG("Client MTU %u", mtu_client);

	/* Check if MTU is valid */
	if (mtu_client < BT_ATT_DEFAULT_LE_MTU) {
		return BT_ATT_ERR_INVALID_PDU;
	}

	pdu = bt_att_create_rsp_pdu(chan, BT_ATT_OP_MTU_RSP);
	if (!pdu) {
		return BT_ATT_ERR_UNLIKELY;
	}

	mtu_server = BT_LOCAL_ATT_MTU_UATT;

	LOG_DBG("Server MTU %u", mtu_server);

	rsp = net_buf_add(pdu, sizeof(*rsp));
	rsp->mtu = sys_cpu_to_le16(mtu_server);

	bt_att_chan_send_rsp(chan, pdu);

	/* The ATT_EXCHANGE_MTU_REQ/RSP is just an alternative way of
	 * communicating the L2CAP MTU.
	 */
	chan->chan.rx.mtu = mtu_server;
	chan->chan.tx.mtu = mtu_client;

	LOG_DBG("Negotiated MTU %u", bt_att_mtu(chan));

#if defined(CONFIG_BT_GATT_CLIENT)
	/* Mark the MTU Exchange as complete.
	 * This will skip sending ATT Exchange MTU from our side.
	 *
	 * Core 5.3 | Vol 3, Part F 3.4.2.2:
	 * If MTU is exchanged in one direction, that is sufficient for both directions.
	 */
	atomic_set_bit(chan->att->conn->flags, BT_CONN_ATT_MTU_EXCHANGED);
#endif /* CONFIG_BT_GATT_CLIENT */

	att_chan_mtu_updated(chan);

	return 0;
}

static int bt_att_chan_req_send(struct bt_att_chan *chan,
				struct bt_att_req *req)
{
	__ASSERT_NO_MSG(chan);
	__ASSERT_NO_MSG(req);
	__ASSERT_NO_MSG(req->func);
	__ASSERT_NO_MSG(!chan->req);

	LOG_DBG("req %p", req);

	return chan_req_send(chan, req);
}

static void att_req_send_process(struct bt_att *att)
{
	struct bt_att_req *req = NULL;
	struct bt_att_chan *chan, *tmp, *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		/* If there is an ongoing transaction, do not use the channel */
		if (chan->req) {
			continue;
		}

		if (!req && prev && (bt_att_is_enhanced(chan) == bt_att_is_enhanced(prev))) {
			/* If there was nothing to send for the previous channel and the current
			 * channel has the same "enhancedness", there will be nothing to send for
			 * this channel either.
			 */
			continue;
		}

		prev = chan;

		/* Pull next request from the list */
		req = get_first_req_matching_chan(&att->reqs, chan);
		if (!req) {
			continue;
		}

		if (bt_att_chan_req_send(chan, req) >= 0) {
			return;
		}

		/* Prepend back to the list as it could not be sent */
		sys_slist_prepend(&att->reqs, &req->node);
	}
}

static uint8_t att_handle_rsp(struct bt_att_chan *chan, void *pdu, uint16_t len,
			      int err)
{
	bt_att_func_t func = NULL;
	void *params;

	LOG_DBG("chan %p err %d len %u: %s", chan, err, len, bt_hex(pdu, len));

	/* Cancel timeout if ongoing */
	k_work_cancel_delayable(&chan->timeout_work);

	if (!chan->req) {
		LOG_WRN("No pending ATT request");
		att_disconnect(chan);
		return 0; /* Returning a non-0 value would attempt to send an error response */
	}

	/* Check if request has been cancelled */
	if (chan->req == &cancel) {
		chan->req = NULL;
		goto process;
	}

	/* Reset func so it can be reused by the callback */
	func = chan->req->func;
	chan->req->func = NULL;
	params = chan->req->user_data;

	/* Free the allocated request so its memory can be reused */
	bt_att_req_free(chan->req);
	chan->req = NULL;

process:
	/* Process pending requests */
	att_req_send_process(chan->att);
	if (func) {
		func(chan->att->conn, err, pdu, len, params);
	}

	return 0;
}

#if defined(CONFIG_BT_GATT_CLIENT)
static uint8_t att_mtu_rsp(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_exchange_mtu_rsp *rsp;
	uint16_t mtu;

	rsp = (void *)buf->data;

	mtu = sys_le16_to_cpu(rsp->mtu);

	LOG_DBG("Server MTU %u", mtu);

	/* Check if MTU is valid */
	if (mtu < BT_ATT_DEFAULT_LE_MTU) {
		return att_handle_rsp(chan, NULL, 0, BT_ATT_ERR_INVALID_PDU);
	}

	/* The following must equal the value we sent in the req. We assume this
	 * is a rsp to `gatt_exchange_mtu_encode`.
	 */
	chan->chan.rx.mtu = BT_LOCAL_ATT_MTU_UATT;
	/* The ATT_EXCHANGE_MTU_REQ/RSP is just an alternative way of
	 * communicating the L2CAP MTU.
	 */
	chan->chan.tx.mtu = mtu;

	LOG_DBG("Negotiated MTU %u", bt_att_mtu(chan));

	att_chan_mtu_updated(chan);

	return att_handle_rsp(chan, rsp, buf->len, 0);
}
#endif /* CONFIG_BT_GATT_CLIENT */

static bool range_is_valid(uint16_t start, uint16_t end, uint16_t *err)
{
	/* Handle 0 is invalid */
	if (!start || !end) {
		if (err) {
			*err = 0U;
		}
		return false;
	}

	/* Check if range is valid */
	if (start > end) {
		if (err) {
			*err = start;
		}
		return false;
	}

	return true;
}
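
/* The handle stored in `err` is the one echoed back in the ATT_ERROR_RSP;
 * see the send_err_rsp() calls with BT_ATT_ERR_INVALID_HANDLE in the
 * request handlers below.
 */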

struct find_info_data {
	struct bt_att_chan *chan;
	struct net_buf *buf;
	struct bt_att_find_info_rsp *rsp;
	union {
		struct bt_att_info_16 *info16;
		struct bt_att_info_128 *info128;
	};
};

static uint8_t find_info_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct find_info_data *data = user_data;
	struct bt_att_chan *chan = data->chan;

	LOG_DBG("handle 0x%04x", handle);

	/* Initialize rsp at first entry */
	if (!data->rsp) {
		data->rsp = net_buf_add(data->buf, sizeof(*data->rsp));
		data->rsp->format = (attr->uuid->type == BT_UUID_TYPE_16) ?
				    BT_ATT_INFO_16 : BT_ATT_INFO_128;
	}

	switch (data->rsp->format) {
	case BT_ATT_INFO_16:
		if (attr->uuid->type != BT_UUID_TYPE_16) {
			return BT_GATT_ITER_STOP;
		}

		/* Fast forward to next item position */
		data->info16 = net_buf_add(data->buf, sizeof(*data->info16));
		data->info16->handle = sys_cpu_to_le16(handle);
		data->info16->uuid = sys_cpu_to_le16(BT_UUID_16(attr->uuid)->val);

		if (bt_att_mtu(chan) - data->buf->len >
		    sizeof(*data->info16)) {
			return BT_GATT_ITER_CONTINUE;
		}

		break;
	case BT_ATT_INFO_128:
		if (attr->uuid->type != BT_UUID_TYPE_128) {
			return BT_GATT_ITER_STOP;
		}

		/* Fast forward to next item position */
		data->info128 = net_buf_add(data->buf, sizeof(*data->info128));
		data->info128->handle = sys_cpu_to_le16(handle);
		memcpy(data->info128->uuid, BT_UUID_128(attr->uuid)->val,
		       sizeof(data->info128->uuid));

		if (bt_att_mtu(chan) - data->buf->len >
		    sizeof(*data->info128)) {
			return BT_GATT_ITER_CONTINUE;
		}
	}

	return BT_GATT_ITER_STOP;
}

static uint8_t att_find_info_rsp(struct bt_att_chan *chan, uint16_t start_handle,
				 uint16_t end_handle)
{
	struct find_info_data data;

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_FIND_INFO_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	bt_gatt_foreach_attr(start_handle, end_handle, find_info_cb, &data);

	if (!data.rsp) {
		net_buf_unref(data.buf);
		/* Respond since handle is set */
		send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, start_handle,
			     BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_find_info_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_find_info_req *req;
	uint16_t start_handle, end_handle, err_handle;

	req = (void *)buf->data;

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x", start_handle, end_handle);

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	return att_find_info_rsp(chan, start_handle, end_handle);
}

struct find_type_data {
	struct bt_att_chan *chan;
	struct net_buf *buf;
	struct bt_att_handle_group *group;
	const void *value;
	uint8_t value_len;
	uint8_t err;
};

static uint8_t find_type_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct find_type_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	int read;
	uint8_t uuid[16];
	struct net_buf *frag;
	size_t len;

	/* Skip secondary services */
	if (!bt_uuid_cmp(attr->uuid, BT_UUID_GATT_SECONDARY)) {
		goto skip;
	}

	/* Update group end_handle if not a primary service */
	if (bt_uuid_cmp(attr->uuid, BT_UUID_GATT_PRIMARY)) {
		if (data->group &&
		    handle > sys_le16_to_cpu(data->group->end_handle)) {
			data->group->end_handle = sys_cpu_to_le16(handle);
		}
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/* Stop if there is no space left */
	if (bt_att_mtu(chan) - net_buf_frags_len(data->buf) <
	    sizeof(*data->group)) {
		return BT_GATT_ITER_STOP;
	}

	frag = net_buf_frag_last(data->buf);

	len = MIN(bt_att_mtu(chan) - net_buf_frags_len(data->buf),
		  net_buf_tailroom(frag));
	if (!len) {
		frag = net_buf_alloc(net_buf_pool_get(data->buf->pool_id),
				     K_NO_WAIT);
		/* If no buffer can be allocated immediately, stop */
		if (!frag) {
			return BT_GATT_ITER_STOP;
		}

		net_buf_frag_add(data->buf, frag);
	}

	/* Read attribute value and store in the buffer */
	read = attr->read(conn, attr, uuid, sizeof(uuid), 0);
	if (read < 0) {
		/*
		 * Since we don't know if it is the service with the requested
		 * UUID, we cannot respond with an error to this request.
		 */
		goto skip;
	}

	/* Check if data matches */
	if (read != data->value_len) {
		/* Use bt_uuid_cmp() to compare UUIDs of different form. */
		struct bt_uuid_128 ref_uuid;
		struct bt_uuid_128 recvd_uuid;

		if (!bt_uuid_create(&recvd_uuid.uuid, data->value, data->value_len)) {
			LOG_WRN("Unable to create UUID: size %u", data->value_len);
			goto skip;
		}
		if (!bt_uuid_create(&ref_uuid.uuid, uuid, read)) {
			LOG_WRN("Unable to create UUID: size %d", read);
			goto skip;
		}
		if (bt_uuid_cmp(&recvd_uuid.uuid, &ref_uuid.uuid)) {
			goto skip;
		}
	} else if (memcmp(data->value, uuid, read)) {
		goto skip;
	}

	/* If the service has been found, the error should be cleared */
	data->err = 0x00;

	/* Fast forward to next item position */
	data->group = net_buf_add(frag, sizeof(*data->group));
	data->group->start_handle = sys_cpu_to_le16(handle);
	data->group->end_handle = sys_cpu_to_le16(handle);

	/* Continue to find the end_handle */
	return BT_GATT_ITER_CONTINUE;

skip:
	data->group = NULL;
	return BT_GATT_ITER_CONTINUE;
}

static uint8_t att_find_type_rsp(struct bt_att_chan *chan, uint16_t start_handle,
				 uint16_t end_handle, const void *value,
				 uint8_t value_len)
{
	struct find_type_data data;

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_FIND_TYPE_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.group = NULL;
	data.value = value;
	data.value_len = value_len;

	/* Pre-set error in case no service will be found */
	data.err = BT_ATT_ERR_ATTRIBUTE_NOT_FOUND;

	bt_gatt_foreach_attr(start_handle, end_handle, find_type_cb, &data);

	/* If error has not been cleared, no service has been found */
	if (data.err) {
		net_buf_unref(data.buf);
		/* Respond since handle is set */
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
			     data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_find_type_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_find_type_req *req;
	uint16_t start_handle, end_handle, err_handle, type;
	uint8_t *value;

	req = net_buf_pull_mem(buf, sizeof(*req));

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);
	type = sys_le16_to_cpu(req->type);
	value = buf->data;

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %u", start_handle, end_handle, type);

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	/* The Attribute Protocol Find By Type Value Request shall be used with
	 * the Attribute Type parameter set to the UUID for "Primary Service"
	 * and the Attribute Value set to the 16-bit Bluetooth UUID or 128-bit
	 * UUID for the specific primary service.
	 */
	if (bt_uuid_cmp(BT_UUID_DECLARE_16(type), BT_UUID_GATT_PRIMARY)) {
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
			     BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
		return 0;
	}

	return att_find_type_rsp(chan, start_handle, end_handle, value,
				 buf->len);
}

static uint8_t err_to_att(int err)
{
	LOG_DBG("%d", err);

	if (err < 0 && err >= -0xff) {
		return -err;
	}

	return BT_ATT_ERR_UNLIKELY;
}
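
/* Attribute callbacks report ATT errors as negated values, e.g. via
 * BT_GATT_ERR(BT_ATT_ERR_INVALID_OFFSET), so negating a return value in
 * [-0xff, -1] recovers the ATT error code; anything else maps to
 * BT_ATT_ERR_UNLIKELY.
 */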

struct read_type_data {
	struct bt_att_chan *chan;
	struct bt_uuid *uuid;
	struct net_buf *buf;
	struct bt_att_read_type_rsp *rsp;
	struct bt_att_data *item;
	uint8_t err;
};

typedef bool (*attr_read_cb)(struct net_buf *buf, ssize_t read,
			     void *user_data);

static bool attr_read_authorize(struct bt_conn *conn,
				const struct bt_gatt_attr *attr)
{
	if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
		return true;
	}

	if (!authorization_cb || !authorization_cb->read_authorize) {
		return true;
	}

	return authorization_cb->read_authorize(conn, attr);
}

static bool attr_read_type_cb(struct net_buf *frag, ssize_t read,
			      void *user_data)
{
	struct read_type_data *data = user_data;

	if (!data->rsp->len) {
		/* Set len to be the first item found */
		data->rsp->len = read + sizeof(*data->item);
	} else if (data->rsp->len != read + sizeof(*data->item)) {
		/* All items should have the same size */
		frag->len -= sizeof(*data->item);
		data->item = NULL;
		return false;
	}

	return true;
}
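
/* The Read By Type Response has a single Length field covering every
 * handle-value pair, so the first attribute whose value size differs ends
 * the list: attr_read_type_cb() above rolls back the half-written item and
 * leaves the attribute for a follow-up request.
 */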

static ssize_t att_chan_read(struct bt_att_chan *chan,
			     const struct bt_gatt_attr *attr,
			     struct net_buf *buf, uint16_t offset,
			     attr_read_cb cb, void *user_data)
{
	struct bt_conn *conn = chan->chan.chan.conn;
	ssize_t read;
	struct net_buf *frag;
	size_t len, total = 0;

	if (bt_att_mtu(chan) <= net_buf_frags_len(buf)) {
		return 0;
	}

	frag = net_buf_frag_last(buf);

	/* Create necessary fragments if MTU is bigger than what a buffer can
	 * hold.
	 */
	do {
		len = MIN(bt_att_mtu(chan) - net_buf_frags_len(buf),
			  net_buf_tailroom(frag));
		if (!len) {
			frag = net_buf_alloc(net_buf_pool_get(buf->pool_id),
					     K_NO_WAIT);
			/* If no buffer can be allocated immediately, return */
			if (!frag) {
				return total;
			}

			net_buf_frag_add(buf, frag);

			len = MIN(bt_att_mtu(chan) - net_buf_frags_len(buf),
				  net_buf_tailroom(frag));
		}

		read = attr->read(conn, attr, frag->data + frag->len, len,
				  offset);
		if (read < 0) {
			if (total) {
				return total;
			}

			return read;
		}

		if (cb && !cb(frag, read, user_data)) {
			break;
		}

		net_buf_add(frag, read);
		total += read;
		offset += read;
	} while (bt_att_mtu(chan) > net_buf_frags_len(buf) && read == len);

	return total;
}
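
/* att_chan_read() returns the number of bytes actually appended. Running
 * out of fragment buffers is treated as a short read rather than an error;
 * the partial data gathered so far is still sent, which is valid since ATT
 * read responses may be truncated to fit the ATT_MTU.
 */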

static uint8_t read_type_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct read_type_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	ssize_t read;

	/* Skip if doesn't match */
	if (bt_uuid_cmp(attr->uuid, data->uuid)) {
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If an attribute in the set of requested attributes would cause an
	 * Error Response then this attribute cannot be included in a
	 * Read By Type Response and the attributes before this attribute
	 * shall be returned.
	 *
	 * If the first attribute in the set of requested attributes would
	 * cause an Error Response then no other attributes in the requested
	 * attributes can be considered.
	 */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		if (data->rsp->len) {
			data->err = 0x00;
		}
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_read_authorize(conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/*
	 * If any attribute is found in the handle range, the error is changed
	 * from the pre-set "attribute not found" error to no error.
	 */
	data->err = 0x00;

	/* Fast forward to next item position */
	data->item = net_buf_add(net_buf_frag_last(data->buf),
				 sizeof(*data->item));
	data->item->handle = sys_cpu_to_le16(handle);

	read = att_chan_read(chan, attr, data->buf, 0, attr_read_type_cb, data);
	if (read < 0) {
		data->err = err_to_att(read);
		return BT_GATT_ITER_STOP;
	}

	if (!data->item) {
		return BT_GATT_ITER_STOP;
	}

	/* Continue only if there is still space for more items */
	return bt_att_mtu(chan) - net_buf_frags_len(data->buf) >
	       data->rsp->len ? BT_GATT_ITER_CONTINUE : BT_GATT_ITER_STOP;
}

static uint8_t att_read_type_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
				 uint16_t start_handle, uint16_t end_handle)
{
	struct read_type_data data;

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_TYPE_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.uuid = uuid;
	data.rsp = net_buf_add(data.buf, sizeof(*data.rsp));
	data.rsp->len = 0U;

	/* Pre-set error in case no attr is found in the handle range */
	data.err = BT_ATT_ERR_ATTRIBUTE_NOT_FOUND;

	bt_gatt_foreach_attr(start_handle, end_handle, read_type_cb, &data);

	if (data.err) {
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, start_handle,
			     data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_read_type_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_type_req *req;
	uint16_t start_handle, end_handle, err_handle;
	union {
		struct bt_uuid uuid;
		struct bt_uuid_16 u16;
		struct bt_uuid_128 u128;
	} u;
	uint8_t uuid_len = buf->len - sizeof(*req);

	/* Type can only be UUID16 or UUID128 */
	if (uuid_len != 2 && uuid_len != 16) {
		return BT_ATT_ERR_INVALID_PDU;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);
	if (!bt_uuid_create(&u.uuid, req->uuid, uuid_len)) {
		return BT_ATT_ERR_UNLIKELY;
	}

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %s", start_handle, end_handle,
		bt_uuid_str(&u.uuid));

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	/* If a client that has indicated support for robust caching (by setting the Robust
	 * Caching bit in the Client Supported Features characteristic) is change-unaware
	 * then the server shall send an ATT_ERROR_RSP PDU with the Error Code
	 * parameter set to Database Out Of Sync (0x12) when either of the following happen:
	 * • That client requests an operation at any Attribute Handle or list of Attribute
	 *   Handles by sending an ATT request.
	 * • That client sends an ATT_READ_BY_TYPE_REQ PDU with Attribute Type
	 *   other than «Include» or «Characteristic» and an Attribute Handle range
	 *   other than 0x0001 to 0xFFFF.
	 * (Core Specification 5.4 Vol 3. Part G. 2.5.2.1 Robust Caching).
	 */
	if (!bt_gatt_change_aware(chan->chan.chan.conn, true)) {
		if (bt_uuid_cmp(&u.uuid, BT_UUID_GATT_INCLUDE) != 0 &&
		    bt_uuid_cmp(&u.uuid, BT_UUID_GATT_CHRC) != 0 &&
		    (start_handle != BT_ATT_FIRST_ATTRIBUTE_HANDLE ||
		     end_handle != BT_ATT_LAST_ATTRIBUTE_HANDLE)) {
			if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
				return BT_ATT_ERR_DB_OUT_OF_SYNC;
			} else {
				return 0;
			}
		}
	}

	return att_read_type_rsp(chan, &u.uuid, start_handle, end_handle);
}

struct read_data {
	struct bt_att_chan *chan;
	uint16_t offset;
	struct net_buf *buf;
	uint8_t err;
};

static uint8_t read_cb(const struct bt_gatt_attr *attr, uint16_t handle,
		       void *user_data)
{
	struct read_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	int ret;

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If any attribute is found in the handle range, the error is changed
	 * from the pre-set "invalid handle" error to no error.
	 */
	data->err = 0x00;

	/* Check attribute permissions */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_read_authorize(conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/* Read attribute value and store in the buffer */
	ret = att_chan_read(chan, attr, data->buf, data->offset, NULL, NULL);
	if (ret < 0) {
		data->err = err_to_att(ret);
		return BT_GATT_ITER_STOP;
	}

	return BT_GATT_ITER_CONTINUE;
}

static uint8_t att_read_rsp(struct bt_att_chan *chan, uint8_t op, uint8_t rsp,
			    uint16_t handle, uint16_t offset)
{
	struct read_data data;

	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	if (!handle) {
		return BT_ATT_ERR_INVALID_HANDLE;
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, rsp);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.offset = offset;

	/* Pre-set error in case no attr is found at the handle */
	data.err = BT_ATT_ERR_INVALID_HANDLE;

	bt_gatt_foreach_attr(handle, handle, read_cb, &data);

	/* In case of error discard data and respond with an error */
	if (data.err) {
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, op, handle, data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_read_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_req *req;
	uint16_t handle;

	req = (void *)buf->data;

	handle = sys_le16_to_cpu(req->handle);

	LOG_DBG("handle 0x%04x", handle);

	return att_read_rsp(chan, BT_ATT_OP_READ_REQ, BT_ATT_OP_READ_RSP,
			    handle, 0);
}

static uint8_t att_read_blob_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_blob_req *req;
	uint16_t handle, offset;

	req = (void *)buf->data;

	handle = sys_le16_to_cpu(req->handle);
	offset = sys_le16_to_cpu(req->offset);

	LOG_DBG("handle 0x%04x offset %u", handle, offset);

	return att_read_rsp(chan, BT_ATT_OP_READ_BLOB_REQ,
			    BT_ATT_OP_READ_BLOB_RSP, handle, offset);
}

#if defined(CONFIG_BT_GATT_READ_MULTIPLE)
static uint8_t att_read_mult_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct read_data data;
	uint16_t handle;

	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_MULT_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;

	while (buf->len >= sizeof(uint16_t)) {
		handle = net_buf_pull_le16(buf);

		LOG_DBG("handle 0x%04x ", handle);

		/* An Error Response shall be sent by the server in response to
		 * the Read Multiple Request [....] if a read operation is not
		 * permitted on any of the Characteristic Values.
		 *
		 * If the handle is not valid, return an invalid handle error.
		 * If the handle is found, the error will be cleared by read_cb.
		 */
		data.err = BT_ATT_ERR_INVALID_HANDLE;

		bt_gatt_foreach_attr(handle, handle, read_cb, &data);

		/* Stop reading in case of error */
		if (data.err) {
			net_buf_unref(data.buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, BT_ATT_OP_READ_MULT_REQ, handle,
				     data.err);
			return 0;
		}
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
#endif /* CONFIG_BT_GATT_READ_MULTIPLE */

#if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
static uint8_t read_vl_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			  void *user_data)
{
	struct read_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	struct bt_att_read_mult_vl_rsp *rsp;
	int read;

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If any attribute is found in the handle range, the error is changed
	 * from the pre-set "invalid handle" error to no error.
	 */
	data->err = 0x00;

	/* Check attribute permissions */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_read_authorize(conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/* The Length Value Tuple List may be truncated within the first two
	 * octets of a tuple due to the size limits of the current ATT_MTU.
	 */
	if (bt_att_mtu(chan) - data->buf->len < 2) {
		return BT_GATT_ITER_STOP;
	}

	rsp = net_buf_add(data->buf, sizeof(*rsp));

	read = att_chan_read(chan, attr, data->buf, data->offset, NULL, NULL);
	if (read < 0) {
		data->err = err_to_att(read);
		return BT_GATT_ITER_STOP;
	}

	rsp->len = read;

	return BT_GATT_ITER_CONTINUE;
}

static uint8_t att_read_mult_vl_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct read_data data;
	uint16_t handle;

	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_MULT_VL_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;

	while (buf->len >= sizeof(uint16_t)) {
		handle = net_buf_pull_le16(buf);

		LOG_DBG("handle 0x%04x ", handle);

		/* If the handle is not valid, return an invalid handle error.
		 * If the handle is found, the error will be cleared by
		 * read_vl_cb.
		 */
		data.err = BT_ATT_ERR_INVALID_HANDLE;

		bt_gatt_foreach_attr(handle, handle, read_vl_cb, &data);

		/* Stop reading in case of error */
		if (data.err) {
			net_buf_unref(data.buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, BT_ATT_OP_READ_MULT_VL_REQ, handle,
				     data.err);
			return 0;
		}
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
#endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
1888 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
1889
1890 struct read_group_data {
1891 struct bt_att_chan *chan;
1892 struct bt_uuid *uuid;
1893 struct net_buf *buf;
1894 struct bt_att_read_group_rsp *rsp;
1895 struct bt_att_group_data *group;
1896 };
1897
1898 static bool attr_read_group_cb(struct net_buf *frag, ssize_t read,
1899 void *user_data)
1900 {
1901 struct read_group_data *data = user_data;
1902
1903 if (!data->rsp->len) {
1904 /* Set len to be the first group found */
1905 data->rsp->len = read + sizeof(*data->group);
1906 } else if (data->rsp->len != read + sizeof(*data->group)) {
1907 /* All group entries must have the same size */
1908 data->buf->len -= sizeof(*data->group);
1909 data->group = NULL;
1910 return false;
1911 }
1912
1913 return true;
1914 }
1915
1916 static uint8_t read_group_cb(const struct bt_gatt_attr *attr, uint16_t handle,
1917 void *user_data)
1918 {
1919 struct read_group_data *data = user_data;
1920 struct bt_att_chan *chan = data->chan;
1921 int read;
1922
1923 /* Update group end_handle if attribute is not a service */
1924 if (bt_uuid_cmp(attr->uuid, BT_UUID_GATT_PRIMARY) &&
1925 bt_uuid_cmp(attr->uuid, BT_UUID_GATT_SECONDARY)) {
1926 if (data->group &&
1927 handle > sys_le16_to_cpu(data->group->end_handle)) {
1928 data->group->end_handle = sys_cpu_to_le16(handle);
1929 }
1930 return BT_GATT_ITER_CONTINUE;
1931 }
1932
1933 /* Skip if the group type does not match */
1934 if (bt_uuid_cmp(attr->uuid, data->uuid)) {
1935 data->group = NULL;
1936 return BT_GATT_ITER_CONTINUE;
1937 }
1938
1939 LOG_DBG("handle 0x%04x", handle);
1940
1941 /* Stop if there is no space left */
1942 if (data->rsp->len &&
1943 bt_att_mtu(chan) - data->buf->len < data->rsp->len) {
1944 return BT_GATT_ITER_STOP;
1945 }
1946
1947 /* Fast forward to next group position */
1948 data->group = net_buf_add(data->buf, sizeof(*data->group));
1949
1950 /* Initialize group handle range */
1951 data->group->start_handle = sys_cpu_to_le16(handle);
1952 data->group->end_handle = sys_cpu_to_le16(handle);
1953
1954 /* Read attribute value and store in the buffer */
1955 read = att_chan_read(chan, attr, data->buf, 0, attr_read_group_cb,
1956 data);
1957 if (read < 0) {
1958 /* TODO: Handle read errors */
1959 return BT_GATT_ITER_STOP;
1960 }
1961
1962 if (!data->group) {
1963 return BT_GATT_ITER_STOP;
1964 }
1965
1966 /* Continue only if there is still space for more items */
1967 return BT_GATT_ITER_CONTINUE;
1968 }
1969
1970 static uint8_t att_read_group_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
1971 uint16_t start_handle, uint16_t end_handle)
1972 {
1973 struct read_group_data data;
1974
1975 (void)memset(&data, 0, sizeof(data));
1976
1977 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_GROUP_RSP);
1978 if (!data.buf) {
1979 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1980 }
1981
1982 data.chan = chan;
1983 data.uuid = uuid;
1984 data.rsp = net_buf_add(data.buf, sizeof(*data.rsp));
1985 data.rsp->len = 0U;
1986 data.group = NULL;
1987
1988 bt_gatt_foreach_attr(start_handle, end_handle, read_group_cb, &data);
1989
1990 if (!data.rsp->len) {
1991 net_buf_unref(data.buf);
1992 /* Respond here since handle is set */
1993 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
1994 BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
1995 return 0;
1996 }
1997
1998 bt_att_chan_send_rsp(chan, data.buf);
1999
2000 return 0;
2001 }
2002
2003 static uint8_t att_read_group_req(struct bt_att_chan *chan, struct net_buf *buf)
2004 {
2005 struct bt_att_read_group_req *req;
2006 uint16_t start_handle, end_handle, err_handle;
2007 union {
2008 struct bt_uuid uuid;
2009 struct bt_uuid_16 u16;
2010 struct bt_uuid_128 u128;
2011 } u;
2012 uint8_t uuid_len = buf->len - sizeof(*req);
2013
2014 /* Type can only be UUID16 or UUID128 */
2015 if (uuid_len != 2 && uuid_len != 16) {
2016 return BT_ATT_ERR_INVALID_PDU;
2017 }
2018
2019 req = net_buf_pull_mem(buf, sizeof(*req));
2020
2021 start_handle = sys_le16_to_cpu(req->start_handle);
2022 end_handle = sys_le16_to_cpu(req->end_handle);
2023
2024 if (!bt_uuid_create(&u.uuid, req->uuid, uuid_len)) {
2025 return BT_ATT_ERR_UNLIKELY;
2026 }
2027
2028 LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %s", start_handle, end_handle,
2029 bt_uuid_str(&u.uuid));
2030
2031 if (!range_is_valid(start_handle, end_handle, &err_handle)) {
2032 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, err_handle,
2033 BT_ATT_ERR_INVALID_HANDLE);
2034 return 0;
2035 }
2036
2037 /* Core v4.2, Vol 3, sec 2.5.3 Attribute Grouping:
2038 * Not all of the grouping attributes can be used in the ATT
2039 * Read By Group Type Request. The "Primary Service" and "Secondary
2040 * Service" grouping types may be used in the Read By Group Type
2041 * Request. The "Characteristic" grouping type shall not be used in
2042 * the ATT Read By Group Type Request.
2043 */
2044 if (bt_uuid_cmp(&u.uuid, BT_UUID_GATT_PRIMARY) &&
2045 bt_uuid_cmp(&u.uuid, BT_UUID_GATT_SECONDARY)) {
2046 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
2047 BT_ATT_ERR_UNSUPPORTED_GROUP_TYPE);
2048 return 0;
2049 }
2050
2051 return att_read_group_rsp(chan, &u.uuid, start_handle, end_handle);
2052 }
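
/* Illustrative sketch (not part of the original file): the client-side
 * counterpart of the Read By Group Type handling above is primary service
 * discovery through the public bt_gatt_discover() API. Names prefixed with
 * example_ are hypothetical.
 */
#if 0
static uint8_t example_discover_cb(struct bt_conn *conn,
				   const struct bt_gatt_attr *attr,
				   struct bt_gatt_discover_params *params)
{
	if (!attr) {
		return BT_GATT_ITER_STOP; /* Discovery complete */
	}

	LOG_DBG("Primary service at handle 0x%04x", attr->handle);

	return BT_GATT_ITER_CONTINUE;
}

static void example_discover_primary(struct bt_conn *conn)
{
	/* Must stay valid until the callback returns BT_GATT_ITER_STOP */
	static struct bt_gatt_discover_params params = {
		.uuid = NULL, /* Match all primary services */
		.func = example_discover_cb,
		.start_handle = 0x0001,
		.end_handle = 0xffff,
		.type = BT_GATT_DISCOVER_PRIMARY,
	};

	(void)bt_gatt_discover(conn, &params);
}
#endif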
2053
2054 struct write_data {
2055 struct bt_conn *conn;
2056 struct net_buf *buf;
2057 uint8_t req;
2058 const void *value;
2059 uint16_t len;
2060 uint16_t offset;
2061 uint8_t err;
2062 };
2063
2064 static bool attr_write_authorize(struct bt_conn *conn,
2065 const struct bt_gatt_attr *attr)
2066 {
2067 if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
2068 return true;
2069 }
2070
2071 if (!authorization_cb || !authorization_cb->write_authorize) {
2072 return true;
2073 }
2074
2075 return authorization_cb->write_authorize(conn, attr);
2076 }
2077
2078 static uint8_t write_cb(const struct bt_gatt_attr *attr, uint16_t handle,
2079 void *user_data)
2080 {
2081 struct write_data *data = user_data;
2082 int write;
2083 uint8_t flags = 0U;
2084
2085 LOG_DBG("handle 0x%04x offset %u", handle, data->offset);
2086
2087 /* Check attribute permissions */
2088 data->err = bt_gatt_check_perm(data->conn, attr,
2089 BT_GATT_PERM_WRITE_MASK);
2090 if (data->err) {
2091 return BT_GATT_ITER_STOP;
2092 }
2093
2094 /* Check the attribute authorization logic */
2095 if (!attr_write_authorize(data->conn, attr)) {
2096 data->err = BT_ATT_ERR_AUTHORIZATION;
2097 return BT_GATT_ITER_STOP;
2098 }
2099
2100 /* Set command flag if not a request */
2101 if (!data->req) {
2102 flags |= BT_GATT_WRITE_FLAG_CMD;
2103 } else if (data->req == BT_ATT_OP_EXEC_WRITE_REQ) {
2104 flags |= BT_GATT_WRITE_FLAG_EXECUTE;
2105 }
2106
2107 /* Write attribute value */
2108 write = attr->write(data->conn, attr, data->value, data->len,
2109 data->offset, flags);
2110 if (write < 0 || write != data->len) {
2111 data->err = err_to_att(write);
2112 return BT_GATT_ITER_STOP;
2113 }
2114
2115 data->err = 0U;
2116
2117 return BT_GATT_ITER_CONTINUE;
2118 }
2119
2120 static uint8_t att_write_rsp(struct bt_att_chan *chan, uint8_t req, uint8_t rsp,
2121 uint16_t handle, uint16_t offset, const void *value,
2122 uint16_t len)
2123 {
2124 struct write_data data;
2125
2126 if (!bt_gatt_change_aware(chan->att->conn, req ? true : false)) {
2127 if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
2128 return BT_ATT_ERR_DB_OUT_OF_SYNC;
2129 } else {
2130 return 0;
2131 }
2132 }
2133
2134 if (!handle) {
2135 return BT_ATT_ERR_INVALID_HANDLE;
2136 }
2137
2138 (void)memset(&data, 0, sizeof(data));
2139
2140 /* Only allocate buf if required to respond */
2141 if (rsp) {
2142 data.buf = bt_att_chan_create_pdu(chan, rsp, 0);
2143 if (!data.buf) {
2144 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
2145 }
2146 }
2147
2148 data.conn = chan->att->conn;
2149 data.req = req;
2150 data.offset = offset;
2151 data.value = value;
2152 data.len = len;
2153 data.err = BT_ATT_ERR_INVALID_HANDLE;
2154
2155 bt_gatt_foreach_attr(handle, handle, write_cb, &data);
2156
2157 if (data.err) {
2158 /* In case of error discard data and respond with an error */
2159 if (rsp) {
2160 net_buf_unref(data.buf);
2161 /* Respond here since handle is set */
2162 send_err_rsp(chan, req, handle, data.err);
2163 }
2164 return req == BT_ATT_OP_EXEC_WRITE_REQ ? data.err : 0;
2165 }
2166
2167 if (data.buf) {
2168 bt_att_chan_send_rsp(chan, data.buf);
2169 }
2170
2171 return 0;
2172 }
2173
2174 static uint8_t att_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2175 {
2176 uint16_t handle;
2177
2178 handle = net_buf_pull_le16(buf);
2179
2180 LOG_DBG("handle 0x%04x", handle);
2181
2182 return att_write_rsp(chan, BT_ATT_OP_WRITE_REQ, BT_ATT_OP_WRITE_RSP,
2183 handle, 0, buf->data, buf->len);
2184 }
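
/* Illustrative sketch (not part of the original file): a Write Request like
 * the one handled above is generated by a client through the public
 * bt_gatt_write() API. The example_ names are hypothetical.
 */
#if 0
static void example_write_done(struct bt_conn *conn, uint8_t err,
			       struct bt_gatt_write_params *params)
{
	LOG_DBG("Write complete: err 0x%02x", err);
}

static int example_write(struct bt_conn *conn, uint16_t handle,
			 const void *value, uint16_t len)
{
	/* Must stay valid until the callback is invoked */
	static struct bt_gatt_write_params params;

	params.func = example_write_done;
	params.handle = handle;
	params.offset = 0;
	params.data = value;
	params.length = len;

	return bt_gatt_write(conn, &params);
}
#endif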
2185
2186 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
2187 struct prep_data {
2188 struct bt_conn *conn;
2189 struct net_buf *buf;
2190 const void *value;
2191 uint16_t len;
2192 uint16_t offset;
2193 uint8_t err;
2194 };
2195
2196 static uint8_t prep_write_cb(const struct bt_gatt_attr *attr, uint16_t handle,
2197 void *user_data)
2198 {
2199 struct prep_data *data = user_data;
2200 struct bt_attr_data *attr_data;
2201 int write;
2202
2203 LOG_DBG("handle 0x%04x offset %u", handle, data->offset);
2204
2205 /* Check attribute permissions */
2206 data->err = bt_gatt_check_perm(data->conn, attr,
2207 BT_GATT_PERM_WRITE_MASK);
2208 if (data->err) {
2209 return BT_GATT_ITER_STOP;
2210 }
2211
2212 /* Check the attribute authorization logic */
2213 if (!attr_write_authorize(data->conn, attr)) {
2214 data->err = BT_ATT_ERR_AUTHORIZATION;
2215 return BT_GATT_ITER_STOP;
2216 }
2217
2218 /* Check if attribute requires handler to accept the data */
2219 if (!(attr->perm & BT_GATT_PERM_PREPARE_WRITE)) {
2220 goto append;
2221 }
2222
2223 /* Write attribute value to check if device is authorized */
2224 write = attr->write(data->conn, attr, data->value, data->len,
2225 data->offset, BT_GATT_WRITE_FLAG_PREPARE);
2226 if (write != 0) {
2227 data->err = err_to_att(write);
2228 return BT_GATT_ITER_STOP;
2229 }
2230
2231 append:
2232 /* Copy data into the outstanding queue */
2233 data->buf = net_buf_alloc(&prep_pool, K_NO_WAIT);
2234 if (!data->buf) {
2235 data->err = BT_ATT_ERR_PREPARE_QUEUE_FULL;
2236 return BT_GATT_ITER_STOP;
2237 }
2238
2239 attr_data = net_buf_user_data(data->buf);
2240 attr_data->handle = handle;
2241 attr_data->offset = data->offset;
2242
2243 net_buf_add_mem(data->buf, data->value, data->len);
2244
2245 data->err = 0U;
2246
2247 return BT_GATT_ITER_CONTINUE;
2248 }
2249
2250 static uint8_t att_prep_write_rsp(struct bt_att_chan *chan, uint16_t handle,
2251 uint16_t offset, const void *value, uint8_t len)
2252 {
2253 struct prep_data data;
2254 struct bt_att_prepare_write_rsp *rsp;
2255
2256 if (!bt_gatt_change_aware(chan->att->conn, true)) {
2257 if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
2258 return BT_ATT_ERR_DB_OUT_OF_SYNC;
2259 } else {
2260 return 0;
2261 }
2262 }
2263
2264 if (!handle) {
2265 return BT_ATT_ERR_INVALID_HANDLE;
2266 }
2267
2268 (void)memset(&data, 0, sizeof(data));
2269
2270 data.conn = chan->att->conn;
2271 data.offset = offset;
2272 data.value = value;
2273 data.len = len;
2274 data.err = BT_ATT_ERR_INVALID_HANDLE;
2275
2276 bt_gatt_foreach_attr(handle, handle, prep_write_cb, &data);
2277
2278 if (data.err) {
2279 /* Respond here since handle is set */
2280 send_err_rsp(chan, BT_ATT_OP_PREPARE_WRITE_REQ, handle,
2281 data.err);
2282 return 0;
2283 }
2284
2285 LOG_DBG("buf %p handle 0x%04x offset %u", data.buf, handle, offset);
2286
2287 /* Store buffer in the outstanding queue */
2288 net_buf_slist_put(&chan->att->prep_queue, data.buf);
2289
2290 /* Generate response */
2291 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_PREPARE_WRITE_RSP);
2292 if (!data.buf) {
2293 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
2294 }
2295
2296 rsp = net_buf_add(data.buf, sizeof(*rsp));
2297 rsp->handle = sys_cpu_to_le16(handle);
2298 rsp->offset = sys_cpu_to_le16(offset);
2299 net_buf_add(data.buf, len);
2300 memcpy(rsp->value, value, len);
2301
2302 bt_att_chan_send_rsp(chan, data.buf);
2303
2304 return 0;
2305 }
2306 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2307
2308 static uint8_t att_prepare_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2309 {
2310 #if CONFIG_BT_ATT_PREPARE_COUNT == 0
2311 return BT_ATT_ERR_NOT_SUPPORTED;
2312 #else
2313 struct bt_att_prepare_write_req *req;
2314 uint16_t handle, offset;
2315
2316 req = net_buf_pull_mem(buf, sizeof(*req));
2317
2318 handle = sys_le16_to_cpu(req->handle);
2319 offset = sys_le16_to_cpu(req->offset);
2320
2321 LOG_DBG("handle 0x%04x offset %u", handle, offset);
2322
2323 return att_prep_write_rsp(chan, handle, offset, buf->data, buf->len);
2324 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2325 }
2326
2327 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
2328 static uint8_t exec_write_reassemble(uint16_t handle, uint16_t offset,
2329 sys_slist_t *list,
2330 struct net_buf_simple *buf)
2331 {
2332 struct net_buf *entry, *next;
2333 sys_snode_t *prev;
2334
2335 prev = NULL;
2336 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(list, entry, next, node) {
2337 struct bt_attr_data *tmp_data = net_buf_user_data(entry);
2338
2339 LOG_DBG("entry %p handle 0x%04x, offset %u", entry, tmp_data->handle,
2340 tmp_data->offset);
2341
2342 if (tmp_data->handle == handle) {
2343 if (tmp_data->offset == 0) {
2344 /* Multiple writes to the same handle can occur
2345 * in a prepare write queue. An offset of 0
2346 * means that a new write to the same handle
2347 * begins, so break here and process the first
2348 * write.
2349 */
2350
2351 LOG_DBG("tmp_data->offset == 0");
2352 break;
2353 }
2354
2355 if (tmp_data->offset != buf->len + offset) {
2356 /* The offset must increase contiguously to
2357 * avoid badly reassembled buffers.
2358 */
2359
2360 LOG_DBG("Bad offset %u (%u, %u)", tmp_data->offset, buf->len,
2361 offset);
2362
2363 return BT_ATT_ERR_INVALID_OFFSET;
2364 }
2365
2366 if (buf->len + entry->len > buf->size) {
2367 return BT_ATT_ERR_INVALID_ATTRIBUTE_LEN;
2368 }
2369
2370 net_buf_simple_add_mem(buf, entry->data, entry->len);
2371 sys_slist_remove(list, prev, &entry->node);
2372 net_buf_unref(entry);
2373 } else {
2374 prev = &entry->node;
2375 }
2376 }
2377
2378 return BT_ATT_ERR_SUCCESS;
2379 }
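
/* Worked example (illustrative, hypothetical values): with a prep_queue of
 *   {handle 0x0010, offset 0,  18 octets}
 *   {handle 0x0010, offset 18, 18 octets}
 *   {handle 0x0012, offset 0,   4 octets}
 * att_exec_write_rsp() below pops the first 0x0010 entry, and
 * exec_write_reassemble() appends the second one since its offset (18)
 * equals buf->len + offset (18 + 0), yielding a single 36-octet write to
 * handle 0x0010. The 0x0012 entry does not match and is left on the queue
 * for the next iteration.
 */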
2380
2381 static uint8_t att_exec_write_rsp(struct bt_att_chan *chan, uint8_t flags)
2382 {
2383 struct net_buf *buf;
2384 uint8_t err = 0U;
2385
2386 /* The following code iterates over all prepare writes in the
2387 * prep_queue and reassembles those that share the same handle.
2388 * Once a handle has been reassembled, the result is sent to the
2389 * upper layers and the next handle is processed.
2390 */
2391 while (!sys_slist_is_empty(&chan->att->prep_queue)) {
2392 struct bt_attr_data *data;
2393 uint16_t handle;
2394
2395 NET_BUF_SIMPLE_DEFINE_STATIC(reassembled_data,
2396 MIN(BT_ATT_MAX_ATTRIBUTE_LEN,
2397 CONFIG_BT_ATT_PREPARE_COUNT * BT_ATT_BUF_SIZE));
2398
2399 buf = net_buf_slist_get(&chan->att->prep_queue);
2400 data = net_buf_user_data(buf);
2401 handle = data->handle;
2402
2403 LOG_DBG("buf %p handle 0x%04x offset %u", buf, handle, data->offset);
2404
2405 net_buf_simple_reset(&reassembled_data);
2406 net_buf_simple_add_mem(&reassembled_data, buf->data, buf->len);
2407
2408 err = exec_write_reassemble(handle, data->offset,
2409 &chan->att->prep_queue,
2410 &reassembled_data);
2411 if (err != BT_ATT_ERR_SUCCESS) {
2412 send_err_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ,
2413 handle, err);
2414 return 0;
2415 }
2416
2417 /* Just discard the data if an error was set */
2418 if (!err && flags == BT_ATT_FLAG_EXEC) {
2419 err = att_write_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ, 0,
2420 handle, data->offset,
2421 reassembled_data.data,
2422 reassembled_data.len);
2423 if (err) {
2424 /* Respond here since handle is set */
2425 send_err_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ,
2426 data->handle, err);
2427 }
2428 }
2429
2430 net_buf_unref(buf);
2431 }
2432
2433 if (err) {
2434 return 0;
2435 }
2436
2437 /* Generate response */
2438 buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_EXEC_WRITE_RSP);
2439 if (!buf) {
2440 return BT_ATT_ERR_UNLIKELY;
2441 }
2442
2443 bt_att_chan_send_rsp(chan, buf);
2444
2445 return 0;
2446 }
2447 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2448
2450 static uint8_t att_exec_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2451 {
2452 #if CONFIG_BT_ATT_PREPARE_COUNT == 0
2453 return BT_ATT_ERR_NOT_SUPPORTED;
2454 #else
2455 struct bt_att_exec_write_req *req;
2456
2457 req = (void *)buf->data;
2458
2459 LOG_DBG("flags 0x%02x", req->flags);
2460
2461 return att_exec_write_rsp(chan, req->flags);
2462 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2463 }
2464
2465 static uint8_t att_write_cmd(struct bt_att_chan *chan, struct net_buf *buf)
2466 {
2467 uint16_t handle;
2468
2469 handle = net_buf_pull_le16(buf);
2470
2471 LOG_DBG("handle 0x%04x", handle);
2472
2473 return att_write_rsp(chan, 0, 0, handle, 0, buf->data, buf->len);
2474 }
2475
2476 #if defined(CONFIG_BT_SIGNING)
2477 static uint8_t att_signed_write_cmd(struct bt_att_chan *chan, struct net_buf *buf)
2478 {
2479 struct bt_conn *conn = chan->chan.chan.conn;
2480 struct bt_att_signed_write_cmd *req;
2481 uint16_t handle;
2482 int err;
2483
2484 /* The Signed Write Without Response sub-procedure shall only be supported
2485 * on the LE Fixed Channel Unenhanced ATT bearer.
2486 */
2487 if (bt_att_is_enhanced(chan)) {
2488 /* No response for this command */
2489 return 0;
2490 }
2491
2492 req = (void *)buf->data;
2493
2494 handle = sys_le16_to_cpu(req->handle);
2495
2496 LOG_DBG("handle 0x%04x", handle);
2497
2498 /* Verifying data requires full buffer including attribute header */
2499 net_buf_push(buf, sizeof(struct bt_att_hdr));
2500 err = bt_smp_sign_verify(conn, buf);
2501 if (err) {
2502 LOG_ERR("Error verifying data");
2503 /* No response for this command */
2504 return 0;
2505 }
2506
2507 net_buf_pull(buf, sizeof(struct bt_att_hdr));
2508 net_buf_pull(buf, sizeof(*req));
2509
2510 return att_write_rsp(chan, 0, 0, handle, 0, buf->data,
2511 buf->len - sizeof(struct bt_att_signature));
2512 }
2513 #endif /* CONFIG_BT_SIGNING */
2514
2515 #if defined(CONFIG_BT_GATT_CLIENT)
2516 #if defined(CONFIG_BT_ATT_RETRY_ON_SEC_ERR)
2517 static int att_change_security(struct bt_conn *conn, uint8_t err)
2518 {
2519 bt_security_t sec;
2520
2521 switch (err) {
2522 case BT_ATT_ERR_INSUFFICIENT_ENCRYPTION:
2523 if (conn->sec_level >= BT_SECURITY_L2) {
2524 return -EALREADY;
2525 }
2526 sec = BT_SECURITY_L2;
2527 break;
2528 case BT_ATT_ERR_AUTHENTICATION:
2529 if (conn->sec_level < BT_SECURITY_L2) {
2530 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2531 * page 375:
2532 *
2533 * If an LTK is not available, the service request
2534 * shall be rejected with the error code 'Insufficient
2535 * Authentication'.
2536 * Note: When the link is not encrypted, the error code
2537 * "Insufficient Authentication" does not indicate that
2538 * MITM protection is required.
2539 */
2540 sec = BT_SECURITY_L2;
2541 } else if (conn->sec_level < BT_SECURITY_L3) {
2542 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2543 * page 375:
2544 *
2545 * If an authenticated pairing is required but only an
2546 * unauthenticated pairing has occurred and the link is
2547 * currently encrypted, the service request shall be
2548 * rejected with the error code 'Insufficient
2549 * Authentication'.
2550 * Note: When unauthenticated pairing has occurred and
2551 * the link is currently encrypted, the error code
2552 * 'Insufficient Authentication' indicates that MITM
2553 * protection is required.
2554 */
2555 sec = BT_SECURITY_L3;
2556 } else if (conn->sec_level < BT_SECURITY_L4) {
2557 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2558 * page 375:
2559 *
2560 * If LE Secure Connections authenticated pairing is
2561 * required but LE legacy pairing has occurred and the
2562 * link is currently encrypted, the service request
2563 * shall be rejected with the error code 'Insufficient
2564 * Authentication'.
2565 */
2566 sec = BT_SECURITY_L4;
2567 } else {
2568 return -EALREADY;
2569 }
2570 break;
2571 default:
2572 return -EINVAL;
2573 }
2574
2575 return bt_conn_set_security(conn, sec);
2576 }
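
/* Example (illustrative): if the peer rejects a request with
 * BT_ATT_ERR_AUTHENTICATION while the link is encrypted with an
 * unauthenticated key (sec_level == BT_SECURITY_L2), this is read as
 * "MITM protection required" and security is raised to BT_SECURITY_L3.
 * Once the security procedure completes, bt_att_encrypt_change() retries
 * the pending request.
 */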
2577 #endif /* CONFIG_BT_ATT_RETRY_ON_SEC_ERR */
2578
2579 static uint8_t att_error_rsp(struct bt_att_chan *chan, struct net_buf *buf)
2580 {
2581 struct bt_att_error_rsp *rsp;
2582 uint8_t err;
2583
2584 rsp = (void *)buf->data;
2585
2586 LOG_DBG("request 0x%02x handle 0x%04x error 0x%02x", rsp->request,
2587 sys_le16_to_cpu(rsp->handle), rsp->error);
2588
2589 /* Don't retry if there is no req pending or it has been cancelled.
2590 *
2591 * BLUETOOTH SPECIFICATION Version 5.2 [Vol 3, Part F]
2592 * page 1423:
2593 *
2594 * If an error code is received in the ATT_ERROR_RSP PDU that is not
2595 * understood by the client, for example an error code that was reserved
2596 * for future use that is now being used in a future version of the
2597 * specification, then the ATT_ERROR_RSP PDU shall still be considered to
2598 * state that the given request cannot be performed for an unknown reason.
2599 */
2600 if (!chan->req || chan->req == &cancel || !rsp->error) {
2601 err = BT_ATT_ERR_UNLIKELY;
2602 goto done;
2603 }
2604
2605 err = rsp->error;
2606
2607 #if defined(CONFIG_BT_ATT_RETRY_ON_SEC_ERR)
2608 int ret;
2609
2610 /* Check if error can be handled by elevating security. */
2611 ret = att_change_security(chan->chan.chan.conn, err);
2612 if (ret == 0 || ret == -EBUSY) {
2613 /* The ATT timeout work is normally cancelled in att_handle_rsp.
2614 * However, retrying is a special case, so the timeout is
2615 * cancelled here instead.
2616 */
2617 k_work_cancel_delayable(&chan->timeout_work);
2618
2619 chan->req->retrying = true;
2620 return 0;
2621 }
2622 #endif /* CONFIG_BT_ATT_RETRY_ON_SEC_ERR */
2623
2624 done:
2625 return att_handle_rsp(chan, NULL, 0, err);
2626 }
2627
2628 static uint8_t att_handle_find_info_rsp(struct bt_att_chan *chan,
2629 struct net_buf *buf)
2630 {
2631 LOG_DBG("");
2632
2633 return att_handle_rsp(chan, buf->data, buf->len, 0);
2634 }
2635
2636 static uint8_t att_handle_find_type_rsp(struct bt_att_chan *chan,
2637 struct net_buf *buf)
2638 {
2639 LOG_DBG("");
2640
2641 return att_handle_rsp(chan, buf->data, buf->len, 0);
2642 }
2643
2644 static uint8_t att_handle_read_type_rsp(struct bt_att_chan *chan,
2645 struct net_buf *buf)
2646 {
2647 LOG_DBG("");
2648
2649 return att_handle_rsp(chan, buf->data, buf->len, 0);
2650 }
2651
2652 static uint8_t att_handle_read_rsp(struct bt_att_chan *chan,
2653 struct net_buf *buf)
2654 {
2655 LOG_DBG("");
2656
2657 return att_handle_rsp(chan, buf->data, buf->len, 0);
2658 }
2659
2660 static uint8_t att_handle_read_blob_rsp(struct bt_att_chan *chan,
2661 struct net_buf *buf)
2662 {
2663 LOG_DBG("");
2664
2665 return att_handle_rsp(chan, buf->data, buf->len, 0);
2666 }
2667
2668 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
2669 static uint8_t att_handle_read_mult_rsp(struct bt_att_chan *chan,
2670 struct net_buf *buf)
2671 {
2672 LOG_DBG("");
2673
2674 return att_handle_rsp(chan, buf->data, buf->len, 0);
2675 }
2676
2677 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
2678
2679 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
2680 static uint8_t att_handle_read_mult_vl_rsp(struct bt_att_chan *chan,
2681 struct net_buf *buf)
2682 {
2683 LOG_DBG("");
2684
2685 return att_handle_rsp(chan, buf->data, buf->len, 0);
2686 }
2687 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
2688
2689 static uint8_t att_handle_read_group_rsp(struct bt_att_chan *chan,
2690 struct net_buf *buf)
2691 {
2692 LOG_DBG("");
2693
2694 return att_handle_rsp(chan, buf->data, buf->len, 0);
2695 }
2696
2697 static uint8_t att_handle_write_rsp(struct bt_att_chan *chan,
2698 struct net_buf *buf)
2699 {
2700 LOG_DBG("");
2701
2702 return att_handle_rsp(chan, buf->data, buf->len, 0);
2703 }
2704
2705 static uint8_t att_handle_prepare_write_rsp(struct bt_att_chan *chan,
2706 struct net_buf *buf)
2707 {
2708 LOG_DBG("");
2709
2710 return att_handle_rsp(chan, buf->data, buf->len, 0);
2711 }
2712
2713 static uint8_t att_handle_exec_write_rsp(struct bt_att_chan *chan,
2714 struct net_buf *buf)
2715 {
2716 LOG_DBG("");
2717
2718 return att_handle_rsp(chan, buf->data, buf->len, 0);
2719 }
2720
2721 static uint8_t att_notify(struct bt_att_chan *chan, struct net_buf *buf)
2722 {
2723 uint16_t handle;
2724
2725 handle = net_buf_pull_le16(buf);
2726
2727 LOG_DBG("chan %p handle 0x%04x", chan, handle);
2728
2729 bt_gatt_notification(chan->att->conn, handle, buf->data, buf->len);
2730
2731 return 0;
2732 }
2733
2734 static uint8_t att_indicate(struct bt_att_chan *chan, struct net_buf *buf)
2735 {
2736 uint16_t handle;
2737
2738 handle = net_buf_pull_le16(buf);
2739
2740 LOG_DBG("chan %p handle 0x%04x", chan, handle);
2741
2742 bt_gatt_notification(chan->att->conn, handle, buf->data, buf->len);
2743
2744 buf = bt_att_chan_create_pdu(chan, BT_ATT_OP_CONFIRM, 0);
2745 if (!buf) {
2746 return 0;
2747 }
2748
2749 bt_att_chan_send_rsp(chan, buf);
2750
2751 return 0;
2752 }
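
/* Illustrative sketch (not part of the original file): indications handled
 * above are requested by a client through the public bt_gatt_subscribe()
 * API; the confirmation is sent automatically by att_indicate(). The
 * example_ names are hypothetical.
 */
#if 0
static uint8_t example_indicated(struct bt_conn *conn,
				 struct bt_gatt_subscribe_params *params,
				 const void *data, uint16_t length)
{
	if (!data) {
		return BT_GATT_ITER_STOP; /* Unsubscribed */
	}

	LOG_DBG("Indication: %u octets", length);

	return BT_GATT_ITER_CONTINUE;
}

static void example_subscribe(struct bt_conn *conn, uint16_t value_handle,
			      uint16_t ccc_handle)
{
	/* Must stay valid while the subscription is active */
	static struct bt_gatt_subscribe_params params;

	params.notify = example_indicated;
	params.value_handle = value_handle;
	params.ccc_handle = ccc_handle;
	params.value = BT_GATT_CCC_INDICATE;

	(void)bt_gatt_subscribe(conn, &params);
}
#endif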
2753
2754 static uint8_t att_notify_mult(struct bt_att_chan *chan, struct net_buf *buf)
2755 {
2756 LOG_DBG("chan %p", chan);
2757
2758 bt_gatt_mult_notification(chan->att->conn, buf->data, buf->len);
2759
2760 return 0;
2761 }
2762 #endif /* CONFIG_BT_GATT_CLIENT */
2763
2764 static uint8_t att_confirm(struct bt_att_chan *chan, struct net_buf *buf)
2765 {
2766 LOG_DBG("");
2767
2768 return att_handle_rsp(chan, buf->data, buf->len, 0);
2769 }
2770
2771 static const struct att_handler {
2772 uint8_t op;
2773 uint8_t expect_len;
2774 att_type_t type;
2775 uint8_t (*func)(struct bt_att_chan *chan, struct net_buf *buf);
2776 } handlers[] = {
2777 { BT_ATT_OP_MTU_REQ,
2778 sizeof(struct bt_att_exchange_mtu_req),
2779 ATT_REQUEST,
2780 att_mtu_req },
2781 { BT_ATT_OP_FIND_INFO_REQ,
2782 sizeof(struct bt_att_find_info_req),
2783 ATT_REQUEST,
2784 att_find_info_req },
2785 { BT_ATT_OP_FIND_TYPE_REQ,
2786 sizeof(struct bt_att_find_type_req),
2787 ATT_REQUEST,
2788 att_find_type_req },
2789 { BT_ATT_OP_READ_TYPE_REQ,
2790 sizeof(struct bt_att_read_type_req),
2791 ATT_REQUEST,
2792 att_read_type_req },
2793 { BT_ATT_OP_READ_REQ,
2794 sizeof(struct bt_att_read_req),
2795 ATT_REQUEST,
2796 att_read_req },
2797 { BT_ATT_OP_READ_BLOB_REQ,
2798 sizeof(struct bt_att_read_blob_req),
2799 ATT_REQUEST,
2800 att_read_blob_req },
2801 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
2802 { BT_ATT_OP_READ_MULT_REQ,
2803 BT_ATT_READ_MULT_MIN_LEN_REQ,
2804 ATT_REQUEST,
2805 att_read_mult_req },
2806 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
2807 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
2808 { BT_ATT_OP_READ_MULT_VL_REQ,
2809 BT_ATT_READ_MULT_MIN_LEN_REQ,
2810 ATT_REQUEST,
2811 att_read_mult_vl_req },
2812 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
2813 { BT_ATT_OP_READ_GROUP_REQ,
2814 sizeof(struct bt_att_read_group_req),
2815 ATT_REQUEST,
2816 att_read_group_req },
2817 { BT_ATT_OP_WRITE_REQ,
2818 sizeof(struct bt_att_write_req),
2819 ATT_REQUEST,
2820 att_write_req },
2821 { BT_ATT_OP_PREPARE_WRITE_REQ,
2822 sizeof(struct bt_att_prepare_write_req),
2823 ATT_REQUEST,
2824 att_prepare_write_req },
2825 { BT_ATT_OP_EXEC_WRITE_REQ,
2826 sizeof(struct bt_att_exec_write_req),
2827 ATT_REQUEST,
2828 att_exec_write_req },
2829 { BT_ATT_OP_CONFIRM,
2830 0,
2831 ATT_CONFIRMATION,
2832 att_confirm },
2833 { BT_ATT_OP_WRITE_CMD,
2834 sizeof(struct bt_att_write_cmd),
2835 ATT_COMMAND,
2836 att_write_cmd },
2837 #if defined(CONFIG_BT_SIGNING)
2838 { BT_ATT_OP_SIGNED_WRITE_CMD,
2839 (sizeof(struct bt_att_write_cmd) +
2840 sizeof(struct bt_att_signature)),
2841 ATT_COMMAND,
2842 att_signed_write_cmd },
2843 #endif /* CONFIG_BT_SIGNING */
2844 #if defined(CONFIG_BT_GATT_CLIENT)
2845 { BT_ATT_OP_ERROR_RSP,
2846 sizeof(struct bt_att_error_rsp),
2847 ATT_RESPONSE,
2848 att_error_rsp },
2849 { BT_ATT_OP_MTU_RSP,
2850 sizeof(struct bt_att_exchange_mtu_rsp),
2851 ATT_RESPONSE,
2852 att_mtu_rsp },
2853 { BT_ATT_OP_FIND_INFO_RSP,
2854 sizeof(struct bt_att_find_info_rsp),
2855 ATT_RESPONSE,
2856 att_handle_find_info_rsp },
2857 { BT_ATT_OP_FIND_TYPE_RSP,
2858 sizeof(struct bt_att_handle_group),
2859 ATT_RESPONSE,
2860 att_handle_find_type_rsp },
2861 { BT_ATT_OP_READ_TYPE_RSP,
2862 sizeof(struct bt_att_read_type_rsp),
2863 ATT_RESPONSE,
2864 att_handle_read_type_rsp },
2865 { BT_ATT_OP_READ_RSP,
2866 0,
2867 ATT_RESPONSE,
2868 att_handle_read_rsp },
2869 { BT_ATT_OP_READ_BLOB_RSP,
2870 0,
2871 ATT_RESPONSE,
2872 att_handle_read_blob_rsp },
2873 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
2874 { BT_ATT_OP_READ_MULT_RSP,
2875 0,
2876 ATT_RESPONSE,
2877 att_handle_read_mult_rsp },
2878 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
2879 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
2880 { BT_ATT_OP_READ_MULT_VL_RSP,
2881 sizeof(struct bt_att_read_mult_vl_rsp),
2882 ATT_RESPONSE,
2883 att_handle_read_mult_vl_rsp },
2884 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
2885 { BT_ATT_OP_READ_GROUP_RSP,
2886 sizeof(struct bt_att_read_group_rsp),
2887 ATT_RESPONSE,
2888 att_handle_read_group_rsp },
2889 { BT_ATT_OP_WRITE_RSP,
2890 0,
2891 ATT_RESPONSE,
2892 att_handle_write_rsp },
2893 { BT_ATT_OP_PREPARE_WRITE_RSP,
2894 sizeof(struct bt_att_prepare_write_rsp),
2895 ATT_RESPONSE,
2896 att_handle_prepare_write_rsp },
2897 { BT_ATT_OP_EXEC_WRITE_RSP,
2898 0,
2899 ATT_RESPONSE,
2900 att_handle_exec_write_rsp },
2901 { BT_ATT_OP_NOTIFY,
2902 sizeof(struct bt_att_notify),
2903 ATT_NOTIFICATION,
2904 att_notify },
2905 { BT_ATT_OP_INDICATE,
2906 sizeof(struct bt_att_indicate),
2907 ATT_INDICATION,
2908 att_indicate },
2909 { BT_ATT_OP_NOTIFY_MULT,
2910 sizeof(struct bt_att_notify_mult),
2911 ATT_NOTIFICATION,
2912 att_notify_mult },
2913 #endif /* CONFIG_BT_GATT_CLIENT */
2914 };
2915
2916 static att_type_t att_op_get_type(uint8_t op)
2917 {
2918 switch (op) {
2919 case BT_ATT_OP_MTU_REQ:
2920 case BT_ATT_OP_FIND_INFO_REQ:
2921 case BT_ATT_OP_FIND_TYPE_REQ:
2922 case BT_ATT_OP_READ_TYPE_REQ:
2923 case BT_ATT_OP_READ_REQ:
2924 case BT_ATT_OP_READ_BLOB_REQ:
2925 case BT_ATT_OP_READ_MULT_REQ:
2926 case BT_ATT_OP_READ_MULT_VL_REQ:
2927 case BT_ATT_OP_READ_GROUP_REQ:
2928 case BT_ATT_OP_WRITE_REQ:
2929 case BT_ATT_OP_PREPARE_WRITE_REQ:
2930 case BT_ATT_OP_EXEC_WRITE_REQ:
2931 return ATT_REQUEST;
2932 case BT_ATT_OP_CONFIRM:
2933 return ATT_CONFIRMATION;
2934 case BT_ATT_OP_WRITE_CMD:
2935 case BT_ATT_OP_SIGNED_WRITE_CMD:
2936 return ATT_COMMAND;
2937 case BT_ATT_OP_ERROR_RSP:
2938 case BT_ATT_OP_MTU_RSP:
2939 case BT_ATT_OP_FIND_INFO_RSP:
2940 case BT_ATT_OP_FIND_TYPE_RSP:
2941 case BT_ATT_OP_READ_TYPE_RSP:
2942 case BT_ATT_OP_READ_RSP:
2943 case BT_ATT_OP_READ_BLOB_RSP:
2944 case BT_ATT_OP_READ_MULT_RSP:
2945 case BT_ATT_OP_READ_MULT_VL_RSP:
2946 case BT_ATT_OP_READ_GROUP_RSP:
2947 case BT_ATT_OP_WRITE_RSP:
2948 case BT_ATT_OP_PREPARE_WRITE_RSP:
2949 case BT_ATT_OP_EXEC_WRITE_RSP:
2950 return ATT_RESPONSE;
2951 case BT_ATT_OP_NOTIFY:
2952 case BT_ATT_OP_NOTIFY_MULT:
2953 return ATT_NOTIFICATION;
2954 case BT_ATT_OP_INDICATE:
2955 return ATT_INDICATION;
2956 }
2957
2958 if (op & ATT_CMD_MASK) {
2959 return ATT_COMMAND;
2960 }
2961
2962 return ATT_UNKNOWN;
2963 }
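
/* Note (illustrative): for opcodes not listed above, bit 6 of the opcode
 * octet (ATT_CMD_MASK, 0x40) is the spec-defined Command Flag. For example
 * both 0x52 (Write Command) and 0xD2 (Signed Write Command) have it set,
 * so unknown opcodes with this bit set are classified as commands and, per
 * bt_att_recv() below, never answered with an error response.
 */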
2964
2965 static struct bt_conn *get_conn(struct bt_att_chan *att_chan)
2966 {
2967 return att_chan->chan.chan.conn;
2968 }
2969
2970 static int bt_att_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
2971 {
2972 struct bt_att_chan *att_chan = ATT_CHAN(chan);
2973 struct bt_conn *conn = get_conn(att_chan);
2974 struct bt_att_hdr *hdr;
2975 const struct att_handler *handler;
2976 uint8_t err;
2977 size_t i;
2978
2979 if (buf->len < sizeof(*hdr)) {
2980 LOG_ERR("Too small ATT PDU received");
2981 return 0;
2982 }
2983
2984 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2985 LOG_DBG("Received ATT chan %p code 0x%02x len %zu", att_chan, hdr->code,
2986 net_buf_frags_len(buf));
2987
2988 if (conn->state != BT_CONN_CONNECTED) {
2989 LOG_DBG("not connected: conn %p state %u", conn, conn->state);
2990 return 0;
2991 }
2992
2993 if (!att_chan->att) {
2994 LOG_DBG("Ignore recv on detached ATT chan");
2995 return 0;
2996 }
2997
2998 for (i = 0, handler = NULL; i < ARRAY_SIZE(handlers); i++) {
2999 if (hdr->code == handlers[i].op) {
3000 handler = &handlers[i];
3001 break;
3002 }
3003 }
3004
3005 if (!handler) {
3006 LOG_WRN("Unhandled ATT code 0x%02x", hdr->code);
3007 if (att_op_get_type(hdr->code) != ATT_COMMAND &&
3008 att_op_get_type(hdr->code) != ATT_INDICATION) {
3009 send_err_rsp(att_chan, hdr->code, 0,
3010 BT_ATT_ERR_NOT_SUPPORTED);
3011 }
3012 return 0;
3013 }
3014
3015 if (buf->len < handler->expect_len) {
3016 LOG_ERR("Invalid len %u for code 0x%02x", buf->len, hdr->code);
3017 err = BT_ATT_ERR_INVALID_PDU;
3018 } else {
3019 err = handler->func(att_chan, buf);
3020 }
3021
3022 if (handler->type == ATT_REQUEST && err) {
3023 LOG_DBG("ATT error 0x%02x", err);
3024 send_err_rsp(att_chan, hdr->code, 0, err);
3025 }
3026
3027 return 0;
3028 }
3029
3030 static struct bt_att *att_get(struct bt_conn *conn)
3031 {
3032 struct bt_l2cap_chan *chan;
3033 struct bt_att_chan *att_chan;
3034
3035 if (conn->state != BT_CONN_CONNECTED) {
3036 LOG_WRN("Not connected");
3037 return NULL;
3038 }
3039
3040 chan = bt_l2cap_le_lookup_rx_cid(conn, BT_L2CAP_CID_ATT);
3041 if (!chan) {
3042 LOG_ERR("Unable to find ATT channel");
3043 return NULL;
3044 }
3045
3046 att_chan = ATT_CHAN(chan);
3047 if (!atomic_test_bit(att_chan->flags, ATT_CONNECTED)) {
3048 LOG_ERR("ATT channel not connected");
3049 return NULL;
3050 }
3051
3052 return att_chan->att;
3053 }
3054
3055 struct net_buf *bt_att_create_pdu(struct bt_conn *conn, uint8_t op, size_t len)
3056 {
3057 struct bt_att *att;
3058 struct bt_att_chan *chan, *tmp;
3059
3060 att = att_get(conn);
3061 if (!att) {
3062 return NULL;
3063 }
3064
3065 /* This allocator should _not_ be used for RSPs. */
3066 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
3067 if (len + sizeof(op) > bt_att_mtu(chan)) {
3068 continue;
3069 }
3070
3071 return bt_att_chan_create_pdu(chan, op, len);
3072 }
3073
3074 LOG_WRN("No ATT channel for MTU %zu", len + sizeof(op));
3075
3076 return NULL;
3077 }
3078
3079 struct net_buf *bt_att_create_rsp_pdu(struct bt_att_chan *chan, uint8_t op)
3080 {
3081 size_t headroom;
3082 struct bt_att_hdr *hdr;
3083 struct bt_att_tx_meta_data *data;
3084 struct net_buf *buf;
3085
3086 buf = net_buf_alloc(&att_pool, BT_ATT_TIMEOUT);
3087 if (!buf) {
3088 LOG_ERR("Unable to allocate buffer for op 0x%02x", op);
3089 return NULL;
3090 }
3091
3092 headroom = BT_L2CAP_BUF_SIZE(0);
3093
3094 if (bt_att_is_enhanced(chan)) {
3095 headroom += BT_L2CAP_SDU_HDR_SIZE;
3096 }
3097
3098 net_buf_reserve(buf, headroom);
3099
3100 data = bt_att_get_tx_meta_data(buf);
3101 data->att_chan = chan;
3102
3103 hdr = net_buf_add(buf, sizeof(*hdr));
3104 hdr->code = op;
3105
3106 return buf;
3107 }
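
/* Sizing note (illustrative): BT_L2CAP_BUF_SIZE(0) reserves headroom for
 * the HCI ACL and L2CAP headers that the lower layers prepend. On an
 * enhanced bearer, the 2-octet L2CAP SDU length header
 * (BT_L2CAP_SDU_HDR_SIZE) is reserved as well, since EATT runs over
 * credit-based channels.
 */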
3108
3109 static void att_reset(struct bt_att *att)
3110 {
3111 struct net_buf *buf;
3112
3113 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
3114 /* Discard queued buffers */
3115 while ((buf = net_buf_slist_get(&att->prep_queue))) {
3116 net_buf_unref(buf);
3117 }
3118 #endif /* CONFIG_BT_ATT_PREPARE_COUNT > 0 */
3119
3120 #if defined(CONFIG_BT_EATT)
3121 struct k_work_sync sync;
3122
3123 (void)k_work_cancel_delayable_sync(&att->eatt.connection_work, &sync);
3124 #endif /* CONFIG_BT_EATT */
3125
3126 while ((buf = k_fifo_get(&att->tx_queue, K_NO_WAIT))) {
3127 net_buf_unref(buf);
3128 }
3129
3130 /* Notify pending requests */
3131 while (!sys_slist_is_empty(&att->reqs)) {
3132 struct bt_att_req *req;
3133 sys_snode_t *node;
3134
3135 node = sys_slist_get_not_empty(&att->reqs);
3136 req = CONTAINER_OF(node, struct bt_att_req, node);
3137 if (req->func) {
3138 req->func(att->conn, -ECONNRESET, NULL, 0,
3139 req->user_data);
3140 }
3141
3142 bt_att_req_free(req);
3143 }
3144
3145 /* FIXME: `att->conn` is not reference counted. Consider using `bt_conn_ref`
3146 * and `bt_conn_unref` to follow convention.
3147 */
3148 att->conn = NULL;
3149 k_mem_slab_free(&att_slab, (void *)att);
3150 }
3151
3152 static void att_chan_detach(struct bt_att_chan *chan)
3153 {
3154 struct net_buf *buf;
3155
3156 LOG_DBG("chan %p", chan);
3157
3158 sys_slist_find_and_remove(&chan->att->chans, &chan->node);
3159
3160 /* Release pending buffers */
3161 while ((buf = k_fifo_get(&chan->tx_queue, K_NO_WAIT))) {
3162 net_buf_unref(buf);
3163 }
3164
3165 if (chan->req) {
3166 /* Notify outstanding request */
3167 att_handle_rsp(chan, NULL, 0, -ECONNRESET);
3168 }
3169
3170 chan->att = NULL;
3171 atomic_clear_bit(chan->flags, ATT_CONNECTED);
3172 }
3173
3174 static void att_timeout(struct k_work *work)
3175 {
3176 char addr[BT_ADDR_LE_STR_LEN];
3177 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
3178 struct bt_att_chan *chan = CONTAINER_OF(dwork, struct bt_att_chan, timeout_work);
3179
3180 bt_addr_le_to_str(bt_conn_get_dst(chan->att->conn), addr, sizeof(addr));
3181 LOG_ERR("ATT Timeout for device %s. Disconnecting...", addr);
3182
3183 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part F] page 480:
3184 *
3185 * A transaction not completed within 30 seconds shall time out. Such a
3186 * transaction shall be considered to have failed and the local higher
3187 * layers shall be informed of this failure. No more attribute protocol
3188 * requests, commands, indications or notifications shall be sent to the
3189 * target device on this ATT Bearer.
3190 */
3191
3192 /* The timeout state is local and can block new ATT operations, but does not affect the
3193 * remote side. Disconnecting the GATT connection upon ATT timeout simplifies error handling
3194 * for developers. This reduces rare failure conditions to a common one, allowing developers
3195 * to handle unexpected disconnections without needing special cases for ATT timeouts.
3196 */
3197 att_disconnect(chan);
3198 }
3199
3200 static struct bt_att_chan *att_get_fixed_chan(struct bt_conn *conn)
3201 {
3202 struct bt_l2cap_chan *chan;
3203
3204 chan = bt_l2cap_le_lookup_tx_cid(conn, BT_L2CAP_CID_ATT);
3205 __ASSERT(chan, "No ATT channel found");
3206
3207 return ATT_CHAN(chan);
3208 }
3209
3210 static void att_chan_attach(struct bt_att *att, struct bt_att_chan *chan)
3211 {
3212 LOG_DBG("att %p chan %p flags %lu", att, chan, atomic_get(chan->flags));
3213
3214 if (sys_slist_is_empty(&att->chans)) {
3215 /* Init general queues when attaching the first channel */
3216 k_fifo_init(&att->tx_queue);
3217 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
3218 sys_slist_init(&att->prep_queue);
3219 #endif
3220 }
3221
3222 sys_slist_prepend(&att->chans, &chan->node);
3223 }
3224
3225 static void bt_att_connected(struct bt_l2cap_chan *chan)
3226 {
3227 struct bt_att_chan *att_chan = ATT_CHAN(chan);
3228 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3229
3230 LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->tx.cid);
3231
3232 atomic_set_bit(att_chan->flags, ATT_CONNECTED);
3233
3234 att_chan_mtu_updated(att_chan);
3235
3236 k_work_init_delayable(&att_chan->timeout_work, att_timeout);
3237
3238 bt_gatt_connected(le_chan->chan.conn);
3239 }
3240
3241 static void bt_att_disconnected(struct bt_l2cap_chan *chan)
3242 {
3243 struct bt_att_chan *att_chan = ATT_CHAN(chan);
3244 struct bt_att *att = att_chan->att;
3245 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3246
3247 LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->tx.cid);
3248
3249 if (!att_chan->att) {
3250 LOG_DBG("Ignore disconnect on detached ATT chan");
3251 return;
3252 }
3253
3254 att_chan_detach(att_chan);
3255
3256 /* Don't reset if there are still channels to be used */
3257 if (!sys_slist_is_empty(&att->chans)) {
3258 return;
3259 }
3260
3261 att_reset(att);
3262
3263 bt_gatt_disconnected(le_chan->chan.conn);
3264 }
3265
3266 #if defined(CONFIG_BT_SMP)
3267 static uint8_t att_req_retry(struct bt_att_chan *att_chan)
3268 {
3269 struct bt_att_req *req = att_chan->req;
3270 struct net_buf *buf;
3271
3272 /* Resend buffer */
3273 if (!req->encode) {
3274 /* This request does not support resending */
3275 return BT_ATT_ERR_AUTHENTICATION;
3276 }
3277
3279 buf = bt_att_chan_create_pdu(att_chan, req->att_op, req->len);
3280 if (!buf) {
3281 return BT_ATT_ERR_UNLIKELY;
3282 }
3283
3284 if (req->encode(buf, req->len, req->user_data)) {
3285 net_buf_unref(buf);
3286 return BT_ATT_ERR_UNLIKELY;
3287 }
3288
3289 if (chan_send(att_chan, buf)) {
3290 net_buf_unref(buf);
3291 return BT_ATT_ERR_UNLIKELY;
3292 }
3293
3294 return BT_ATT_ERR_SUCCESS;
3295 }
3296
3297 static void bt_att_encrypt_change(struct bt_l2cap_chan *chan,
3298 uint8_t hci_status)
3299 {
3300 struct bt_att_chan *att_chan = ATT_CHAN(chan);
3301 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3302 struct bt_conn *conn = le_chan->chan.conn;
3303 uint8_t err;
3304
3305 LOG_DBG("chan %p conn %p handle %u sec_level 0x%02x status 0x%02x %s", le_chan, conn,
3306 conn->handle, conn->sec_level, hci_status, bt_hci_err_to_str(hci_status));
3307
3308 if (!att_chan->att) {
3309 LOG_DBG("Ignore encrypt change on detached ATT chan");
3310 return;
3311 }
3312
3313 /*
3314 * If status (HCI status of security procedure) is non-zero, notify
3315 * outstanding request about security failure.
3316 */
3317 if (hci_status) {
3318 if (att_chan->req && att_chan->req->retrying) {
3319 att_handle_rsp(att_chan, NULL, 0,
3320 BT_ATT_ERR_AUTHENTICATION);
3321 }
3322
3323 return;
3324 }
3325
3326 bt_gatt_encrypt_change(conn);
3327
3328 if (conn->sec_level == BT_SECURITY_L1) {
3329 return;
3330 }
3331
3332 if (!(att_chan->req && att_chan->req->retrying)) {
3333 return;
3334 }
3335
3336 LOG_DBG("Retrying");
3337
3338 err = att_req_retry(att_chan);
3339 if (err) {
3340 LOG_DBG("Retry failed (%d)", err);
3341 att_handle_rsp(att_chan, NULL, 0, err);
3342 }
3343 }
3344 #endif /* CONFIG_BT_SMP */
3345
3346 static void bt_att_status(struct bt_l2cap_chan *ch, atomic_t *status)
3347 {
3348 struct bt_att_chan *chan = ATT_CHAN(ch);
3349 sys_snode_t *node;
3350
3351 LOG_DBG("chan %p status %p", ch, status);
3352
3353 if (!atomic_test_bit(status, BT_L2CAP_STATUS_OUT)) {
3354 return;
3355 }
3356
3357 if (!chan->att) {
3358 LOG_DBG("Ignore status on detached ATT chan");
3359 return;
3360 }
3361
3362 /* If there is a request pending don't attempt to send */
3363 if (chan->req) {
3364 return;
3365 }
3366
3367 /* Pull next request from the list */
3368 node = sys_slist_get(&chan->att->reqs);
3369 if (!node) {
3370 return;
3371 }
3372
3373 if (bt_att_chan_req_send(chan, ATT_REQ(node)) >= 0) {
3374 return;
3375 }
3376
3377 /* Prepend back to the list as it could not be sent */
3378 sys_slist_prepend(&chan->att->reqs, node);
3379 }
3380
3381 static void bt_att_released(struct bt_l2cap_chan *ch)
3382 {
3383 struct bt_att_chan *chan = ATT_CHAN(ch);
3384
3385 LOG_DBG("chan %p", chan);
3386
3387 k_mem_slab_free(&chan_slab, (void *)chan);
3388 }
3389
3390 #if defined(CONFIG_BT_EATT)
3391 static void bt_att_reconfigured(struct bt_l2cap_chan *l2cap_chan)
3392 {
3393 struct bt_att_chan *att_chan = ATT_CHAN(l2cap_chan);
3394
3395 LOG_DBG("chan %p", att_chan);
3396
3397 att_chan_mtu_updated(att_chan);
3398 }
3399 #endif /* CONFIG_BT_EATT */
3400
3401 static struct bt_att_chan *att_chan_new(struct bt_att *att, atomic_val_t flags)
3402 {
3403 int quota = 0;
3404 static struct bt_l2cap_chan_ops ops = {
3405 .connected = bt_att_connected,
3406 .disconnected = bt_att_disconnected,
3407 .recv = bt_att_recv,
3408 .sent = bt_att_sent,
3409 .status = bt_att_status,
3410 #if defined(CONFIG_BT_SMP)
3411 .encrypt_change = bt_att_encrypt_change,
3412 #endif /* CONFIG_BT_SMP */
3413 .released = bt_att_released,
3414 #if defined(CONFIG_BT_EATT)
3415 .reconfigured = bt_att_reconfigured,
3416 #endif /* CONFIG_BT_EATT */
3417 };
3418 struct bt_att_chan *chan;
3419
3420 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3421 if (chan->att == att) {
3422 quota++;
3423 }
3424
3425 if (quota == ATT_CHAN_MAX) {
3426 LOG_DBG("Maximum number of channels reached: %d", quota);
3427 return NULL;
3428 }
3429 }
3430
3431 if (k_mem_slab_alloc(&chan_slab, (void **)&chan, K_NO_WAIT)) {
3432 LOG_WRN("No available ATT channel for conn %p", att->conn);
3433 return NULL;
3434 }
3435
3436 (void)memset(chan, 0, sizeof(*chan));
3437 chan->chan.chan.ops = &ops;
3438 k_fifo_init(&chan->tx_queue);
3439 atomic_set(chan->flags, flags);
3440 chan->att = att;
3441 att_chan_attach(att, chan);
3442
3443 if (bt_att_is_enhanced(chan)) {
3444 /* EATT: The MTU is communicated in the ECRED conn req/rsp PDUs.
3445 * The TX MTU is received at the L2CAP level.
3446 */
3447 chan->chan.rx.mtu = BT_LOCAL_ATT_MTU_EATT;
3448 } else {
3449 /* UATT: L2CAP Basic mode cannot communicate the L2CAP MTU by
3450 * itself, so ATT has to manage the MTU. The initial MTU is
3451 * defined by the spec.
3452 */
3453 chan->chan.tx.mtu = BT_ATT_DEFAULT_LE_MTU;
3454 chan->chan.rx.mtu = BT_ATT_DEFAULT_LE_MTU;
3455 }
3456
3457 return chan;
3458 }
3459
3460 #if defined(CONFIG_BT_EATT)
3461 size_t bt_eatt_count(struct bt_conn *conn)
3462 {
3463 struct bt_att *att;
3464 struct bt_att_chan *chan;
3465 size_t eatt_count = 0;
3466
3467 if (!conn) {
3468 return 0;
3469 }
3470
3471 att = att_get(conn);
3472 if (!att) {
3473 return 0;
3474 }
3475
3476 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3477 if (bt_att_is_enhanced(chan) &&
3478 atomic_test_bit(chan->flags, ATT_CONNECTED)) {
3479 eatt_count++;
3480 }
3481 }
3482
3483 return eatt_count;
3484 }
3485
3486 static void att_enhanced_connection_work_handler(struct k_work *work)
3487 {
3488 const struct k_work_delayable *dwork = k_work_delayable_from_work(work);
3489 const struct bt_att *att = CONTAINER_OF(dwork, struct bt_att, eatt.connection_work);
3490 const int err = bt_eatt_connect(att->conn, att->eatt.chans_to_connect);
3491
3492 if (err == -ENOMEM) {
3493 LOG_DBG("Failed to connect %d EATT channels, central has probably "
3494 "already established some.",
3495 att->eatt.chans_to_connect);
3496 } else if (err < 0) {
3497 LOG_WRN("Failed to connect %d EATT channels (err: %d)", att->eatt.chans_to_connect,
3498 err);
3499 }
3501 }
3502 #endif /* CONFIG_BT_EATT */
3503
3504 static int bt_att_accept(struct bt_conn *conn, struct bt_l2cap_chan **ch)
3505 {
3506 struct bt_att *att;
3507 struct bt_att_chan *chan;
3508
3509 LOG_DBG("conn %p handle %u", conn, conn->handle);
3510
3511 if (k_mem_slab_alloc(&att_slab, (void **)&att, K_NO_WAIT)) {
3512 LOG_ERR("No available ATT context for conn %p", conn);
3513 return -ENOMEM;
3514 }
3515
3516 att_handle_rsp_thread = k_current_get();
3517
3518 (void)memset(att, 0, sizeof(*att));
3519 att->conn = conn;
3520 sys_slist_init(&att->reqs);
3521 sys_slist_init(&att->chans);
3522
3523 #if defined(CONFIG_BT_EATT)
3524 k_work_init_delayable(&att->eatt.connection_work,
3525 att_enhanced_connection_work_handler);
3526 #endif /* CONFIG_BT_EATT */
3527
3528 chan = att_chan_new(att, 0);
3529 if (!chan) {
3530 return -ENOMEM;
3531 }
3532
3533 *ch = &chan->chan.chan;
3534
3535 return 0;
3536 }
3537
3538 /* The L2CAP channel section is sorted lexicographically. Make sure the ATT
3539 * fixed channel is placed last so that the SMP channel is fully initialized
3540 * before bt_att_connected tries to send a security request.
3541 */
3542 BT_L2CAP_CHANNEL_DEFINE(z_att_fixed_chan, BT_L2CAP_CID_ATT, bt_att_accept, NULL);
3543
3544 #if defined(CONFIG_BT_EATT)
3545 static k_timeout_t credit_based_connection_delay(struct bt_conn *conn)
3546 {
3547 /*
3548 * 5.3 Vol 3, Part G, Section 5.4 L2CAP COLLISION MITIGATION
3549 * ... In this situation, the Central may retry
3550 * immediately but the Peripheral shall wait a minimum of 100 ms before retrying;
3551 * on LE connections, the Peripheral shall wait at least 2 *
3552 * (connPeripheralLatency + 1) * connInterval if that is longer.
3553 */
3554
3555 if (IS_ENABLED(CONFIG_BT_CENTRAL) && conn->role == BT_CONN_ROLE_CENTRAL) {
3556 return K_NO_WAIT;
3557 } else if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
3558 uint8_t random;
3559 int err;
3560
3561 err = bt_rand(&random, sizeof(random));
3562 if (err) {
3563 random = 0;
3564 }
3565
3566 const uint8_t rand_delay = random & 0x7; /* Small random delay for IOP */
3567 /* The maximum value of (latency + 1) * 2 multiplied with the
3568 * maximum connection interval has a maximum value of
3569 * 4000000000 which can be stored in 32-bits, so this won't
3570 * result in an overflow
3571 */
3572 const uint32_t calculated_delay_us =
3573 2 * (conn->le.latency + 1) * BT_CONN_INTERVAL_TO_US(conn->le.interval);
3574 const uint32_t calculated_delay_ms = calculated_delay_us / USEC_PER_MSEC;
3575
3576 return K_MSEC(MAX(100, calculated_delay_ms + rand_delay));
3577 }
3578
3579 /* Must be either central or peripheral */
3580 __ASSERT_NO_MSG(false);
3581 CODE_UNREACHABLE;
3582 }
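
/* Worked example (illustrative, hypothetical values): with a connection
 * interval of 40 units (40 * 1.25 ms = 50 ms) and a peripheral latency of
 * 4, the peripheral waits 2 * (4 + 1) * 50 ms = 500 ms plus a 0-7 ms
 * random IOP delay. With a 7.5 ms interval and zero latency, the
 * calculated 15 ms is clamped up to the 100 ms spec minimum by the MAX()
 * above.
 */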
3583
3584 static int att_schedule_eatt_connect(struct bt_conn *conn, uint8_t chans_to_connect)
3585 {
3586 struct bt_att *att = att_get(conn);
3587
3588 if (!att) {
3589 return -ENOTCONN;
3590 }
3591
3592 att->eatt.chans_to_connect = chans_to_connect;
3593
3594 return k_work_reschedule(&att->eatt.connection_work,
3595 credit_based_connection_delay(conn));
3596 }
3597
3598 static void handle_potential_collision(struct bt_att *att)
3599 {
3600 __ASSERT_NO_MSG(att);
3601
3602 int err;
3603 size_t to_connect = att->eatt.prev_conn_req_missing_chans;
3604
3605 if (att->eatt.prev_conn_rsp_result == BT_L2CAP_LE_ERR_NO_RESOURCES &&
3606 att->eatt.prev_conn_req_result == BT_L2CAP_LE_ERR_NO_RESOURCES) {
3607 LOG_DBG("Credit based connection request collision detected");
3608
3609 /* Reset to not keep retrying on repeated failures */
3610 att->eatt.prev_conn_rsp_result = 0;
3611 att->eatt.prev_conn_req_result = 0;
3612 att->eatt.prev_conn_req_missing_chans = 0;
3613
3614 if (to_connect == 0) {
3615 return;
3616 }
3617
3618 err = att_schedule_eatt_connect(att->conn, to_connect);
3619 if (err < 0) {
3620 LOG_ERR("Failed to schedule EATT connection retry (err: %d)", err);
3621 }
3622 }
3623 }
3624
3625 static void ecred_connect_req_cb(struct bt_conn *conn, uint16_t result, uint16_t psm)
3626 {
3627 struct bt_att *att = att_get(conn);
3628
3629 if (!att) {
3630 return;
3631 }
3632
3633 if (psm != BT_EATT_PSM) {
3634 /* Collision mitigation is only a requirement on the EATT PSM */
3635 return;
3636 }
3637
3638 att->eatt.prev_conn_rsp_result = result;
3639
3640 handle_potential_collision(att);
3641 }
3642
3643 static void ecred_connect_rsp_cb(struct bt_conn *conn, uint16_t result,
3644 uint8_t attempted_to_connect, uint8_t succeeded_to_connect,
3645 uint16_t psm)
3646 {
3647 struct bt_att *att = att_get(conn);
3648
3649 if (!att) {
3650 return;
3651 }
3652
3653 if (psm != BT_EATT_PSM) {
3654 /* Collision mitigation is only a requirement on the EATT PSM */
3655 return;
3656 }
3657
3658 att->eatt.prev_conn_req_result = result;
3659 att->eatt.prev_conn_req_missing_chans =
3660 attempted_to_connect - succeeded_to_connect;
3661
3662 handle_potential_collision(att);
3663 }
3664
3665 int bt_eatt_connect(struct bt_conn *conn, size_t num_channels)
3666 {
3667 struct bt_att_chan *att_chan;
3668 struct bt_att *att;
3669 struct bt_l2cap_chan *chan[CONFIG_BT_EATT_MAX + 1] = {};
3670 size_t offset = 0;
3671 size_t i = 0;
3672 int err;
3673
3674 if (!conn) {
3675 return -EINVAL;
3676 }
3677
3678 /* Check the encryption level for EATT */
3679 if (bt_conn_get_security(conn) < BT_SECURITY_L2) {
3680 /* Vol 3, Part G, Section 5.3.2 Channel Requirements states:
3681 * The channel shall be encrypted.
3682 */
3683 return -EPERM;
3684 }
3685
3686 if (num_channels > CONFIG_BT_EATT_MAX || num_channels == 0) {
3687 return -EINVAL;
3688 }
3689
3690 att_chan = att_get_fixed_chan(conn);
3691 att = att_chan->att;
3692
3693 while (num_channels--) {
3694 att_chan = att_chan_new(att, BIT(ATT_ENHANCED));
3695 if (!att_chan) {
3696 break;
3697 }
3698
3699 chan[i] = &att_chan->chan.chan;
3700 i++;
3701 }
3702
3703 if (!i) {
3704 return -ENOMEM;
3705 }
3706
3707 while (offset < i) {
3708 /* bt_l2cap_ecred_chan_connect() uses the first BT_L2CAP_ECRED_CHAN_MAX_PER_REQ
3709 * elements of the array or until a null-terminator is reached.
3710 */
3711 err = bt_l2cap_ecred_chan_connect(conn, &chan[offset], BT_EATT_PSM);
3712 if (err < 0) {
3713 return err;
3714 }
3715
3716 offset += BT_L2CAP_ECRED_CHAN_MAX_PER_REQ;
3717 }
3718
3719 return 0;
3720 }
3721
3722 #if defined(CONFIG_BT_EATT_AUTO_CONNECT)
3723 static void eatt_auto_connect(struct bt_conn *conn, bt_security_t level,
3724 enum bt_security_err err)
3725 {
3726 int eatt_err;
3727
3728 if (err || level < BT_SECURITY_L2 || !bt_att_fixed_chan_only(conn)) {
3729 return;
3730 }
3731
3732 eatt_err = att_schedule_eatt_connect(conn, CONFIG_BT_EATT_MAX);
3733 if (eatt_err < 0) {
3734 LOG_WRN("Automatic creation of EATT bearers failed on "
3735 "connection %s with error %d",
3736 bt_addr_le_str(bt_conn_get_dst(conn)), eatt_err);
3737 }
3738 }
3739
3740 BT_CONN_CB_DEFINE(conn_callbacks) = {
3741 .security_changed = eatt_auto_connect,
3742 };
3743
3744 #endif /* CONFIG_BT_EATT_AUTO_CONNECT */
3745
3746 int bt_eatt_disconnect(struct bt_conn *conn)
3747 {
3748 struct bt_att_chan *chan;
3749 struct bt_att *att;
3750 int err = -ENOTCONN;
3751
3752 if (!conn) {
3753 return -EINVAL;
3754 }
3755
3756 chan = att_get_fixed_chan(conn);
3757 att = chan->att;
3758
3759 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3760 if (bt_att_is_enhanced(chan)) {
3761 err = bt_l2cap_chan_disconnect(&chan->chan.chan);
3762 }
3763 }
3764
3765 return err;
3766 }

#if defined(CONFIG_BT_TESTING)
int bt_eatt_disconnect_one(struct bt_conn *conn)
{
	struct bt_att *att;
	struct bt_att_chan *chan;

	if (!conn) {
		return -EINVAL;
	}

	chan = att_get_fixed_chan(conn);
	att = chan->att;

	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
		if (bt_att_is_enhanced(chan)) {
			/* Disconnect only the first enhanced bearer found */
			return bt_l2cap_chan_disconnect(&chan->chan.chan);
		}
	}

	return -ENOTCONN;
}

int bt_eatt_reconfigure(struct bt_conn *conn, uint16_t mtu)
{
	struct bt_att_chan *att_chan = att_get_fixed_chan(conn);
	struct bt_att *att = att_chan->att;
	struct bt_l2cap_chan *chans[CONFIG_BT_EATT_MAX + 1] = {};
	size_t offset = 0;
	size_t i = 0;
	int err;

	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, att_chan, node) {
		if (bt_att_is_enhanced(att_chan)) {
			chans[i] = &att_chan->chan.chan;
			i++;
		}
	}

	while (offset < i) {
		/* bt_l2cap_ecred_chan_reconfigure() uses up to the first
		 * BT_L2CAP_ECRED_CHAN_MAX_PER_REQ elements of the array,
		 * stopping early if a NULL terminator is reached.
		 */
		err = bt_l2cap_ecred_chan_reconfigure(&chans[offset], mtu);
		if (err < 0) {
			return err;
		}

		offset += BT_L2CAP_ECRED_CHAN_MAX_PER_REQ;
	}

	return 0;
}
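
/* Example (testing builds only, illustrative): raise the receive MTU of
 * every connected EATT bearer in a single call:
 *
 *	int err = bt_eatt_reconfigure(conn, 247);
 */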
#endif /* CONFIG_BT_TESTING */
#endif /* CONFIG_BT_EATT */

static int bt_eatt_accept(struct bt_conn *conn, struct bt_l2cap_server *server,
			  struct bt_l2cap_chan **chan)
{
	struct bt_att_chan *att_chan = att_get_fixed_chan(conn);
	struct bt_att *att = att_chan->att;

	LOG_DBG("conn %p handle %u", conn, conn->handle);

	att_chan = att_chan_new(att, BIT(ATT_ENHANCED));
	if (att_chan) {
		*chan = &att_chan->chan.chan;
		return 0;
	}

	return -ENOMEM;
}

static void bt_eatt_init(void)
{
	int err;
	static struct bt_l2cap_server eatt_l2cap = {
		.psm = BT_EATT_PSM,
		.sec_level = BT_SECURITY_L2,
		.accept = bt_eatt_accept,
	};
	struct bt_l2cap_server *registered_server;

	LOG_DBG("");

	/* Check if the eatt_l2cap server has already been registered. */
	registered_server = bt_l2cap_server_lookup_psm(eatt_l2cap.psm);
	if (registered_server != &eatt_l2cap) {
		err = bt_l2cap_server_register(&eatt_l2cap);
		if (err < 0) {
			LOG_ERR("EATT Server registration failed %d", err);
		}
	}

#if defined(CONFIG_BT_EATT)
	static const struct bt_l2cap_ecred_cb cb = {
		.ecred_conn_rsp = ecred_connect_rsp_cb,
		.ecred_conn_req = ecred_connect_req_cb,
	};

	bt_l2cap_register_ecred_cb(&cb);
#endif /* CONFIG_BT_EATT */
}

void bt_att_init(void)
{
	bt_gatt_init();

	if (IS_ENABLED(CONFIG_BT_EATT)) {
		bt_eatt_init();
	}
}

uint16_t bt_att_get_mtu(struct bt_conn *conn)
{
	struct bt_att_chan *chan, *tmp;
	struct bt_att *att;
	uint16_t mtu = 0;

	att = att_get(conn);
	if (!att) {
		return 0;
	}

	/* Report the largest ATT_MTU across all bearers on this connection */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (bt_att_mtu(chan) > mtu) {
			mtu = bt_att_mtu(chan);
		}
	}

	return mtu;
}
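
/* Example (illustrative): the largest payload a notification can carry on
 * any bearer of this connection is bt_att_get_mtu(conn) minus the 3-byte
 * ATT notification header (1 byte opcode + 2 bytes attribute handle):
 *
 *	uint16_t max_payload = bt_att_get_mtu(conn) - 3;
 */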

uint16_t bt_att_get_uatt_mtu(struct bt_conn *conn)
{
	struct bt_att_chan *chan, *tmp;
	struct bt_att *att;

	att = att_get(conn);
	if (!att) {
		return 0;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (!bt_att_is_enhanced(chan)) {
			return bt_att_mtu(chan);
		}
	}

	LOG_WRN("No UATT channel found on conn %p", conn);

	return 0;
}

static void att_chan_mtu_updated(struct bt_att_chan *updated_chan)
{
	struct bt_att *att = updated_chan->att;
	struct bt_att_chan *chan, *tmp;
	uint16_t max_tx = 0, max_rx = 0;

	/* Get the maximum MTUs of the other channels */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (chan == updated_chan) {
			continue;
		}
		max_tx = MAX(max_tx, chan->chan.tx.mtu);
		max_rx = MAX(max_rx, chan->chan.rx.mtu);
	}

	/* Only notify GATT if the updated channel raised either per-connection maximum */
	if ((updated_chan->chan.tx.mtu > max_tx) ||
	    (updated_chan->chan.rx.mtu > max_rx)) {
		max_tx = MAX(max_tx, updated_chan->chan.tx.mtu);
		max_rx = MAX(max_rx, updated_chan->chan.rx.mtu);
		bt_gatt_att_max_mtu_changed(att->conn, max_tx, max_rx);
	}
}
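
/* Worked example (illustrative): with an unenhanced bearer at MTU 23 and
 * one EATT bearer already at MTU 247, reconfiguring a second EATT bearer
 * from 64 to 128 does not change the per-connection maximum (still 247),
 * so bt_gatt_att_max_mtu_changed() is not called. Raising any bearer to
 * 512 would make it the new maximum and trigger the callback.
 */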

struct bt_att_req *bt_att_req_alloc(k_timeout_t timeout)
{
	struct bt_att_req *req = NULL;

	if (k_current_get() == att_handle_rsp_thread) {
		/* No req will be fulfilled while blocking on the bt_recv thread.
		 * Blocking would cause deadlock.
		 */
		LOG_DBG("Timeout discarded. No blocking on bt_recv thread.");
		timeout = K_NO_WAIT;
	}

	/* Reserve space for request */
	if (k_mem_slab_alloc(&req_slab, (void **)&req, timeout)) {
		LOG_DBG("No space for req");
		return NULL;
	}

	LOG_DBG("req %p", req);

	memset(req, 0, sizeof(*req));

	return req;
}

void bt_att_req_free(struct bt_att_req *req)
{
	LOG_DBG("req %p", req);

	if (req->buf) {
		net_buf_unref(req->buf);
		req->buf = NULL;
	}

	k_mem_slab_free(&req_slab, (void *)req);
}
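
/* Lifecycle sketch (illustrative; mirrors how GATT drives this API): a
 * caller allocates a request, attaches its PDU, and hands the request to
 * bt_att_req_send(), which takes ownership on success:
 *
 *	struct bt_att_req *req = bt_att_req_alloc(BT_ATT_TIMEOUT);
 *
 *	if (!req) {
 *		return -ENOMEM;
 *	}
 *	req->buf = buf;
 *	err = bt_att_req_send(conn, req);
 *	if (err) {
 *		bt_att_req_free(req);
 *	}
 */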

int bt_att_send(struct bt_conn *conn, struct net_buf *buf)
{
	struct bt_att *att;

	__ASSERT_NO_MSG(conn);
	__ASSERT_NO_MSG(buf);

	att = att_get(conn);
	if (!att) {
		net_buf_unref(buf);
		return -ENOTCONN;
	}

	k_fifo_put(&att->tx_queue, buf);
	att_send_process(att);

	return 0;
}

int bt_att_req_send(struct bt_conn *conn, struct bt_att_req *req)
{
	struct bt_att *att;

	LOG_DBG("conn %p req %p", conn, req);

	__ASSERT_NO_MSG(conn);
	__ASSERT_NO_MSG(req);

	/* Lock the scheduler so that the append to the request list and the
	 * kick of its processing are not preempted by another thread.
	 */
	k_sched_lock();

	att = att_get(conn);
	if (!att) {
		k_sched_unlock();
		return -ENOTCONN;
	}

	sys_slist_append(&att->reqs, &req->node);
	att_req_send_process(att);

	k_sched_unlock();

	return 0;
}

static bool bt_att_chan_req_cancel(struct bt_att_chan *chan,
				   struct bt_att_req *req)
{
	if (chan->req != req) {
		return false;
	}

	/* Mark the in-flight request slot as cancelled; the response handler
	 * recognizes the &cancel sentinel and discards the response.
	 */
	chan->req = &cancel;

	bt_att_req_free(req);

	return true;
}

void bt_att_req_cancel(struct bt_conn *conn, struct bt_att_req *req)
{
	struct bt_att *att;
	struct bt_att_chan *chan, *tmp;

	LOG_DBG("req %p", req);

	if (!conn || !req) {
		return;
	}

	att = att_get(conn);
	if (!att) {
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		/* Check if request is outstanding */
		if (bt_att_chan_req_cancel(chan, req)) {
			return;
		}
	}

	/* Remove request from the list */
	sys_slist_find_and_remove(&att->reqs, &req->node);

	bt_att_req_free(req);
}

struct bt_att_req *bt_att_find_req_by_user_data(struct bt_conn *conn, const void *user_data)
{
	struct bt_att *att;
	struct bt_att_chan *chan;
	struct bt_att_req *req;

	att = att_get(conn);
	if (!att) {
		return NULL;
	}

	/* Check requests that are in flight on a channel first. The NULL
	 * check guards against channels with no outstanding request.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
		if (chan->req && chan->req->user_data == user_data) {
			return chan->req;
		}
	}

	/* Then check requests still waiting to be sent */
	SYS_SLIST_FOR_EACH_CONTAINER(&att->reqs, req, node) {
		if (req->user_data == user_data) {
			return req;
		}
	}

	return NULL;
}

bool bt_att_fixed_chan_only(struct bt_conn *conn)
{
#if defined(CONFIG_BT_EATT)
	return bt_eatt_count(conn) == 0;
#else
	return true;
#endif /* CONFIG_BT_EATT */
}

void bt_att_clear_out_of_sync_sent(struct bt_conn *conn)
{
	struct bt_att *att = att_get(conn);
	struct bt_att_chan *chan;

	if (!att) {
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
		atomic_clear_bit(chan->flags, ATT_OUT_OF_SYNC_SENT);
	}
}

bool bt_att_out_of_sync_sent_on_fixed(struct bt_conn *conn)
{
	struct bt_l2cap_chan *l2cap_chan;
	struct bt_att_chan *att_chan;

	l2cap_chan = bt_l2cap_le_lookup_rx_cid(conn, BT_L2CAP_CID_ATT);
	if (!l2cap_chan) {
		return false;
	}

	att_chan = ATT_CHAN(l2cap_chan);
	return atomic_test_bit(att_chan->flags, ATT_OUT_OF_SYNC_SENT);
}

void bt_att_set_tx_meta_data(struct net_buf *buf, bt_gatt_complete_func_t func, void *user_data,
			     enum bt_att_chan_opt chan_opt)
{
	struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);

	data->func = func;
	data->user_data = user_data;
	data->attr_count = 1;
	data->chan_opt = chan_opt;
}
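
/* Flow sketch (illustrative; modeled on how the GATT layer drives this
 * API, with params->func etc. standing in for the caller's fields): a
 * sender stamps the buffer before queuing it, and the stored completion
 * callback fires once the PDU has been sent:
 *
 *	bt_att_set_tx_meta_data(buf, params->func, params->user_data,
 *				params->chan_opt);
 *	err = bt_att_send(conn, buf);
 */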

void bt_att_increment_tx_meta_data_attr_count(struct net_buf *buf, uint16_t attr_count)
{
	struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);

	data->attr_count += attr_count;
}

bool bt_att_tx_meta_data_match(const struct net_buf *buf, bt_gatt_complete_func_t func,
			       const void *user_data, enum bt_att_chan_opt chan_opt)
{
	const struct bt_att_tx_meta_data *meta = bt_att_get_tx_meta_data(buf);

	return ((meta->func == func) &&
		(meta->user_data == user_data) &&
		(meta->chan_opt == chan_opt));
}

bool bt_att_chan_opt_valid(struct bt_conn *conn, enum bt_att_chan_opt chan_opt)
{
	if ((chan_opt & (BT_ATT_CHAN_OPT_ENHANCED_ONLY | BT_ATT_CHAN_OPT_UNENHANCED_ONLY)) ==
	    (BT_ATT_CHAN_OPT_ENHANCED_ONLY | BT_ATT_CHAN_OPT_UNENHANCED_ONLY)) {
		/* Enhanced and Unenhanced are mutually exclusive */
		return false;
	}

	/* Choosing EATT requires connected EATT channels and encryption enabled */
	if (chan_opt & BT_ATT_CHAN_OPT_ENHANCED_ONLY) {
		return (bt_conn_get_security(conn) > BT_SECURITY_L1) &&
		       !bt_att_fixed_chan_only(conn);
	}

	return true;
}
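
/* Example (illustrative): GATT parameter structs that carry an
 * enum bt_att_chan_opt (e.g. notification parameters) can pin a PDU to a
 * bearer type. BT_ATT_CHAN_OPT_NONE leaves the choice to the stack, while
 * BT_ATT_CHAN_OPT_ENHANCED_ONLY is only valid once at least one EATT
 * bearer is connected on an encrypted link, per the check above.
 */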

int bt_gatt_authorization_cb_register(const struct bt_gatt_authorization_cb *cb)
{
	if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
		return -ENOSYS;
	}

	/* A NULL argument unregisters the current callbacks */
	if (!cb) {
		authorization_cb = NULL;
		return 0;
	}

	/* Only one set of authorization callbacks may be registered at a time */
	if (authorization_cb) {
		return -EALREADY;
	}

	authorization_cb = cb;

	return 0;
}
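
/* Usage sketch (illustrative; the exact bt_gatt_authorization_cb fields,
 * here assumed to be read_authorize/write_authorize, are declared in
 * <zephyr/bluetooth/gatt.h>):
 *
 *	static bool app_read_authorize(struct bt_conn *conn,
 *				       const struct bt_gatt_attr *attr)
 *	{
 *		return app_conn_is_trusted(conn); // hypothetical helper
 *	}
 *
 *	static const struct bt_gatt_authorization_cb auth_cb = {
 *		.read_authorize = app_read_authorize,
 *	};
 *
 *	// During init, with CONFIG_BT_GATT_AUTHORIZATION_CUSTOM=y:
 *	int err = bt_gatt_authorization_cb_register(&auth_cb);
 */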