1 /* att.c - Attribute protocol handling */
2
3 /*
4 * Copyright (c) 2015-2016 Intel Corporation
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 #include <zephyr/kernel.h>
10 #include <string.h>
11 #include <errno.h>
12 #include <stdbool.h>
13 #include <zephyr/sys/atomic.h>
14 #include <zephyr/sys/byteorder.h>
15 #include <zephyr/sys/util.h>
16
17 #include <zephyr/bluetooth/hci.h>
18 #include <zephyr/bluetooth/bluetooth.h>
19 #include <zephyr/bluetooth/uuid.h>
20 #include <zephyr/bluetooth/att.h>
21 #include <zephyr/bluetooth/gatt.h>
22
23 #include "common/bt_str.h"
24
25 #include "hci_core.h"
26 #include "conn_internal.h"
27 #include "l2cap_internal.h"
28 #include "smp.h"
29 #include "att_internal.h"
30 #include "gatt_internal.h"
31
32 #define LOG_LEVEL CONFIG_BT_ATT_LOG_LEVEL
33 #include <zephyr/logging/log.h>
34 LOG_MODULE_REGISTER(bt_att);
35
36 #define ATT_CHAN(_ch) CONTAINER_OF(_ch, struct bt_att_chan, chan.chan)
37 #define ATT_REQ(_node) CONTAINER_OF(_node, struct bt_att_req, node)
38
39 #define ATT_CMD_MASK 0x40
40
41 #if defined(CONFIG_BT_EATT)
42 #define ATT_CHAN_MAX (CONFIG_BT_EATT_MAX + 1)
43 #else
44 #define ATT_CHAN_MAX 1
45 #endif /* CONFIG_BT_EATT */
46
/* Classification of ATT PDU opcodes. Used to decide per-PDU handling:
 * flow control, transaction timeouts and completion callbacks.
 */
typedef enum __packed {
        ATT_COMMAND,
        ATT_REQUEST,
        ATT_RESPONSE,
        ATT_NOTIFICATION,
        ATT_CONFIRMATION,
        ATT_INDICATION,
        ATT_UNKNOWN,
} att_type_t;
56
57 static att_type_t att_op_get_type(uint8_t op);
58
#if CONFIG_BT_ATT_PREPARE_COUNT > 0
/* Per-buffer user data for queued Prepare Write requests. */
struct bt_attr_data {
        uint16_t handle; /* Attribute handle being written */
        uint16_t offset; /* Value offset of this prepared chunk */
};

/* Pool for incoming ATT packets */
NET_BUF_POOL_DEFINE(prep_pool, CONFIG_BT_ATT_PREPARE_COUNT, BT_ATT_BUF_SIZE,
                    sizeof(struct bt_attr_data), NULL);
#endif /* CONFIG_BT_ATT_PREPARE_COUNT */

/* Outstanding client request contexts; sized to match the TX buffer count. */
K_MEM_SLAB_DEFINE(req_slab, sizeof(struct bt_att_req),
                  CONFIG_BT_ATT_TX_COUNT, __alignof__(struct bt_att_req));
72
/* Per-bearer state flags (bit positions in bt_att_chan.flags). */
enum {
        ATT_CONNECTED,        /* Bearer is connected and may transmit */
        ATT_ENHANCED,         /* Bearer is an EATT (enhanced) channel */
        ATT_PENDING_SENT,     /* An SDU is in flight; wait for .sent cb */
        ATT_OUT_OF_SYNC_SENT, /* DB-out-of-sync error already reported */

        /* Total number of flags - must be at the end of the enum */
        ATT_NUM_FLAGS,
};
82
struct bt_att_tx_meta_data;
/* Callback invoked once a TX buffer has been consumed by the stack. */
typedef void (*bt_att_tx_cb_t)(struct bt_conn *conn,
                               struct bt_att_tx_meta_data *user_data);

/* Metadata tracked for every outgoing ATT PDU; stored out-of-band in
 * tx_meta_data_storage[], indexed by the net_buf's pool index.
 */
struct bt_att_tx_meta_data {
        int err;                       /* Result of the send attempt */
        uint8_t opcode;                /* ATT opcode of the PDU (0 = unset) */
        uint16_t attr_count;           /* Number of attrs for completion cb */
        struct bt_att_chan *att_chan;  /* Bearer the PDU was sent on */
        bt_gatt_complete_func_t func;  /* Optional GATT completion callback */
        void *user_data;               /* Opaque argument for func */
        enum bt_att_chan_opt chan_opt; /* Bearer selection restriction */
};

struct bt_att_tx_meta {
        struct bt_att_tx_meta_data *data;
};
100
/* ATT channel specific data */
struct bt_att_chan {
        /* Connection this channel is associated with */
        struct bt_att *att;
        struct bt_l2cap_le_chan chan;    /* Underlying L2CAP LE channel */
        ATOMIC_DEFINE(flags, ATT_NUM_FLAGS);
        struct bt_att_req *req;          /* Request currently in flight, if any */
        struct k_fifo tx_queue;          /* PDUs waiting for this bearer */
        struct k_work_delayable timeout_work; /* ATT transaction timeout */
        sys_snode_t node;                /* Link in bt_att.chans */
};
112
bt_att_is_enhanced(struct bt_att_chan * chan)113 static bool bt_att_is_enhanced(struct bt_att_chan *chan)
114 {
115 /* Optimization. */
116 if (!IS_ENABLED(CONFIG_BT_EATT)) {
117 return false;
118 }
119
120 return atomic_test_bit(chan->flags, ATT_ENHANCED);
121 }
122
bt_att_mtu(struct bt_att_chan * chan)123 static uint16_t bt_att_mtu(struct bt_att_chan *chan)
124 {
125 /* Core v5.3 Vol 3 Part F 3.4.2:
126 *
127 * The server and client shall set ATT_MTU to the minimum of the
128 * Client Rx MTU and the Server Rx MTU.
129 */
130 return MIN(chan->chan.rx.mtu, chan->chan.tx.mtu);
131 }
132
/* Descriptor of application-specific authorization callbacks that are used
 * with the CONFIG_BT_GATT_AUTHORIZATION_CUSTOM Kconfig enabled.
 *
 * NULL until registered by the application.
 */
static const struct bt_gatt_authorization_cb *authorization_cb;
137
/* ATT connection specific data */
struct bt_att {
        struct bt_conn *conn;
        /* Shared request queue */
        sys_slist_t reqs;
        struct k_fifo tx_queue; /* Global TX queue, any bearer may drain it */
#if CONFIG_BT_ATT_PREPARE_COUNT > 0
        sys_slist_t prep_queue; /* Queued Prepare Write buffers */
#endif
        /* Contains bt_att_chan instance(s) */
        sys_slist_t chans;
#if defined(CONFIG_BT_EATT)
        struct {
                struct k_work_delayable connection_work;
                uint8_t chans_to_connect; /* EATT bearers still to be opened */

                /* State of the previous EATT connection attempt, used to
                 * decide on retries.
                 */
                uint16_t prev_conn_rsp_result;
                uint16_t prev_conn_req_result;
                uint8_t prev_conn_req_missing_chans;
        } eatt;
#endif /* CONFIG_BT_EATT */
};
160
/* One bt_att instance per connection. */
K_MEM_SLAB_DEFINE(att_slab, sizeof(struct bt_att),
                  CONFIG_BT_MAX_CONN, __alignof__(struct bt_att));
/* One bt_att_chan per bearer: the fixed UATT channel plus EATT bearers. */
K_MEM_SLAB_DEFINE(chan_slab, sizeof(struct bt_att_chan),
                  CONFIG_BT_MAX_CONN * ATT_CHAN_MAX,
                  __alignof__(struct bt_att_chan));
/* Sentinel assigned to chan->req to mark a cancelled in-flight request. */
static struct bt_att_req cancel;
167
168 /** The thread ATT response handlers likely run on.
169 *
170 * Blocking this thread while waiting for an ATT request to resolve can cause a
171 * deadlock.
172 *
173 * This can happen if the application queues ATT requests in the context of a
174 * callback from the Bluetooth stack. This is because queuing an ATT request
175 * will block until a request-resource is available, and the callbacks run on
176 * the same thread as the ATT response handler that frees request-resources.
177 *
178 * The intended use of this value is to detect the above situation.
179 */
180 static k_tid_t att_handle_rsp_thread;
181
182 static struct bt_att_tx_meta_data tx_meta_data_storage[CONFIG_BT_ATT_TX_COUNT];
183
184 struct bt_att_tx_meta_data *bt_att_get_tx_meta_data(const struct net_buf *buf);
185 static void att_on_sent_cb(struct bt_att_tx_meta_data *meta);
186
#if defined(CONFIG_BT_ATT_ERR_TO_STR)
/* Map an ATT error code to its BT_ATT_ERR_* symbol name for logging.
 * Returns "(unknown)" for codes without a mapping.
 */
const char *bt_att_err_to_str(uint8_t att_err)
{
        /* Two mapping tables are used to avoid a big gap with NULL-entries. */
#define ATT_ERR(err) [err] = #err
#define ATT_ERR_SECOND(err) [err - BT_ATT_ERR_WRITE_REQ_REJECTED] = #err

        /* Contiguous low range of ATT error codes. */
        const char * const first_mapping_table[] = {
                ATT_ERR(BT_ATT_ERR_SUCCESS),
                ATT_ERR(BT_ATT_ERR_INVALID_HANDLE),
                ATT_ERR(BT_ATT_ERR_READ_NOT_PERMITTED),
                ATT_ERR(BT_ATT_ERR_WRITE_NOT_PERMITTED),
                ATT_ERR(BT_ATT_ERR_INVALID_PDU),
                ATT_ERR(BT_ATT_ERR_AUTHENTICATION),
                ATT_ERR(BT_ATT_ERR_NOT_SUPPORTED),
                ATT_ERR(BT_ATT_ERR_INVALID_OFFSET),
                ATT_ERR(BT_ATT_ERR_AUTHORIZATION),
                ATT_ERR(BT_ATT_ERR_PREPARE_QUEUE_FULL),
                ATT_ERR(BT_ATT_ERR_ATTRIBUTE_NOT_FOUND),
                ATT_ERR(BT_ATT_ERR_ATTRIBUTE_NOT_LONG),
                ATT_ERR(BT_ATT_ERR_ENCRYPTION_KEY_SIZE),
                ATT_ERR(BT_ATT_ERR_INVALID_ATTRIBUTE_LEN),
                ATT_ERR(BT_ATT_ERR_UNLIKELY),
                ATT_ERR(BT_ATT_ERR_INSUFFICIENT_ENCRYPTION),
                ATT_ERR(BT_ATT_ERR_UNSUPPORTED_GROUP_TYPE),
                ATT_ERR(BT_ATT_ERR_INSUFFICIENT_RESOURCES),
                ATT_ERR(BT_ATT_ERR_DB_OUT_OF_SYNC),
                ATT_ERR(BT_ATT_ERR_VALUE_NOT_ALLOWED),
        };

        /* Common-profile error codes starting at 0xFC. */
        const char * const second_mapping_table[] = {
                ATT_ERR_SECOND(BT_ATT_ERR_WRITE_REQ_REJECTED),
                ATT_ERR_SECOND(BT_ATT_ERR_CCC_IMPROPER_CONF),
                ATT_ERR_SECOND(BT_ATT_ERR_PROCEDURE_IN_PROGRESS),
                ATT_ERR_SECOND(BT_ATT_ERR_OUT_OF_RANGE),
        };


        if (att_err < ARRAY_SIZE(first_mapping_table) && first_mapping_table[att_err]) {
                return first_mapping_table[att_err];
        } else if (att_err >= BT_ATT_ERR_WRITE_REQ_REJECTED) {
                const uint8_t second_index = att_err - BT_ATT_ERR_WRITE_REQ_REJECTED;

                if (second_index < ARRAY_SIZE(second_mapping_table) &&
                    second_mapping_table[second_index]) {
                        return second_mapping_table[second_index];
                }
        }

        return "(unknown)";

#undef ATT_ERR
#undef ATT_ERR_SECOND
}
#endif /* CONFIG_BT_ATT_ERR_TO_STR */
242
/* net_buf destroy callback for buffers from att_pool.
 *
 * Snapshots the out-of-band TX metadata before handing the buffer back to
 * the pool, then runs the "sent" bookkeeping if the PDU had a valid opcode.
 * The ordering is deliberate: once net_buf_destroy() runs, the metadata slot
 * may be reused by a new allocation.
 */
static void att_tx_destroy(struct net_buf *buf)
{
        struct bt_att_tx_meta_data *p_meta = bt_att_get_tx_meta_data(buf);
        struct bt_att_tx_meta_data meta;

        LOG_DBG("%p", buf);

        /* Destroy the buffer first, as the callback may attempt to allocate a
         * new one for another operation.
         */
        meta = *p_meta;

        /* Clear the meta storage. This might help catch illegal
         * "use-after-free"s. An initial memset is not necessary, as the
         * metadata storage array is `static`.
         */
        memset(p_meta, 0x00, sizeof(*p_meta));

        /* After this point, p_meta doesn't belong to us.
         * The user data will be memset to 0 on allocation.
         */
        net_buf_destroy(buf);

        /* ATT opcode 0 is invalid. If we get here, that means the buffer got
         * destroyed before it was ready to be sent. Hopefully nobody sets the
         * opcode and then destroys the buffer without sending it. :'(
         */
        if (meta.opcode != 0) {
                att_on_sent_cb(&meta);
        }
}
274
275 NET_BUF_POOL_DEFINE(att_pool, CONFIG_BT_ATT_TX_COUNT,
276 BT_L2CAP_SDU_BUF_SIZE(BT_ATT_BUF_SIZE),
277 CONFIG_BT_CONN_TX_USER_DATA_SIZE, att_tx_destroy);
278
bt_att_get_tx_meta_data(const struct net_buf * buf)279 struct bt_att_tx_meta_data *bt_att_get_tx_meta_data(const struct net_buf *buf)
280 {
281 __ASSERT_NO_MSG(net_buf_pool_get(buf->pool_id) == &att_pool);
282
283 /* Metadata lifetime is implicitly tied to the buffer lifetime.
284 * Treat it as part of the buffer itself.
285 */
286 return &tx_meta_data_storage[net_buf_id((struct net_buf *)buf)];
287 }
288
289 static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf);
290
291 static void att_chan_mtu_updated(struct bt_att_chan *updated_chan);
292 static void bt_att_disconnected(struct bt_l2cap_chan *chan);
293
294 struct net_buf *bt_att_create_rsp_pdu(struct bt_att_chan *chan, uint8_t op);
295
296 static void bt_att_sent(struct bt_l2cap_chan *ch);
297
att_sent(void * user_data)298 static void att_sent(void *user_data)
299 {
300 struct bt_att_tx_meta_data *data = user_data;
301 struct bt_att_chan *att_chan = data->att_chan;
302 struct bt_conn *conn = att_chan->att->conn;
303 struct bt_l2cap_chan *chan = &att_chan->chan.chan;
304
305 __ASSERT_NO_MSG(!bt_att_is_enhanced(att_chan));
306
307 LOG_DBG("conn %p chan %p", conn, chan);
308
309 /* For EATT, `bt_att_sent` is assigned to the `.sent` L2 callback.
310 * L2CAP will then call it once the SDU has finished sending.
311 *
312 * For UATT, this won't happen, as static LE l2cap channels don't have
313 * SDUs. Call it manually instead.
314 */
315 bt_att_sent(chan);
316 }
317
/* Transmit one ATT PDU on a specific bearer.
 *
 * In case of success the ownership of the buffer is transferred to the stack
 * which takes care of releasing it when it completes transmitting to the
 * controller.
 *
 * In case bt_l2cap_send_cb fails the buffer state and ownership are retained
 * so the buffer can be safely pushed back to the queue to be processed later.
 */
static int chan_send(struct bt_att_chan *chan, struct net_buf *buf)
{
        struct bt_att_hdr *hdr;
        struct net_buf_simple_state state;
        int err;
        struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);
        /* Remember the previous owner so metadata can be rolled back on error. */
        struct bt_att_chan *prev_chan = data->att_chan;

        hdr = (void *)buf->data;

        LOG_DBG("code 0x%02x", hdr->code);

        if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
                LOG_ERR("ATT channel not connected");
                return -EINVAL;
        }

        if (IS_ENABLED(CONFIG_BT_EATT) && hdr->code == BT_ATT_OP_MTU_REQ &&
            chan->chan.tx.cid != BT_L2CAP_CID_ATT) {
                /* The Exchange MTU sub-procedure shall only be supported on
                 * the LE Fixed Channel Unenhanced ATT bearer
                 */
                return -ENOTSUP;
        }

        __ASSERT_NO_MSG(buf->len >= sizeof(struct bt_att_hdr));
        data->opcode = buf->data[0];
        data->err = 0;

        if (IS_ENABLED(CONFIG_BT_EATT) && bt_att_is_enhanced(chan)) {
                /* Check if sent is pending already, if it does it cannot be
                 * modified so the operation will need to be queued.
                 */
                if (atomic_test_bit(chan->flags, ATT_PENDING_SENT)) {
                        return -EAGAIN;
                }

                /* Signed writes are only defined for the UATT bearer. */
                if (hdr->code == BT_ATT_OP_SIGNED_WRITE_CMD) {
                        return -ENOTSUP;
                }

                /* Check if the channel is ready to send in case of a request */
                if (att_op_get_type(hdr->code) == ATT_REQUEST &&
                    !atomic_test_bit(chan->chan.chan.status,
                                     BT_L2CAP_STATUS_OUT)) {
                        return -EAGAIN;
                }

                atomic_set_bit(chan->flags, ATT_PENDING_SENT);
                data->att_chan = chan;

                /* bt_l2cap_chan_send does actually return the number of bytes
                 * that could be sent immediately.
                 */
                err = bt_l2cap_chan_send(&chan->chan.chan, buf);
                if (err < 0) {
                        /* Roll back metadata and flag so the caller may
                         * safely re-queue the buffer.
                         */
                        data->att_chan = prev_chan;
                        atomic_clear_bit(chan->flags, ATT_PENDING_SENT);
                        data->err = err;

                        return err;
                } else {
                        /* On success, the almighty scheduler might already have
                         * run the destroy cb on the buffer. In that case, buf
                         * and its metadata are dangling pointers.
                         */
                        buf = NULL;
                        data = NULL;
                }

                return 0;
        }

        if (hdr->code == BT_ATT_OP_SIGNED_WRITE_CMD) {
                /* Append the CSRK-based signature before sending. */
                err = bt_smp_sign(chan->att->conn, buf);
                if (err) {
                        LOG_ERR("Error signing data");
                        net_buf_unref(buf);
                        return err;
                }
        }

        /* Save parse state so it can be restored if the send fails. */
        net_buf_simple_save(&buf->b, &state);

        data->att_chan = chan;

        err = bt_l2cap_send_pdu(&chan->chan, buf, NULL, NULL);
        if (err) {
                if (err == -ENOBUFS) {
                        LOG_ERR("Ran out of TX buffers or contexts.");
                }
                /* In case of an error has occurred restore the buffer state */
                net_buf_simple_restore(&buf->b, &state);
                data->att_chan = prev_chan;
                data->err = err;
        }

        return err;
}
424
att_chan_matches_chan_opt(struct bt_att_chan * chan,enum bt_att_chan_opt chan_opt)425 static bool att_chan_matches_chan_opt(struct bt_att_chan *chan, enum bt_att_chan_opt chan_opt)
426 {
427 __ASSERT_NO_MSG(chan_opt <= BT_ATT_CHAN_OPT_ENHANCED_ONLY);
428
429 if (chan_opt == BT_ATT_CHAN_OPT_NONE) {
430 return true;
431 }
432
433 if (bt_att_is_enhanced(chan)) {
434 return (chan_opt & BT_ATT_CHAN_OPT_ENHANCED_ONLY);
435 } else {
436 return (chan_opt & BT_ATT_CHAN_OPT_UNENHANCED_ONLY);
437 }
438 }
439
get_first_buf_matching_chan(struct k_fifo * fifo,struct bt_att_chan * chan)440 static struct net_buf *get_first_buf_matching_chan(struct k_fifo *fifo, struct bt_att_chan *chan)
441 {
442 if (IS_ENABLED(CONFIG_BT_EATT)) {
443 struct k_fifo skipped;
444 struct net_buf *buf;
445 struct net_buf *ret = NULL;
446 struct bt_att_tx_meta_data *meta;
447
448 k_fifo_init(&skipped);
449
450 while ((buf = k_fifo_get(fifo, K_NO_WAIT))) {
451 meta = bt_att_get_tx_meta_data(buf);
452 if (!ret &&
453 att_chan_matches_chan_opt(chan, meta->chan_opt)) {
454 ret = buf;
455 } else {
456 k_fifo_put(&skipped, buf);
457 }
458 }
459
460 __ASSERT_NO_MSG(k_fifo_is_empty(fifo));
461
462 while ((buf = k_fifo_get(&skipped, K_NO_WAIT))) {
463 k_fifo_put(fifo, buf);
464 }
465
466 __ASSERT_NO_MSG(k_fifo_is_empty(&skipped));
467
468 return ret;
469 } else {
470 return k_fifo_get(fifo, K_NO_WAIT);
471 }
472 }
473
/* Remove and return the first queued request that may run on @p chan.
 *
 * With EATT, requests carry a bearer restriction in their TX metadata, so
 * the list is scanned for the first compatible entry. Without EATT the
 * head of the list is taken unconditionally. Returns NULL when no
 * (matching) request is queued.
 */
static struct bt_att_req *get_first_req_matching_chan(sys_slist_t *reqs, struct bt_att_chan *chan)
{
        if (IS_ENABLED(CONFIG_BT_EATT)) {
                /* prev tracks the node before curr so the match can be
                 * unlinked with sys_slist_remove().
                 */
                sys_snode_t *curr, *prev = NULL;
                struct bt_att_tx_meta_data *meta = NULL;

                SYS_SLIST_FOR_EACH_NODE(reqs, curr) {
                        meta = bt_att_get_tx_meta_data(ATT_REQ(curr)->buf);
                        if (att_chan_matches_chan_opt(chan, meta->chan_opt)) {
                                break;
                        }

                        prev = curr;
                }

                /* curr is NULL when the loop ran to completion without a
                 * match.
                 */
                if (curr) {
                        sys_slist_remove(reqs, prev, curr);

                        return ATT_REQ(curr);
                }

                return NULL;
        }

        sys_snode_t *node = sys_slist_get(reqs);

        if (node) {
                return ATT_REQ(node);
        } else {
                return NULL;
        }
}
506
process_queue(struct bt_att_chan * chan,struct k_fifo * queue)507 static int process_queue(struct bt_att_chan *chan, struct k_fifo *queue)
508 {
509 struct net_buf *buf;
510 int err;
511
512 buf = get_first_buf_matching_chan(queue, chan);
513 if (buf) {
514 err = bt_att_chan_send(chan, buf);
515 if (err) {
516 /* Push it back if it could not be send */
517 k_queue_prepend(&queue->_queue, buf);
518 return err;
519 }
520
521 return 0;
522 }
523
524 return -ENOENT;
525 }
526
/* Send requests without taking tx_sem */
static int chan_req_send(struct bt_att_chan *chan, struct bt_att_req *req)
{
        struct net_buf *buf;
        int err;

        /* A request PDU must fit in a single ATT_MTU-sized payload. */
        if (bt_att_mtu(chan) < net_buf_frags_len(req->buf)) {
                return -EMSGSIZE;
        }

        LOG_DBG("chan %p req %p len %zu", chan, req, net_buf_frags_len(req->buf));

        chan->req = req;

        /* Release since bt_l2cap_send_cb takes ownership of the buffer */
        buf = req->buf;
        req->buf = NULL;

        /* This lock makes sure the value of `bt_att_mtu(chan)` does not
         * change.
         */
        k_sched_lock();
        err = bt_att_chan_send(chan, buf);
        if (err) {
                /* We still have the ownership of the buffer */
                req->buf = buf;
                chan->req = NULL;
        } else {
                /* Record the MTU the request was sent with, for response
                 * validation.
                 */
                bt_gatt_req_set_mtu(req, bt_att_mtu(chan));
        }
        k_sched_unlock();

        return err;
}
561
/* L2CAP "sent" callback: previous PDU fully transmitted, try to send the
 * next piece of queued work on this bearer.
 */
static void bt_att_sent(struct bt_l2cap_chan *ch)
{
        struct bt_att_chan *chan = ATT_CHAN(ch);
        struct bt_att *att = chan->att;
        int err;

        LOG_DBG("chan %p", chan);

        atomic_clear_bit(chan->flags, ATT_PENDING_SENT);

        if (!att) {
                LOG_DBG("Ignore sent on detached ATT chan");
                return;
        }

        /* Process pending requests first since they require a response they
         * can only be processed one at time while if other queues were
         * processed before they may always contain a buffer starving the
         * request queue.
         */
        if (!chan->req && !sys_slist_is_empty(&att->reqs)) {
                sys_snode_t *node = sys_slist_get(&att->reqs);

                if (chan_req_send(chan, ATT_REQ(node)) >= 0) {
                        return;
                }

                /* Prepend back to the list as it could not be sent */
                sys_slist_prepend(&att->reqs, node);
        }

        /* Process channel queue */
        err = process_queue(chan, &chan->tx_queue);
        if (!err) {
                return;
        }

        /* Process global queue */
        (void)process_queue(chan, &att->tx_queue);
}
602
chan_rebegin_att_timeout(struct bt_att_tx_meta_data * user_data)603 static void chan_rebegin_att_timeout(struct bt_att_tx_meta_data *user_data)
604 {
605 struct bt_att_tx_meta_data *data = user_data;
606 struct bt_att_chan *chan = data->att_chan;
607
608 LOG_DBG("chan %p chan->req %p", chan, chan->req);
609
610 if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
611 LOG_ERR("ATT channel not connected");
612 return;
613 }
614
615 /* Start timeout work. Only if we are sure that the request is really
616 * in-flight.
617 */
618 if (chan->req) {
619 k_work_reschedule(&chan->timeout_work, BT_ATT_TIMEOUT);
620 }
621 }
622
chan_req_notif_sent(struct bt_att_tx_meta_data * user_data)623 static void chan_req_notif_sent(struct bt_att_tx_meta_data *user_data)
624 {
625 struct bt_att_tx_meta_data *data = user_data;
626 struct bt_att_chan *chan = data->att_chan;
627 struct bt_conn *conn = chan->att->conn;
628 bt_gatt_complete_func_t func = data->func;
629 uint16_t attr_count = data->attr_count;
630 void *ud = data->user_data;
631
632 LOG_DBG("chan %p CID 0x%04X", chan, chan->chan.tx.cid);
633
634 if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
635 LOG_ERR("ATT channel not connected");
636 return;
637 }
638
639 if (func) {
640 for (uint16_t i = 0; i < attr_count; i++) {
641 func(conn, ud);
642 }
643 }
644 }
645
/* Dispatch post-transmission handling for a PDU, based on its opcode type.
 *
 * Called from att_tx_destroy() with a snapshot of the buffer's TX metadata,
 * i.e. after the buffer itself has been returned to the pool.
 */
static void att_on_sent_cb(struct bt_att_tx_meta_data *meta)
{
        const att_type_t op_type = att_op_get_type(meta->opcode);

        LOG_DBG("opcode 0x%x", meta->opcode);

        if (!meta->att_chan ||
            !meta->att_chan->att ||
            !meta->att_chan->att->conn) {
                LOG_DBG("Bearer not connected, dropping ATT cb");
                return;
        }

        if (meta->err) {
                LOG_ERR("Got err %d, not calling ATT cb", meta->err);
                return;
        }

        if (!bt_att_is_enhanced(meta->att_chan)) {
                /* For EATT, L2CAP will call it after the SDU is fully sent. */
                LOG_DBG("UATT bearer, calling att_sent");
                att_sent(meta);
        }

        switch (op_type) {
        case ATT_RESPONSE:
                return;
        case ATT_CONFIRMATION:
                return;
        case ATT_REQUEST:
        case ATT_INDICATION:
                /* Transaction started: arm the 30 s ATT timeout. */
                chan_rebegin_att_timeout(meta);
                return;
        case ATT_COMMAND:
        case ATT_NOTIFICATION:
                /* Fire the application completion callback(s). */
                chan_req_notif_sent(meta);
                return;
        default:
                __ASSERT(false, "Unknown op type 0x%02X", op_type);
                return;
        }
}
688
/* Allocate a TX buffer for an ATT PDU with opcode @p op and @p len bytes of
 * parameters, reserving L2CAP headroom and initializing the ATT header and
 * TX metadata. Returns NULL when the PDU would exceed the bearer MTU or no
 * buffer is available.
 */
static struct net_buf *bt_att_chan_create_pdu(struct bt_att_chan *chan, uint8_t op, size_t len)
{
        struct bt_att_hdr *hdr;
        struct net_buf *buf;
        struct bt_att_tx_meta_data *data;
        k_timeout_t timeout;

        if (len + sizeof(op) > bt_att_mtu(chan)) {
                LOG_WRN("ATT MTU exceeded, max %u, wanted %zu", bt_att_mtu(chan),
                        len + sizeof(op));
                return NULL;
        }

        switch (att_op_get_type(op)) {
        case ATT_RESPONSE:
        case ATT_CONFIRMATION:
                /* Use a timeout only when responding/confirming */
                timeout = BT_ATT_TIMEOUT;
                break;
        default:
                timeout = K_FOREVER;
        }

        /* This will reserve headspace for lower layers */
        buf = bt_l2cap_create_pdu_timeout(&att_pool, 0, timeout);
        if (!buf) {
                LOG_ERR("Unable to allocate buffer for op 0x%02x", op);
                return NULL;
        }

        /* If we got a buf from `att_pool`, then the metadata slot at its index
         * is officially ours to use.
         */
        data = bt_att_get_tx_meta_data(buf);

        if (IS_ENABLED(CONFIG_BT_EATT)) {
                /* Extra headroom for the EATT SDU length field. */
                net_buf_reserve(buf, BT_L2CAP_SDU_BUF_SIZE(0));
        }

        data->att_chan = chan;

        hdr = net_buf_add(buf, sizeof(*hdr));
        hdr->code = op;

        return buf;
}
735
bt_att_chan_send(struct bt_att_chan * chan,struct net_buf * buf)736 static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf)
737 {
738 LOG_DBG("chan %p flags %lu code 0x%02x", chan, atomic_get(chan->flags),
739 ((struct bt_att_hdr *)buf->data)->code);
740
741 if (IS_ENABLED(CONFIG_BT_EATT) &&
742 !att_chan_matches_chan_opt(chan, bt_att_get_tx_meta_data(buf)->chan_opt)) {
743 return -EINVAL;
744 }
745
746 return chan_send(chan, buf);
747 }
748
/* Try to drain one buffer from the global TX queue via any of the
 * connection's bearers; stops at the first successful send.
 */
static void att_send_process(struct bt_att *att)
{
        struct bt_att_chan *chan, *tmp, *prev = NULL;
        int err = 0;

        SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
                if (err == -ENOENT && prev &&
                    (bt_att_is_enhanced(chan) == bt_att_is_enhanced(prev))) {
                        /* If there was nothing to send for the previous channel and the current
                         * channel has the same "enhancedness", there will be nothing to send for
                         * this channel either.
                         */
                        continue;
                }

                err = process_queue(chan, &att->tx_queue);
                if (!err) {
                        /* Success */
                        return;
                }

                prev = chan;
        }
}
773
bt_att_chan_send_rsp(struct bt_att_chan * chan,struct net_buf * buf)774 static void bt_att_chan_send_rsp(struct bt_att_chan *chan, struct net_buf *buf)
775 {
776 int err;
777
778 err = chan_send(chan, buf);
779 if (err) {
780 /* Responses need to be sent back using the same channel */
781 k_fifo_put(&chan->tx_queue, buf);
782 }
783 }
784
send_err_rsp(struct bt_att_chan * chan,uint8_t req,uint16_t handle,uint8_t err)785 static void send_err_rsp(struct bt_att_chan *chan, uint8_t req, uint16_t handle,
786 uint8_t err)
787 {
788 struct bt_att_error_rsp *rsp;
789 struct net_buf *buf;
790
791 /* Ignore opcode 0x00 */
792 if (!req) {
793 return;
794 }
795
796 buf = bt_att_chan_create_pdu(chan, BT_ATT_OP_ERROR_RSP, sizeof(*rsp));
797 if (!buf) {
798 return;
799 }
800
801 rsp = net_buf_add(buf, sizeof(*rsp));
802 rsp->request = req;
803 rsp->handle = sys_cpu_to_le16(handle);
804 rsp->error = err;
805
806 bt_att_chan_send_rsp(chan, buf);
807 }
808
/* Handle an incoming ATT_EXCHANGE_MTU_REQ: validate the client MTU, reply
 * with our server MTU and apply the negotiated value to the bearer.
 * Returns an ATT error code, or 0 on success.
 */
static uint8_t att_mtu_req(struct bt_att_chan *chan, struct net_buf *buf)
{
        struct bt_att_exchange_mtu_req *req;
        struct bt_att_exchange_mtu_rsp *rsp;
        struct net_buf *pdu;
        uint16_t mtu_client, mtu_server;

        /* Exchange MTU sub-procedure shall only be supported on the
         * LE Fixed Channel Unenhanced ATT bearer.
         */
        if (bt_att_is_enhanced(chan)) {
                return BT_ATT_ERR_NOT_SUPPORTED;
        }

        req = (void *)buf->data;

        mtu_client = sys_le16_to_cpu(req->mtu);

        LOG_DBG("Client MTU %u", mtu_client);

        /* Check if MTU is valid */
        if (mtu_client < BT_ATT_DEFAULT_LE_MTU) {
                return BT_ATT_ERR_INVALID_PDU;
        }

        pdu = bt_att_create_rsp_pdu(chan, BT_ATT_OP_MTU_RSP);
        if (!pdu) {
                return BT_ATT_ERR_UNLIKELY;
        }

        mtu_server = BT_LOCAL_ATT_MTU_UATT;

        LOG_DBG("Server MTU %u", mtu_server);

        rsp = net_buf_add(pdu, sizeof(*rsp));
        rsp->mtu = sys_cpu_to_le16(mtu_server);

        bt_att_chan_send_rsp(chan, pdu);

        /* The ATT_EXCHANGE_MTU_REQ/RSP is just an alternative way of
         * communicating the L2CAP MTU.
         */
        chan->chan.rx.mtu = mtu_server;
        chan->chan.tx.mtu = mtu_client;

        LOG_DBG("Negotiated MTU %u", bt_att_mtu(chan));

#if defined(CONFIG_BT_GATT_CLIENT)
        /* Mark the MTU Exchange as complete.
         * This will skip sending ATT Exchange MTU from our side.
         *
         * Core 5.3 | Vol 3, Part F 3.4.2.2:
         * If MTU is exchanged in one direction, that is sufficient for both directions.
         */
        atomic_set_bit(chan->att->conn->flags, BT_CONN_ATT_MTU_EXCHANGED);
#endif /* CONFIG_BT_GATT_CLIENT */

        att_chan_mtu_updated(chan);

        return 0;
}
870
bt_att_chan_req_send(struct bt_att_chan * chan,struct bt_att_req * req)871 static int bt_att_chan_req_send(struct bt_att_chan *chan,
872 struct bt_att_req *req)
873 {
874 __ASSERT_NO_MSG(chan);
875 __ASSERT_NO_MSG(req);
876 __ASSERT_NO_MSG(req->func);
877 __ASSERT_NO_MSG(!chan->req);
878
879 LOG_DBG("req %p", req);
880
881 return chan_req_send(chan, req);
882 }
883
/* Dispatch the next queued request onto the first idle, compatible bearer;
 * stops at the first successful send.
 */
static void att_req_send_process(struct bt_att *att)
{
        struct bt_att_req *req = NULL;
        struct bt_att_chan *chan, *tmp, *prev = NULL;

        SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
                /* If there is an ongoing transaction, do not use the channel */
                if (chan->req) {
                        continue;
                }

                if (!req && prev && (bt_att_is_enhanced(chan) == bt_att_is_enhanced(prev))) {
                        /* If there was nothing to send for the previous channel and the current
                         * channel has the same "enhancedness", there will be nothing to send for
                         * this channel either.
                         */
                        continue;
                }

                prev = chan;

                /* Pull next request from the list */
                req = get_first_req_matching_chan(&att->reqs, chan);
                if (!req) {
                        continue;
                }

                if (bt_att_chan_req_send(chan, req) >= 0) {
                        return;
                }

                /* Prepend back to the list as it could not be sent */
                sys_slist_prepend(&att->reqs, &req->node);
        }
}
919
/* Complete the pending request on @p chan with a response (or error),
 * invoke its callback and kick off the next queued request.
 * Always returns 0 (the response itself is never rejected here).
 */
static uint8_t att_handle_rsp(struct bt_att_chan *chan, void *pdu, uint16_t len,
                              int err)
{
        bt_att_func_t func = NULL;
        void *params;

        LOG_DBG("chan %p err %d len %u: %s", chan, err, len, bt_hex(pdu, len));

        /* Cancel timeout if ongoing */
        k_work_cancel_delayable(&chan->timeout_work);

        if (!chan->req) {
                LOG_WRN("No pending ATT request");
                goto process;
        }

        /* Check if request has been cancelled */
        if (chan->req == &cancel) {
                chan->req = NULL;
                goto process;
        }

        /* Reset func so it can be reused by the callback */
        func = chan->req->func;
        chan->req->func = NULL;
        params = chan->req->user_data;

        /* free allocated request so its memory can be reused */
        bt_att_req_free(chan->req);
        chan->req = NULL;

process:
        /* Process pending requests */
        att_req_send_process(chan->att);
        /* Invoke the callback last: it may queue a new request. */
        if (func) {
                func(chan->att->conn, err, pdu, len, params);
        }

        return 0;
}
960
#if defined(CONFIG_BT_GATT_CLIENT)
/* Handle an incoming ATT_EXCHANGE_MTU_RSP: validate the server MTU, apply
 * the negotiated values and complete the pending request.
 */
static uint8_t att_mtu_rsp(struct bt_att_chan *chan, struct net_buf *buf)
{
        struct bt_att_exchange_mtu_rsp *rsp;
        uint16_t mtu;

        rsp = (void *)buf->data;

        mtu = sys_le16_to_cpu(rsp->mtu);

        LOG_DBG("Server MTU %u", mtu);

        /* Check if MTU is valid */
        if (mtu < BT_ATT_DEFAULT_LE_MTU) {
                return att_handle_rsp(chan, NULL, 0, BT_ATT_ERR_INVALID_PDU);
        }

        /* The following must equal the value we sent in the req. We assume this
         * is a rsp to `gatt_exchange_mtu_encode`.
         */
        chan->chan.rx.mtu = BT_LOCAL_ATT_MTU_UATT;
        /* The ATT_EXCHANGE_MTU_REQ/RSP is just an alternative way of
         * communicating the L2CAP MTU.
         */

        chan->chan.tx.mtu = mtu;

        LOG_DBG("Negotiated MTU %u", bt_att_mtu(chan));

        att_chan_mtu_updated(chan);

        return att_handle_rsp(chan, rsp, buf->len, 0);
}
#endif /* CONFIG_BT_GATT_CLIENT */
995
/* Validate an ATT handle range [start, end].
 *
 * Handle 0x0000 is reserved, and the range must not be inverted. On
 * failure, if @p err is non-NULL it receives the handle to report in the
 * error response: 0 for a reserved handle, or the offending start handle
 * for an inverted range.
 */
static bool range_is_valid(uint16_t start, uint16_t end, uint16_t *err)
{
        if (start == 0U || end == 0U) {
                /* Handle 0 is invalid */
                if (err != NULL) {
                        *err = 0U;
                }
                return false;
        }

        if (start > end) {
                /* Inverted range: report the start handle. */
                if (err != NULL) {
                        *err = start;
                }
                return false;
        }

        return true;
}
1016
/* Iteration context for building an ATT_FIND_INFORMATION_RSP. */
struct find_info_data {
        struct bt_att_chan *chan;        /* Bearer the response goes out on */
        struct net_buf *buf;             /* Response PDU being filled */
        struct bt_att_find_info_rsp *rsp; /* Header; NULL until first entry */
        union {
                /* Cursor for the entry last appended; which member is live
                 * depends on rsp->format.
                 */
                struct bt_att_info_16 *info16;
                struct bt_att_info_128 *info128;
        };
};
1026
/* bt_gatt_foreach_attr() callback that appends one handle/UUID pair to a
 * Find Information response.
 *
 * The first attribute fixes the response format (16- vs 128-bit UUIDs);
 * iteration stops at the first attribute of the other UUID size or when the
 * PDU has no room for another entry.
 */
static uint8_t find_info_cb(const struct bt_gatt_attr *attr, uint16_t handle,
                            void *user_data)
{
        struct find_info_data *data = user_data;
        struct bt_att_chan *chan = data->chan;

        LOG_DBG("handle 0x%04x", handle);

        /* Initialize rsp at first entry */
        if (!data->rsp) {
                data->rsp = net_buf_add(data->buf, sizeof(*data->rsp));
                data->rsp->format = (attr->uuid->type == BT_UUID_TYPE_16) ?
                                    BT_ATT_INFO_16 : BT_ATT_INFO_128;
        }

        switch (data->rsp->format) {
        case BT_ATT_INFO_16:
                if (attr->uuid->type != BT_UUID_TYPE_16) {
                        return BT_GATT_ITER_STOP;
                }

                /* Fast forward to next item position */
                data->info16 = net_buf_add(data->buf, sizeof(*data->info16));
                data->info16->handle = sys_cpu_to_le16(handle);
                data->info16->uuid = sys_cpu_to_le16(BT_UUID_16(attr->uuid)->val);

                /* Continue only while another entry still fits. */
                if (bt_att_mtu(chan) - data->buf->len >
                    sizeof(*data->info16)) {
                        return BT_GATT_ITER_CONTINUE;
                }

                break;
        case BT_ATT_INFO_128:
                if (attr->uuid->type != BT_UUID_TYPE_128) {
                        return BT_GATT_ITER_STOP;
                }

                /* Fast forward to next item position */
                data->info128 = net_buf_add(data->buf, sizeof(*data->info128));
                data->info128->handle = sys_cpu_to_le16(handle);
                memcpy(data->info128->uuid, BT_UUID_128(attr->uuid)->val,
                       sizeof(data->info128->uuid));

                /* Continue only while another entry still fits. */
                if (bt_att_mtu(chan) - data->buf->len >
                    sizeof(*data->info128)) {
                        return BT_GATT_ITER_CONTINUE;
                }
        }

        return BT_GATT_ITER_STOP;
}
1078
att_find_info_rsp(struct bt_att_chan * chan,uint16_t start_handle,uint16_t end_handle)1079 static uint8_t att_find_info_rsp(struct bt_att_chan *chan, uint16_t start_handle,
1080 uint16_t end_handle)
1081 {
1082 struct find_info_data data;
1083
1084 (void)memset(&data, 0, sizeof(data));
1085
1086 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_FIND_INFO_RSP);
1087 if (!data.buf) {
1088 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1089 }
1090
1091 data.chan = chan;
1092 bt_gatt_foreach_attr(start_handle, end_handle, find_info_cb, &data);
1093
1094 if (!data.rsp) {
1095 net_buf_unref(data.buf);
1096 /* Respond since handle is set */
1097 send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, start_handle,
1098 BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
1099 return 0;
1100 }
1101
1102 bt_att_chan_send_rsp(chan, data.buf);
1103
1104 return 0;
1105 }
1106
att_find_info_req(struct bt_att_chan * chan,struct net_buf * buf)1107 static uint8_t att_find_info_req(struct bt_att_chan *chan, struct net_buf *buf)
1108 {
1109 struct bt_att_find_info_req *req;
1110 uint16_t start_handle, end_handle, err_handle;
1111
1112 req = (void *)buf->data;
1113
1114 start_handle = sys_le16_to_cpu(req->start_handle);
1115 end_handle = sys_le16_to_cpu(req->end_handle);
1116
1117 LOG_DBG("start_handle 0x%04x end_handle 0x%04x", start_handle, end_handle);
1118
1119 if (!range_is_valid(start_handle, end_handle, &err_handle)) {
1120 send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, err_handle,
1121 BT_ATT_ERR_INVALID_HANDLE);
1122 return 0;
1123 }
1124
1125 return att_find_info_rsp(chan, start_handle, end_handle);
1126 }
1127
/* Iteration context for handling an ATT Find By Type Value Request. */
struct find_type_data {
	struct bt_att_chan *chan;	/* Channel the request arrived on */
	struct net_buf *buf;		/* Response PDU under construction */
	struct bt_att_handle_group *group; /* Open handle group, NULL if none */
	const void *value;		/* Requested attribute value to match */
	uint8_t value_len;		/* Length of value in octets */
	uint8_t err;			/* Pending ATT error code, 0 on success */
};
1136
/* bt_gatt_foreach_attr() callback for the Find By Type Value Request.
 * Matches primary service declarations whose value equals the requested
 * UUID and appends start/end handle groups to the response, extending
 * the open group's end handle over non-service attributes.
 */
static uint8_t find_type_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct find_type_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	int read;
	uint8_t uuid[16];
	struct net_buf *frag;
	size_t len;

	/* Skip secondary services */
	if (!bt_uuid_cmp(attr->uuid, BT_UUID_GATT_SECONDARY)) {
		goto skip;
	}

	/* Update group end_handle if not a primary service */
	if (bt_uuid_cmp(attr->uuid, BT_UUID_GATT_PRIMARY)) {
		if (data->group &&
		    handle > sys_le16_to_cpu(data->group->end_handle)) {
			data->group->end_handle = sys_cpu_to_le16(handle);
		}
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/* stop if there is no space left for another group entry */
	if (bt_att_mtu(chan) - net_buf_frags_len(data->buf) <
	    sizeof(*data->group)) {
		return BT_GATT_ITER_STOP;
	}

	frag = net_buf_frag_last(data->buf);

	len = MIN(bt_att_mtu(chan) - net_buf_frags_len(data->buf),
		  net_buf_tailroom(frag));
	if (!len) {
		/* Current fragment is full: chain a fresh one from the
		 * same pool.
		 */
		frag = net_buf_alloc(net_buf_pool_get(data->buf->pool_id),
				     K_NO_WAIT);
		/* If no buffer can be allocated immediately, stop */
		if (!frag) {
			return BT_GATT_ITER_STOP;
		}

		net_buf_frag_add(data->buf, frag);
	}

	/* Read attribute value (the service UUID) into a scratch buffer */
	read = attr->read(conn, attr, uuid, sizeof(uuid), 0);
	if (read < 0) {
		/*
		 * Since we don't know if it is the service with requested UUID,
		 * we cannot respond with an error to this request.
		 */
		goto skip;
	}

	/* Check if data matches */
	if (read != data->value_len) {
		/* Sizes differ (16-bit vs 128-bit form): use bt_uuid_cmp()
		 * to compare UUIDs of different form.
		 */
		struct bt_uuid_128 ref_uuid;
		struct bt_uuid_128 recvd_uuid;

		if (!bt_uuid_create(&recvd_uuid.uuid, data->value, data->value_len)) {
			LOG_WRN("Unable to create UUID: size %u", data->value_len);
			goto skip;
		}
		if (!bt_uuid_create(&ref_uuid.uuid, uuid, read)) {
			LOG_WRN("Unable to create UUID: size %d", read);
			goto skip;
		}
		if (bt_uuid_cmp(&recvd_uuid.uuid, &ref_uuid.uuid)) {
			goto skip;
		}
	} else if (memcmp(data->value, uuid, read)) {
		goto skip;
	}

	/* If service has been found, error should be cleared */
	data->err = 0x00;

	/* Fast forward to next item position */
	data->group = net_buf_add(frag, sizeof(*data->group));
	data->group->start_handle = sys_cpu_to_le16(handle);
	data->group->end_handle = sys_cpu_to_le16(handle);

	/* continue to find the end_handle */
	return BT_GATT_ITER_CONTINUE;

skip:
	data->group = NULL;
	return BT_GATT_ITER_CONTINUE;
}
1231
/* Build and send an ATT Find By Type Value Response for the given range
 * and value, or an error response when no matching service exists.
 */
static uint8_t att_find_type_rsp(struct bt_att_chan *chan, uint16_t start_handle,
				 uint16_t end_handle, const void *value,
				 uint8_t value_len)
{
	struct find_type_data data = {
		.chan = chan,
		.group = NULL,
		.value = value,
		.value_len = value_len,
		/* Pre-set error in case no matching service is found */
		.err = BT_ATT_ERR_ATTRIBUTE_NOT_FOUND,
	};

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_FIND_TYPE_RSP);
	if (data.buf == NULL) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	bt_gatt_foreach_attr(start_handle, end_handle, find_type_cb, &data);

	if (data.err != 0U) {
		/* Error never cleared: no service found. Drop the PDU and
		 * respond here since the request handle is known.
		 */
		net_buf_unref(data.buf);
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
			     data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
1268
att_find_type_req(struct bt_att_chan * chan,struct net_buf * buf)1269 static uint8_t att_find_type_req(struct bt_att_chan *chan, struct net_buf *buf)
1270 {
1271 struct bt_att_find_type_req *req;
1272 uint16_t start_handle, end_handle, err_handle, type;
1273 uint8_t *value;
1274
1275 req = net_buf_pull_mem(buf, sizeof(*req));
1276
1277 start_handle = sys_le16_to_cpu(req->start_handle);
1278 end_handle = sys_le16_to_cpu(req->end_handle);
1279 type = sys_le16_to_cpu(req->type);
1280 value = buf->data;
1281
1282 LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %u", start_handle, end_handle, type);
1283
1284 if (!range_is_valid(start_handle, end_handle, &err_handle)) {
1285 send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, err_handle,
1286 BT_ATT_ERR_INVALID_HANDLE);
1287 return 0;
1288 }
1289
1290 /* The Attribute Protocol Find By Type Value Request shall be used with
1291 * the Attribute Type parameter set to the UUID for "Primary Service"
1292 * and the Attribute Value set to the 16-bit Bluetooth UUID or 128-bit
1293 * UUID for the specific primary service.
1294 */
1295 if (bt_uuid_cmp(BT_UUID_DECLARE_16(type), BT_UUID_GATT_PRIMARY)) {
1296 send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
1297 BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
1298 return 0;
1299 }
1300
1301 return att_find_type_rsp(chan, start_handle, end_handle, value,
1302 buf->len);
1303 }
1304
err_to_att(int err)1305 static uint8_t err_to_att(int err)
1306 {
1307 LOG_DBG("%d", err);
1308
1309 if (err < 0 && err >= -0xff) {
1310 return -err;
1311 }
1312
1313 return BT_ATT_ERR_UNLIKELY;
1314 }
1315
/* Iteration context for handling an ATT Read By Type Request. */
struct read_type_data {
	struct bt_att_chan *chan;	/* Channel the request arrived on */
	struct bt_uuid *uuid;		/* Attribute type to match */
	struct net_buf *buf;		/* Response PDU under construction */
	struct bt_att_read_type_rsp *rsp; /* Response header holding item len */
	struct bt_att_data *item;	/* Last appended handle/value item */
	uint8_t err;			/* Pending ATT error code, 0 on success */
};

/* Called by att_chan_read() for each chunk read; returning false stops
 * the read loop before the chunk is committed to the buffer.
 */
typedef bool (*attr_read_cb)(struct net_buf *buf, ssize_t read,
			     void *user_data);
1327
attr_read_authorize(struct bt_conn * conn,const struct bt_gatt_attr * attr)1328 static bool attr_read_authorize(struct bt_conn *conn,
1329 const struct bt_gatt_attr *attr)
1330 {
1331 if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
1332 return true;
1333 }
1334
1335 if (!authorization_cb || !authorization_cb->read_authorize) {
1336 return true;
1337 }
1338
1339 return authorization_cb->read_authorize(conn, attr);
1340 }
1341
attr_read_type_cb(struct net_buf * frag,ssize_t read,void * user_data)1342 static bool attr_read_type_cb(struct net_buf *frag, ssize_t read,
1343 void *user_data)
1344 {
1345 struct read_type_data *data = user_data;
1346
1347 if (!data->rsp->len) {
1348 /* Set len to be the first item found */
1349 data->rsp->len = read + sizeof(*data->item);
1350 } else if (data->rsp->len != read + sizeof(*data->item)) {
1351 /* All items should have the same size */
1352 frag->len -= sizeof(*data->item);
1353 data->item = NULL;
1354 return false;
1355 }
1356
1357 return true;
1358 }
1359
/* Read an attribute's value into the response buffer, chaining extra
 * fragments as needed until the ATT MTU is filled or the value ends.
 *
 * @param chan      ATT channel whose MTU bounds the response size.
 * @param attr      Attribute whose read() handler supplies the data.
 * @param buf       Response buffer (possibly a fragment chain) to append to.
 * @param offset    Value offset at which to start reading.
 * @param cb        Optional per-chunk callback; returning false stops the
 *                  loop before the chunk is committed.
 * @param user_data Context passed to @a cb.
 *
 * @return Total number of bytes appended, or a negative error from the
 *         attribute read handler if nothing was read.
 */
static ssize_t att_chan_read(struct bt_att_chan *chan,
			     const struct bt_gatt_attr *attr,
			     struct net_buf *buf, uint16_t offset,
			     attr_read_cb cb, void *user_data)
{
	struct bt_conn *conn = chan->chan.chan.conn;
	ssize_t read;
	struct net_buf *frag;
	size_t len, total = 0;

	/* No room left within the MTU: nothing to do */
	if (bt_att_mtu(chan) <= net_buf_frags_len(buf)) {
		return 0;
	}

	frag = net_buf_frag_last(buf);

	/* Create necessary fragments if MTU is bigger than what a buffer can
	 * hold.
	 */
	do {
		len = MIN(bt_att_mtu(chan) - net_buf_frags_len(buf),
			  net_buf_tailroom(frag));
		if (!len) {
			frag = net_buf_alloc(net_buf_pool_get(buf->pool_id),
					     K_NO_WAIT);
			/* If no buffer can be allocated immediately, return
			 * whatever was read so far.
			 */
			if (!frag) {
				return total;
			}

			net_buf_frag_add(buf, frag);

			len = MIN(bt_att_mtu(chan) - net_buf_frags_len(buf),
				  net_buf_tailroom(frag));
		}

		/* Read directly into the fragment's tailroom */
		read = attr->read(conn, attr, frag->data + frag->len, len,
				  offset);
		if (read < 0) {
			/* Report the error only if nothing was read yet */
			if (total) {
				return total;
			}

			return read;
		}

		/* Let the caller veto the chunk before committing it */
		if (cb && !cb(frag, read, user_data)) {
			break;
		}

		net_buf_add(frag, read);
		total += read;
		offset += read;
		/* Stop when the MTU is full or the value was exhausted
		 * (a short read means there is no more data).
		 */
	} while (bt_att_mtu(chan) > net_buf_frags_len(buf) && read == len);

	return total;
}
1417
/* bt_gatt_foreach_attr() callback for the Read By Type Request: appends
 * handle/value items for attributes matching the requested type.
 */
static uint8_t read_type_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct read_type_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	ssize_t read;

	/* Skip if doesn't match */
	if (bt_uuid_cmp(attr->uuid, data->uuid)) {
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If an attribute in the set of requested attributes would cause an
	 * Error Response then this attribute cannot be included in a
	 * Read By Type Response and the attributes before this attribute
	 * shall be returned
	 *
	 * If the first attribute in the set of requested attributes would
	 * cause an Error Response then no other attributes in the requested
	 * attributes can be considered.
	 */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		/* Items were already collected: suppress the error and
		 * return the partial response instead.
		 */
		if (data->rsp->len) {
			data->err = 0x00;
		}
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_read_authorize(conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/*
	 * If any attribute is found in the handle range it means that error
	 * should be changed from pre-set: attr not found error to no error.
	 */
	data->err = 0x00;

	/* Fast forward to next item position */
	data->item = net_buf_add(net_buf_frag_last(data->buf),
				 sizeof(*data->item));
	data->item->handle = sys_cpu_to_le16(handle);

	read = att_chan_read(chan, attr, data->buf, 0, attr_read_type_cb, data);
	if (read < 0) {
		data->err = err_to_att(read);
		return BT_GATT_ITER_STOP;
	}

	/* attr_read_type_cb() clears item on a length mismatch */
	if (!data->item) {
		return BT_GATT_ITER_STOP;
	}

	/* continue only if there are still space for more items */
	return bt_att_mtu(chan) - net_buf_frags_len(data->buf) >
	       data->rsp->len ? BT_GATT_ITER_CONTINUE : BT_GATT_ITER_STOP;
}
1482
att_read_type_rsp(struct bt_att_chan * chan,struct bt_uuid * uuid,uint16_t start_handle,uint16_t end_handle)1483 static uint8_t att_read_type_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
1484 uint16_t start_handle, uint16_t end_handle)
1485 {
1486 struct read_type_data data;
1487
1488 (void)memset(&data, 0, sizeof(data));
1489
1490 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_TYPE_RSP);
1491 if (!data.buf) {
1492 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1493 }
1494
1495 data.chan = chan;
1496 data.uuid = uuid;
1497 data.rsp = net_buf_add(data.buf, sizeof(*data.rsp));
1498 data.rsp->len = 0U;
1499
1500 /* Pre-set error if no attr will be found in handle */
1501 data.err = BT_ATT_ERR_ATTRIBUTE_NOT_FOUND;
1502
1503 bt_gatt_foreach_attr(start_handle, end_handle, read_type_cb, &data);
1504
1505 if (data.err) {
1506 net_buf_unref(data.buf);
1507 /* Response here since handle is set */
1508 send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, start_handle,
1509 data.err);
1510 return 0;
1511 }
1512
1513 bt_att_chan_send_rsp(chan, data.buf);
1514
1515 return 0;
1516 }
1517
/* Handle an incoming ATT Read By Type Request PDU: parse the type UUID,
 * validate the handle range and robust-caching state, then delegate
 * response construction.
 */
static uint8_t att_read_type_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_type_req *req;
	uint16_t start_handle, end_handle, err_handle;
	union {
		struct bt_uuid uuid;
		struct bt_uuid_16 u16;
		struct bt_uuid_128 u128;
	} u;
	/* Bytes following the fixed header form the type UUID */
	uint8_t uuid_len = buf->len - sizeof(*req);

	/* Type can only be UUID16 or UUID128 */
	if (uuid_len != 2 && uuid_len != 16) {
		return BT_ATT_ERR_INVALID_PDU;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);
	if (!bt_uuid_create(&u.uuid, req->uuid, uuid_len)) {
		return BT_ATT_ERR_UNLIKELY;
	}

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %s", start_handle, end_handle,
		bt_uuid_str(&u.uuid));

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	/* If a client that has indicated support for robust caching (by setting the Robust
	 * Caching bit in the Client Supported Features characteristic) is change-unaware
	 * then the server shall send an ATT_ERROR_RSP PDU with the Error Code
	 * parameter set to Database Out Of Sync (0x12) when either of the following happen:
	 * • That client requests an operation at any Attribute Handle or list of Attribute
	 *   Handles by sending an ATT request.
	 * • That client sends an ATT_READ_BY_TYPE_REQ PDU with Attribute Type
	 *   other than «Include» or «Characteristic» and an Attribute Handle range
	 *   other than 0x0001 to 0xFFFF.
	 * (Core Specification 5.4 Vol 3. Part G. 2.5.2.1 Robust Caching).
	 */
	if (!bt_gatt_change_aware(chan->chan.chan.conn, true)) {
		if (bt_uuid_cmp(&u.uuid, BT_UUID_GATT_INCLUDE) != 0 &&
		    bt_uuid_cmp(&u.uuid, BT_UUID_GATT_CHRC) != 0 &&
		    (start_handle != BT_ATT_FIRST_ATTRIBUTE_HANDLE ||
		     end_handle != BT_ATT_LAST_ATTRIBUTE_HANDLE)) {
			/* Only send the out-of-sync error once per channel */
			if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
				return BT_ATT_ERR_DB_OUT_OF_SYNC;
			} else {
				return 0;
			}
		}
	}

	return att_read_type_rsp(chan, &u.uuid, start_handle, end_handle);
}
1577
/* Iteration context shared by the Read, Read Blob and Read Multiple
 * request handlers.
 */
struct read_data {
	struct bt_att_chan *chan;	/* Channel the request arrived on */
	uint16_t offset;		/* Value offset to read from */
	struct net_buf *buf;		/* Response PDU under construction */
	uint8_t err;			/* Pending ATT error code, 0 on success */
};
1584
read_cb(const struct bt_gatt_attr * attr,uint16_t handle,void * user_data)1585 static uint8_t read_cb(const struct bt_gatt_attr *attr, uint16_t handle,
1586 void *user_data)
1587 {
1588 struct read_data *data = user_data;
1589 struct bt_att_chan *chan = data->chan;
1590 struct bt_conn *conn = chan->chan.chan.conn;
1591 int ret;
1592
1593 LOG_DBG("handle 0x%04x", handle);
1594
1595 /*
1596 * If any attribute is founded in handle range it means that error
1597 * should be changed from pre-set: invalid handle error to no error.
1598 */
1599 data->err = 0x00;
1600
1601 /* Check attribute permissions */
1602 data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
1603 if (data->err) {
1604 return BT_GATT_ITER_STOP;
1605 }
1606
1607 /* Check the attribute authorization logic */
1608 if (!attr_read_authorize(conn, attr)) {
1609 data->err = BT_ATT_ERR_AUTHORIZATION;
1610 return BT_GATT_ITER_STOP;
1611 }
1612
1613 /* Read attribute value and store in the buffer */
1614 ret = att_chan_read(chan, attr, data->buf, data->offset, NULL, NULL);
1615 if (ret < 0) {
1616 data->err = err_to_att(ret);
1617 return BT_GATT_ITER_STOP;
1618 }
1619
1620 return BT_GATT_ITER_CONTINUE;
1621 }
1622
/* Common handler for Read and Read Blob requests: builds a response PDU
 * with the attribute value at the given offset, or an error response.
 */
static uint8_t att_read_rsp(struct bt_att_chan *chan, uint8_t op, uint8_t rsp,
			    uint16_t handle, uint16_t offset)
{
	struct read_data data = { 0 };

	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		/* Only send Database Out Of Sync once per channel */
		return atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT) ?
		       0 : BT_ATT_ERR_DB_OUT_OF_SYNC;
	}

	if (handle == 0U) {
		return BT_ATT_ERR_INVALID_HANDLE;
	}

	data.buf = bt_att_create_rsp_pdu(chan, rsp);
	if (data.buf == NULL) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.offset = offset;

	/* Pre-set error in case no attribute exists at the handle */
	data.err = BT_ATT_ERR_INVALID_HANDLE;

	bt_gatt_foreach_attr(handle, handle, read_cb, &data);

	if (data.err != 0U) {
		/* Discard the PDU and respond with an error instead */
		net_buf_unref(data.buf);
		send_err_rsp(chan, op, handle, data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
1667
att_read_req(struct bt_att_chan * chan,struct net_buf * buf)1668 static uint8_t att_read_req(struct bt_att_chan *chan, struct net_buf *buf)
1669 {
1670 struct bt_att_read_req *req;
1671 uint16_t handle;
1672
1673 req = (void *)buf->data;
1674
1675 handle = sys_le16_to_cpu(req->handle);
1676
1677 LOG_DBG("handle 0x%04x", handle);
1678
1679 return att_read_rsp(chan, BT_ATT_OP_READ_REQ, BT_ATT_OP_READ_RSP,
1680 handle, 0);
1681 }
1682
att_read_blob_req(struct bt_att_chan * chan,struct net_buf * buf)1683 static uint8_t att_read_blob_req(struct bt_att_chan *chan, struct net_buf *buf)
1684 {
1685 struct bt_att_read_blob_req *req;
1686 uint16_t handle, offset;
1687
1688 req = (void *)buf->data;
1689
1690 handle = sys_le16_to_cpu(req->handle);
1691 offset = sys_le16_to_cpu(req->offset);
1692
1693 LOG_DBG("handle 0x%04x offset %u", handle, offset);
1694
1695 return att_read_rsp(chan, BT_ATT_OP_READ_BLOB_REQ,
1696 BT_ATT_OP_READ_BLOB_RSP, handle, offset);
1697 }
1698
1699 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
att_read_mult_req(struct bt_att_chan * chan,struct net_buf * buf)1700 static uint8_t att_read_mult_req(struct bt_att_chan *chan, struct net_buf *buf)
1701 {
1702 struct read_data data;
1703 uint16_t handle;
1704
1705 if (!bt_gatt_change_aware(chan->att->conn, true)) {
1706 if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
1707 return BT_ATT_ERR_DB_OUT_OF_SYNC;
1708 } else {
1709 return 0;
1710 }
1711 }
1712
1713 (void)memset(&data, 0, sizeof(data));
1714
1715 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_MULT_RSP);
1716 if (!data.buf) {
1717 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1718 }
1719
1720 data.chan = chan;
1721
1722 while (buf->len >= sizeof(uint16_t)) {
1723 handle = net_buf_pull_le16(buf);
1724
1725 LOG_DBG("handle 0x%04x ", handle);
1726
1727 /* An Error Response shall be sent by the server in response to
1728 * the Read Multiple Request [....] if a read operation is not
1729 * permitted on any of the Characteristic Values.
1730 *
1731 * If handle is not valid then return invalid handle error.
1732 * If handle is found error will be cleared by read_cb.
1733 */
1734 data.err = BT_ATT_ERR_INVALID_HANDLE;
1735
1736 bt_gatt_foreach_attr(handle, handle, read_cb, &data);
1737
1738 /* Stop reading in case of error */
1739 if (data.err) {
1740 net_buf_unref(data.buf);
1741 /* Respond here since handle is set */
1742 send_err_rsp(chan, BT_ATT_OP_READ_MULT_REQ, handle,
1743 data.err);
1744 return 0;
1745 }
1746 }
1747
1748 bt_att_chan_send_rsp(chan, data.buf);
1749
1750 return 0;
1751 }
1752 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
1753
1754 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
read_vl_cb(const struct bt_gatt_attr * attr,uint16_t handle,void * user_data)1755 static uint8_t read_vl_cb(const struct bt_gatt_attr *attr, uint16_t handle,
1756 void *user_data)
1757 {
1758 struct read_data *data = user_data;
1759 struct bt_att_chan *chan = data->chan;
1760 struct bt_conn *conn = chan->chan.chan.conn;
1761 struct bt_att_read_mult_vl_rsp *rsp;
1762 int read;
1763
1764 LOG_DBG("handle 0x%04x", handle);
1765
1766 /*
1767 * If any attribute is founded in handle range it means that error
1768 * should be changed from pre-set: invalid handle error to no error.
1769 */
1770 data->err = 0x00;
1771
1772 /* Check attribute permissions */
1773 data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
1774 if (data->err) {
1775 return BT_GATT_ITER_STOP;
1776 }
1777
1778 /* Check the attribute authorization logic */
1779 if (!attr_read_authorize(conn, attr)) {
1780 data->err = BT_ATT_ERR_AUTHORIZATION;
1781 return BT_GATT_ITER_STOP;
1782 }
1783
1784 /* The Length Value Tuple List may be truncated within the first two
1785 * octets of a tuple due to the size limits of the current ATT_MTU.
1786 */
1787 if (bt_att_mtu(chan) - data->buf->len < 2) {
1788 return BT_GATT_ITER_STOP;
1789 }
1790
1791 rsp = net_buf_add(data->buf, sizeof(*rsp));
1792
1793 read = att_chan_read(chan, attr, data->buf, data->offset, NULL, NULL);
1794 if (read < 0) {
1795 data->err = err_to_att(read);
1796 return BT_GATT_ITER_STOP;
1797 }
1798
1799 rsp->len = read;
1800
1801 return BT_GATT_ITER_CONTINUE;
1802 }
1803
att_read_mult_vl_req(struct bt_att_chan * chan,struct net_buf * buf)1804 static uint8_t att_read_mult_vl_req(struct bt_att_chan *chan, struct net_buf *buf)
1805 {
1806 struct read_data data;
1807 uint16_t handle;
1808
1809 if (!bt_gatt_change_aware(chan->att->conn, true)) {
1810 if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
1811 return BT_ATT_ERR_DB_OUT_OF_SYNC;
1812 } else {
1813 return 0;
1814 }
1815 }
1816
1817 (void)memset(&data, 0, sizeof(data));
1818
1819 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_MULT_VL_RSP);
1820 if (!data.buf) {
1821 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1822 }
1823
1824 data.chan = chan;
1825
1826 while (buf->len >= sizeof(uint16_t)) {
1827 handle = net_buf_pull_le16(buf);
1828
1829 LOG_DBG("handle 0x%04x ", handle);
1830
1831 /* If handle is not valid then return invalid handle error.
1832 * If handle is found error will be cleared by read_cb.
1833 */
1834 data.err = BT_ATT_ERR_INVALID_HANDLE;
1835
1836 bt_gatt_foreach_attr(handle, handle, read_vl_cb, &data);
1837
1838 /* Stop reading in case of error */
1839 if (data.err) {
1840 net_buf_unref(data.buf);
1841 /* Respond here since handle is set */
1842 send_err_rsp(chan, BT_ATT_OP_READ_MULT_VL_REQ, handle,
1843 data.err);
1844 return 0;
1845 }
1846 }
1847
1848 bt_att_chan_send_rsp(chan, data.buf);
1849
1850 return 0;
1851 }
1852 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
1853
/* Iteration context for handling an ATT Read By Group Type Request. */
struct read_group_data {
	struct bt_att_chan *chan;	/* Channel the request arrived on */
	struct bt_uuid *uuid;		/* Grouping type to match */
	struct net_buf *buf;		/* Response PDU under construction */
	struct bt_att_read_group_rsp *rsp; /* Response header holding entry len */
	struct bt_att_group_data *group; /* Open group entry, NULL if none */
};
1861
attr_read_group_cb(struct net_buf * frag,ssize_t read,void * user_data)1862 static bool attr_read_group_cb(struct net_buf *frag, ssize_t read,
1863 void *user_data)
1864 {
1865 struct read_group_data *data = user_data;
1866
1867 if (!data->rsp->len) {
1868 /* Set len to be the first group found */
1869 data->rsp->len = read + sizeof(*data->group);
1870 } else if (data->rsp->len != read + sizeof(*data->group)) {
1871 /* All groups entries should have the same size */
1872 data->buf->len -= sizeof(*data->group);
1873 data->group = NULL;
1874 return false;
1875 }
1876
1877 return true;
1878 }
1879
/* bt_gatt_foreach_attr() callback for the Read By Group Type Request:
 * appends a group entry per matching service declaration and extends
 * the open group's end handle over the service's other attributes.
 */
static uint8_t read_group_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			     void *user_data)
{
	struct read_group_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	int read;

	/* Update group end_handle if attribute is not a service */
	if (bt_uuid_cmp(attr->uuid, BT_UUID_GATT_PRIMARY) &&
	    bt_uuid_cmp(attr->uuid, BT_UUID_GATT_SECONDARY)) {
		if (data->group &&
		    handle > sys_le16_to_cpu(data->group->end_handle)) {
			data->group->end_handle = sys_cpu_to_le16(handle);
		}
		return BT_GATT_ITER_CONTINUE;
	}

	/* If Group Type don't match skip */
	if (bt_uuid_cmp(attr->uuid, data->uuid)) {
		data->group = NULL;
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/* Stop if there is no space left for another full entry */
	if (data->rsp->len &&
	    bt_att_mtu(chan) - data->buf->len < data->rsp->len) {
		return BT_GATT_ITER_STOP;
	}

	/* Fast forward to next group position */
	data->group = net_buf_add(data->buf, sizeof(*data->group));

	/* Initialize group handle range */
	data->group->start_handle = sys_cpu_to_le16(handle);
	data->group->end_handle = sys_cpu_to_le16(handle);

	/* Read attribute value and store in the buffer */
	read = att_chan_read(chan, attr, data->buf, 0, attr_read_group_cb,
			     data);
	if (read < 0) {
		/* TODO: Handle read errors */
		return BT_GATT_ITER_STOP;
	}

	/* attr_read_group_cb() clears group on a length mismatch */
	if (!data->group) {
		return BT_GATT_ITER_STOP;
	}

	/* continue only if there are still space for more items */
	return BT_GATT_ITER_CONTINUE;
}
1933
att_read_group_rsp(struct bt_att_chan * chan,struct bt_uuid * uuid,uint16_t start_handle,uint16_t end_handle)1934 static uint8_t att_read_group_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
1935 uint16_t start_handle, uint16_t end_handle)
1936 {
1937 struct read_group_data data;
1938
1939 (void)memset(&data, 0, sizeof(data));
1940
1941 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_GROUP_RSP);
1942 if (!data.buf) {
1943 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1944 }
1945
1946 data.chan = chan;
1947 data.uuid = uuid;
1948 data.rsp = net_buf_add(data.buf, sizeof(*data.rsp));
1949 data.rsp->len = 0U;
1950 data.group = NULL;
1951
1952 bt_gatt_foreach_attr(start_handle, end_handle, read_group_cb, &data);
1953
1954 if (!data.rsp->len) {
1955 net_buf_unref(data.buf);
1956 /* Respond here since handle is set */
1957 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
1958 BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
1959 return 0;
1960 }
1961
1962 bt_att_chan_send_rsp(chan, data.buf);
1963
1964 return 0;
1965 }
1966
/* Handle an incoming ATT Read By Group Type Request PDU: parse the
 * grouping UUID, validate the handle range and the grouping type, then
 * delegate response construction.
 */
static uint8_t att_read_group_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_group_req *req;
	uint16_t start_handle, end_handle, err_handle;
	union {
		struct bt_uuid uuid;
		struct bt_uuid_16 u16;
		struct bt_uuid_128 u128;
	} u;
	/* Bytes following the fixed header form the grouping UUID */
	uint8_t uuid_len = buf->len - sizeof(*req);

	/* Type can only be UUID16 or UUID128 */
	if (uuid_len != 2 && uuid_len != 16) {
		return BT_ATT_ERR_INVALID_PDU;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);

	if (!bt_uuid_create(&u.uuid, req->uuid, uuid_len)) {
		return BT_ATT_ERR_UNLIKELY;
	}

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %s", start_handle, end_handle,
		bt_uuid_str(&u.uuid));

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	/* Core v4.2, Vol 3, sec 2.5.3 Attribute Grouping:
	 * Not all of the grouping attributes can be used in the ATT
	 * Read By Group Type Request. The "Primary Service" and "Secondary
	 * Service" grouping types may be used in the Read By Group Type
	 * Request. The "Characteristic" grouping type shall not be used in
	 * the ATT Read By Group Type Request.
	 */
	if (bt_uuid_cmp(&u.uuid, BT_UUID_GATT_PRIMARY) &&
	    bt_uuid_cmp(&u.uuid, BT_UUID_GATT_SECONDARY)) {
		send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
			     BT_ATT_ERR_UNSUPPORTED_GROUP_TYPE);
		return 0;
	}

	return att_read_group_rsp(chan, &u.uuid, start_handle, end_handle);
}
2017
/* Iteration context shared by the Write Request/Command and Execute
 * Write handlers.
 */
struct write_data {
	struct bt_conn *conn;	/* Connection the write arrived on */
	struct net_buf *buf;	/* Response PDU, NULL for commands */
	uint8_t req;		/* Originating opcode, 0 for commands */
	const void *value;	/* Value to write */
	uint16_t len;		/* Length of value in octets */
	uint16_t offset;	/* Value offset to write at */
	uint8_t err;		/* Pending ATT error code, 0 on success */
};
2027
attr_write_authorize(struct bt_conn * conn,const struct bt_gatt_attr * attr)2028 static bool attr_write_authorize(struct bt_conn *conn,
2029 const struct bt_gatt_attr *attr)
2030 {
2031 if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
2032 return true;
2033 }
2034
2035 if (!authorization_cb || !authorization_cb->write_authorize) {
2036 return true;
2037 }
2038
2039 return authorization_cb->write_authorize(conn, attr);
2040 }
2041
/* bt_gatt_foreach_attr() callback for write operations: validates
 * permissions and authorization, then invokes the attribute's write
 * handler with flags derived from the originating opcode.
 */
static uint8_t write_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			void *user_data)
{
	struct write_data *data = user_data;
	int write;
	uint8_t flags = 0U;

	LOG_DBG("handle 0x%04x offset %u", handle, data->offset);

	/* Check attribute permissions */
	data->err = bt_gatt_check_perm(data->conn, attr,
				       BT_GATT_PERM_WRITE_MASK);
	if (data->err) {
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_write_authorize(data->conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/* Set command flag if not a request (req == 0 means Write Command) */
	if (!data->req) {
		flags |= BT_GATT_WRITE_FLAG_CMD;
	} else if (data->req == BT_ATT_OP_EXEC_WRITE_REQ) {
		flags |= BT_GATT_WRITE_FLAG_EXECUTE;
	}

	/* Write attribute value; a partial write is treated as an error */
	write = attr->write(data->conn, attr, data->value, data->len,
			    data->offset, flags);
	if (write < 0 || write != data->len) {
		data->err = err_to_att(write);
		return BT_GATT_ITER_STOP;
	}

	data->err = 0U;

	return BT_GATT_ITER_CONTINUE;
}
2083
/* Common handler for Write Request, Write Command and Execute Write:
 * performs the write at the given handle/offset and, when @a rsp is
 * non-zero, sends the corresponding response PDU (or an error response).
 *
 * For Execute Write requests the error code is returned to the caller
 * as well, so the prepare queue can be flushed accordingly.
 */
static uint8_t att_write_rsp(struct bt_att_chan *chan, uint8_t req, uint8_t rsp,
			     uint16_t handle, uint16_t offset, const void *value,
			     uint16_t len)
{
	struct write_data data;

	/* Commands (req == 0) do not require change-awareness */
	if (!bt_gatt_change_aware(chan->att->conn, req ? true : false)) {
		/* Only send Database Out Of Sync once per channel */
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	if (!handle) {
		return BT_ATT_ERR_INVALID_HANDLE;
	}

	(void)memset(&data, 0, sizeof(data));

	/* Only allocate buf if required to respond */
	if (rsp) {
		data.buf = bt_att_chan_create_pdu(chan, rsp, 0);
		if (!data.buf) {
			return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
		}
	}

	data.conn = chan->att->conn;
	data.req = req;
	data.offset = offset;
	data.value = value;
	data.len = len;
	/* Pre-set error in case no attribute exists at the handle;
	 * write_cb() clears it on success.
	 */
	data.err = BT_ATT_ERR_INVALID_HANDLE;

	bt_gatt_foreach_attr(handle, handle, write_cb, &data);

	if (data.err) {
		/* In case of error discard data and respond with an error */
		if (rsp) {
			net_buf_unref(data.buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, req, handle, data.err);
		}
		/* Execute Write propagates the error to the caller */
		return req == BT_ATT_OP_EXEC_WRITE_REQ ? data.err : 0;
	}

	if (data.buf) {
		bt_att_chan_send_rsp(chan, data.buf);
	}

	return 0;
}
2137
att_write_req(struct bt_att_chan * chan,struct net_buf * buf)2138 static uint8_t att_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2139 {
2140 uint16_t handle;
2141
2142 handle = net_buf_pull_le16(buf);
2143
2144 LOG_DBG("handle 0x%04x", handle);
2145
2146 return att_write_rsp(chan, BT_ATT_OP_WRITE_REQ, BT_ATT_OP_WRITE_RSP,
2147 handle, 0, buf->data, buf->len);
2148 }
2149
#if CONFIG_BT_ATT_PREPARE_COUNT > 0
/* Context passed to prep_write_cb while queueing a prepare write. */
struct prep_data {
	struct bt_conn *conn;	/* Connection the request came from */
	struct net_buf *buf;	/* Allocated queue entry (output) */
	const void *value;	/* Value to be queued */
	uint16_t len;		/* Length of value in bytes */
	uint16_t offset;	/* Offset within the attribute value */
	uint8_t err;		/* ATT error result (output) */
};
2159
/* bt_gatt_foreach_attr callback: validate a Prepare Write against one
 * attribute and append the value to the outstanding prepare queue.
 *
 * Result is reported through data->err; the queued entry (with the handle
 * and offset stored in its user data) is left in data->buf for the caller.
 */
static uint8_t prep_write_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			     void *user_data)
{
	struct prep_data *data = user_data;
	struct bt_attr_data *attr_data;
	int write;

	LOG_DBG("handle 0x%04x offset %u", handle, data->offset);

	/* Check attribute permissions */
	data->err = bt_gatt_check_perm(data->conn, attr,
				       BT_GATT_PERM_WRITE_MASK);
	if (data->err) {
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_write_authorize(data->conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/* Check if attribute requires handler to accept the data */
	if (!(attr->perm & BT_GATT_PERM_PREPARE_WRITE)) {
		goto append;
	}

	/* Write attribute value to check if device is authorized */
	write = attr->write(data->conn, attr, data->value, data->len,
			    data->offset, BT_GATT_WRITE_FLAG_PREPARE);
	if (write != 0) {
		data->err = err_to_att(write);
		return BT_GATT_ITER_STOP;
	}

append:
	/* Copy data into the outstanding queue */
	data->buf = net_buf_alloc(&prep_pool, K_NO_WAIT);
	if (!data->buf) {
		data->err = BT_ATT_ERR_PREPARE_QUEUE_FULL;
		return BT_GATT_ITER_STOP;
	}

	/* Record handle/offset so the queue can be reassembled on execute */
	attr_data = net_buf_user_data(data->buf);
	attr_data->handle = handle;
	attr_data->offset = data->offset;

	net_buf_add_mem(data->buf, data->value, data->len);

	data->err = 0U;

	return BT_GATT_ITER_CONTINUE;
}
2213
/* Handle the body of an ATT Prepare Write Request: validate and queue the
 * value, then echo handle/offset/value back in the response.
 *
 * @return 0 on success or when the error was already reported inline,
 *         otherwise an ATT error code for the caller to report.
 */
static uint8_t att_prep_write_rsp(struct bt_att_chan *chan, uint16_t handle,
				  uint16_t offset, const void *value, uint8_t len)
{
	struct prep_data data;
	struct bt_att_prepare_write_rsp *rsp;

	/* Change-unaware clients get one Database Out Of Sync error, then
	 * their PDUs are ignored until they become change-aware.
	 */
	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	if (!handle) {
		return BT_ATT_ERR_INVALID_HANDLE;
	}

	(void)memset(&data, 0, sizeof(data));

	data.conn = chan->att->conn;
	data.offset = offset;
	data.value = value;
	data.len = len;
	/* If prep_write_cb never runs, the handle matched no attribute */
	data.err = BT_ATT_ERR_INVALID_HANDLE;

	bt_gatt_foreach_attr(handle, handle, prep_write_cb, &data);

	if (data.err) {
		/* Respond here since handle is set */
		send_err_rsp(chan, BT_ATT_OP_PREPARE_WRITE_REQ, handle,
			     data.err);
		return 0;
	}

	LOG_DBG("buf %p handle 0x%04x offset %u", data.buf, handle, offset);

	/* Store buffer in the outstanding queue */
	net_buf_slist_put(&chan->att->prep_queue, data.buf);

	/* Generate response */
	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_PREPARE_WRITE_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	/* Response mirrors the request: handle, offset and value */
	rsp = net_buf_add(data.buf, sizeof(*rsp));
	rsp->handle = sys_cpu_to_le16(handle);
	rsp->offset = sys_cpu_to_le16(offset);
	net_buf_add(data.buf, len);
	memcpy(rsp->value, value, len);

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
2270 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2271
att_prepare_write_req(struct bt_att_chan * chan,struct net_buf * buf)2272 static uint8_t att_prepare_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2273 {
2274 #if CONFIG_BT_ATT_PREPARE_COUNT == 0
2275 return BT_ATT_ERR_NOT_SUPPORTED;
2276 #else
2277 struct bt_att_prepare_write_req *req;
2278 uint16_t handle, offset;
2279
2280 req = net_buf_pull_mem(buf, sizeof(*req));
2281
2282 handle = sys_le16_to_cpu(req->handle);
2283 offset = sys_le16_to_cpu(req->offset);
2284
2285 LOG_DBG("handle 0x%04x offset %u", handle, offset);
2286
2287 return att_prep_write_rsp(chan, handle, offset, buf->data, buf->len);
2288 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2289 }
2290
2291 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
/* Collect all queued prepare-write fragments for @p handle from @p list and
 * append them, in order, to @p buf.
 *
 * Fragments must have strictly continuing offsets (next offset equals the
 * reassembled length so far plus the initial offset). Matched entries are
 * removed from the list and unreferenced.
 *
 * @return BT_ATT_ERR_SUCCESS, or an ATT error code if the offsets do not
 *         line up or the reassembled value would overflow @p buf.
 */
static uint8_t exec_write_reassemble(uint16_t handle, uint16_t offset,
				     sys_slist_t *list,
				     struct net_buf_simple *buf)
{
	struct net_buf *entry, *next;
	sys_snode_t *prev;

	prev = NULL;
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(list, entry, next, node) {
		struct bt_attr_data *tmp_data = net_buf_user_data(entry);

		LOG_DBG("entry %p handle 0x%04x, offset %u", entry, tmp_data->handle,
			tmp_data->offset);

		if (tmp_data->handle == handle) {
			if (tmp_data->offset == 0) {
				/* Multiple writes to the same handle can occur
				 * in a prepare write queue. If the offset is 0,
				 * that should mean that it's a new write to the
				 * same handle, and we break to process the
				 * first write.
				 */

				LOG_DBG("tmp_data->offset == 0");
				break;
			}

			if (tmp_data->offset != buf->len + offset) {
				/* We require that the offset is increasing
				 * properly to avoid badly reassembled buffers
				 */

				LOG_DBG("Bad offset %u (%u, %u)", tmp_data->offset, buf->len,
					offset);

				return BT_ATT_ERR_INVALID_OFFSET;
			}

			if (buf->len + entry->len > buf->size) {
				return BT_ATT_ERR_INVALID_ATTRIBUTE_LEN;
			}

			net_buf_simple_add_mem(buf, entry->data, entry->len);
			/* Consume the fragment: unlink and drop its reference */
			sys_slist_remove(list, prev, &entry->node);
			net_buf_unref(entry);
		} else {
			prev = &entry->node;
		}
	}

	return BT_ATT_ERR_SUCCESS;
}
2344
/* Handle the body of an ATT Execute Write Request: drain the prepare queue,
 * reassembling fragments per handle, and either commit them (flags ==
 * BT_ATT_FLAG_EXEC) or just discard them (cancel).
 *
 * @return 0 when a response (or error response) was sent inline, otherwise
 *         an ATT error code for the caller to report.
 */
static uint8_t att_exec_write_rsp(struct bt_att_chan *chan, uint8_t flags)
{
	struct net_buf *buf;
	uint8_t err = 0U;

	/* The following code will iterate on all prepare writes in the
	 * prep_queue, and reassemble those that share the same handle.
	 * Once a handle has been reassembled, it is sent to the upper layers,
	 * and the next handle is processed
	 */
	while (!sys_slist_is_empty(&chan->att->prep_queue)) {
		struct bt_attr_data *data;
		uint16_t handle;

		/* Static scratch buffer, reset on every iteration below */
		NET_BUF_SIMPLE_DEFINE_STATIC(reassembled_data,
					     MIN(BT_ATT_MAX_ATTRIBUTE_LEN,
						 CONFIG_BT_ATT_PREPARE_COUNT * BT_ATT_BUF_SIZE));

		buf = net_buf_slist_get(&chan->att->prep_queue);
		data = net_buf_user_data(buf);
		handle = data->handle;

		LOG_DBG("buf %p handle 0x%04x offset %u", buf, handle, data->offset);

		/* Seed the reassembly with the first fragment, then pull in
		 * any continuation fragments still queued for this handle.
		 */
		net_buf_simple_reset(&reassembled_data);
		net_buf_simple_add_mem(&reassembled_data, buf->data, buf->len);

		err = exec_write_reassemble(handle, data->offset,
					    &chan->att->prep_queue,
					    &reassembled_data);
		if (err != BT_ATT_ERR_SUCCESS) {
			send_err_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ,
				     handle, err);
			return 0;
		}

		/* Just discard the data if an error was set */
		if (!err && flags == BT_ATT_FLAG_EXEC) {
			err = att_write_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ, 0,
					    handle, data->offset,
					    reassembled_data.data,
					    reassembled_data.len);
			if (err) {
				/* Respond here since handle is set */
				send_err_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ,
					     data->handle, err);
			}
		}

		net_buf_unref(buf);
	}

	if (err) {
		return 0;
	}

	/* Generate response */
	buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_EXEC_WRITE_RSP);
	if (!buf) {
		return BT_ATT_ERR_UNLIKELY;
	}

	bt_att_chan_send_rsp(chan, buf);

	return 0;
}
2411 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2412
2413
att_exec_write_req(struct bt_att_chan * chan,struct net_buf * buf)2414 static uint8_t att_exec_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2415 {
2416 #if CONFIG_BT_ATT_PREPARE_COUNT == 0
2417 return BT_ATT_ERR_NOT_SUPPORTED;
2418 #else
2419 struct bt_att_exec_write_req *req;
2420
2421 req = (void *)buf->data;
2422
2423 LOG_DBG("flags 0x%02x", req->flags);
2424
2425 return att_exec_write_rsp(chan, req->flags);
2426 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2427 }
2428
att_write_cmd(struct bt_att_chan * chan,struct net_buf * buf)2429 static uint8_t att_write_cmd(struct bt_att_chan *chan, struct net_buf *buf)
2430 {
2431 uint16_t handle;
2432
2433 handle = net_buf_pull_le16(buf);
2434
2435 LOG_DBG("handle 0x%04x", handle);
2436
2437 return att_write_rsp(chan, 0, 0, handle, 0, buf->data, buf->len);
2438 }
2439
#if defined(CONFIG_BT_SIGNING)
/* Handle an ATT Signed Write Command: verify the CMAC signature over the
 * full PDU and, if valid, perform the write without a response.
 */
static uint8_t att_signed_write_cmd(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_conn *conn = chan->chan.chan.conn;
	struct bt_att_signed_write_cmd *req;
	uint16_t handle;
	int err;

	/* The Signed Write Without Response sub-procedure shall only be supported
	 * on the LE Fixed Channel Unenhanced ATT bearer.
	 */
	if (bt_att_is_enhanced(chan)) {
		/* No response for this command */
		return 0;
	}

	req = (void *)buf->data;

	handle = sys_le16_to_cpu(req->handle);

	LOG_DBG("handle 0x%04x", handle);

	/* Verifying data requires full buffer including attribute header */
	net_buf_push(buf, sizeof(struct bt_att_hdr));
	err = bt_smp_sign_verify(conn, buf);
	if (err) {
		LOG_ERR("Error verifying data");
		/* No response for this command */
		return 0;
	}

	/* Strip the header and handle again; the trailing signature is
	 * excluded from the value length passed to the write below.
	 */
	net_buf_pull(buf, sizeof(struct bt_att_hdr));
	net_buf_pull(buf, sizeof(*req));

	return att_write_rsp(chan, 0, 0, handle, 0, buf->data,
			     buf->len - sizeof(struct bt_att_signature));
}
#endif /* CONFIG_BT_SIGNING */
2478
2479 #if defined(CONFIG_BT_GATT_CLIENT)
2480 #if defined(CONFIG_BT_ATT_RETRY_ON_SEC_ERR)
/* Try to elevate link security in response to an ATT security error code.
 *
 * @param conn Connection to raise security on.
 * @param err  ATT error code received from the server.
 *
 * @return Result of bt_conn_set_security() when an elevation was attempted,
 *         -EALREADY when the current level already satisfies the error,
 *         -EINVAL for error codes that security elevation cannot fix.
 */
static int att_change_security(struct bt_conn *conn, uint8_t err)
{
	bt_security_t sec;

	switch (err) {
	case BT_ATT_ERR_INSUFFICIENT_ENCRYPTION:
		if (conn->sec_level >= BT_SECURITY_L2) {
			return -EALREADY;
		}
		sec = BT_SECURITY_L2;
		break;
	case BT_ATT_ERR_AUTHENTICATION:
		if (conn->sec_level < BT_SECURITY_L2) {
			/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
			 * page 375:
			 *
			 * If an LTK is not available, the service request
			 * shall be rejected with the error code 'Insufficient
			 * Authentication'.
			 * Note: When the link is not encrypted, the error code
			 * "Insufficient Authentication" does not indicate that
			 * MITM protection is required.
			 */
			sec = BT_SECURITY_L2;
		} else if (conn->sec_level < BT_SECURITY_L3) {
			/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
			 * page 375:
			 *
			 * If an authenticated pairing is required but only an
			 * unauthenticated pairing has occurred and the link is
			 * currently encrypted, the service request shall be
			 * rejected with the error code 'Insufficient
			 * Authentication'.
			 * Note: When unauthenticated pairing has occurred and
			 * the link is currently encrypted, the error code
			 * 'Insufficient Authentication' indicates that MITM
			 * protection is required.
			 */
			sec = BT_SECURITY_L3;
		} else if (conn->sec_level < BT_SECURITY_L4) {
			/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
			 * page 375:
			 *
			 * If LE Secure Connections authenticated pairing is
			 * required but LE legacy pairing has occurred and the
			 * link is currently encrypted, the service request
			 * shall be rejected with the error code ''Insufficient
			 * Authentication'.
			 */
			sec = BT_SECURITY_L4;
		} else {
			return -EALREADY;
		}
		break;
	default:
		return -EINVAL;
	}

	return bt_conn_set_security(conn, sec);
}
2541 #endif /* CONFIG_BT_ATT_RETRY_ON_SEC_ERR */
2542
/* Handle an ATT Error Response for the pending request on this bearer.
 *
 * May transparently retry the request after raising link security when
 * CONFIG_BT_ATT_RETRY_ON_SEC_ERR is enabled; otherwise the error is
 * delivered to the request callback via att_handle_rsp().
 */
static uint8_t att_error_rsp(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_error_rsp *rsp;
	uint8_t err;

	rsp = (void *)buf->data;

	LOG_DBG("request 0x%02x handle 0x%04x error 0x%02x", rsp->request,
		sys_le16_to_cpu(rsp->handle), rsp->error);

	/* Don't retry if there is no req pending or it has been cancelled.
	 *
	 * BLUETOOTH SPECIFICATION Version 5.2 [Vol 3, Part F]
	 * page 1423:
	 *
	 * If an error code is received in the ATT_ERROR_RSP PDU that is not
	 * understood by the client, for example an error code that was reserved
	 * for future use that is now being used in a future version of the
	 * specification, then the ATT_ERROR_RSP PDU shall still be considered to
	 * state that the given request cannot be performed for an unknown reason.
	 */
	if (!chan->req || chan->req == &cancel || !rsp->error) {
		err = BT_ATT_ERR_UNLIKELY;
		goto done;
	}

	err = rsp->error;

#if defined(CONFIG_BT_ATT_RETRY_ON_SEC_ERR)
	int ret;

	/* Check if error can be handled by elevating security. */
	ret = att_change_security(chan->chan.chan.conn, err);
	if (ret == 0 || ret == -EBUSY) {
		/* ATT timeout work is normally cancelled in att_handle_rsp.
		 * However retrying is special case, so the timeout shall
		 * be cancelled here.
		 */
		k_work_cancel_delayable(&chan->timeout_work);

		/* Keep the request pending; it is re-sent once security
		 * has been updated.
		 */
		chan->req->retrying = true;
		return 0;
	}
#endif /* CONFIG_BT_ATT_RETRY_ON_SEC_ERR */

done:
	return att_handle_rsp(chan, NULL, 0, err);
}
2591
/* The att_handle_*_rsp handlers below are thin wrappers that feed the raw
 * response payload into att_handle_rsp(), which matches it against the
 * pending request on this bearer and invokes the request callback.
 */

/* Handle an ATT Find Information Response. */
static uint8_t att_handle_find_info_rsp(struct bt_att_chan *chan,
					struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}

/* Handle an ATT Find By Type Value Response. */
static uint8_t att_handle_find_type_rsp(struct bt_att_chan *chan,
					struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}

/* Handle an ATT Read By Type Response. */
static uint8_t att_handle_read_type_rsp(struct bt_att_chan *chan,
					struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}

/* Handle an ATT Read Response. */
static uint8_t att_handle_read_rsp(struct bt_att_chan *chan,
				   struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}

/* Handle an ATT Read Blob Response. */
static uint8_t att_handle_read_blob_rsp(struct bt_att_chan *chan,
					struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}

#if defined(CONFIG_BT_GATT_READ_MULTIPLE)
/* Handle an ATT Read Multiple Response. */
static uint8_t att_handle_read_mult_rsp(struct bt_att_chan *chan,
					struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}

#endif /* CONFIG_BT_GATT_READ_MULTIPLE */

#if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
/* Handle an ATT Read Multiple Variable Length Response. */
static uint8_t att_handle_read_mult_vl_rsp(struct bt_att_chan *chan,
					   struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}
#endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */

/* Handle an ATT Read By Group Type Response. */
static uint8_t att_handle_read_group_rsp(struct bt_att_chan *chan,
					 struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}

/* Handle an ATT Write Response. */
static uint8_t att_handle_write_rsp(struct bt_att_chan *chan,
				    struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}

/* Handle an ATT Prepare Write Response. */
static uint8_t att_handle_prepare_write_rsp(struct bt_att_chan *chan,
					    struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}

/* Handle an ATT Execute Write Response. */
static uint8_t att_handle_exec_write_rsp(struct bt_att_chan *chan,
					 struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}
2684
att_notify(struct bt_att_chan * chan,struct net_buf * buf)2685 static uint8_t att_notify(struct bt_att_chan *chan, struct net_buf *buf)
2686 {
2687 uint16_t handle;
2688
2689 handle = net_buf_pull_le16(buf);
2690
2691 LOG_DBG("chan %p handle 0x%04x", chan, handle);
2692
2693 bt_gatt_notification(chan->att->conn, handle, buf->data, buf->len);
2694
2695 return 0;
2696 }
2697
att_indicate(struct bt_att_chan * chan,struct net_buf * buf)2698 static uint8_t att_indicate(struct bt_att_chan *chan, struct net_buf *buf)
2699 {
2700 uint16_t handle;
2701
2702 handle = net_buf_pull_le16(buf);
2703
2704 LOG_DBG("chan %p handle 0x%04x", chan, handle);
2705
2706 bt_gatt_notification(chan->att->conn, handle, buf->data, buf->len);
2707
2708 buf = bt_att_chan_create_pdu(chan, BT_ATT_OP_CONFIRM, 0);
2709 if (!buf) {
2710 return 0;
2711 }
2712
2713 bt_att_chan_send_rsp(chan, buf);
2714
2715 return 0;
2716 }
2717
att_notify_mult(struct bt_att_chan * chan,struct net_buf * buf)2718 static uint8_t att_notify_mult(struct bt_att_chan *chan, struct net_buf *buf)
2719 {
2720 LOG_DBG("chan %p", chan);
2721
2722 bt_gatt_mult_notification(chan->att->conn, buf->data, buf->len);
2723
2724 return 0;
2725 }
2726 #endif /* CONFIG_BT_GATT_CLIENT */
2727
/* Handle a Handle Value Confirmation for a pending indication. */
static uint8_t att_confirm(struct bt_att_chan *chan, struct net_buf *buf)
{
	LOG_DBG("");

	return att_handle_rsp(chan, buf->data, buf->len, 0);
}
2734
/* Dispatch table mapping ATT opcodes to their minimum payload length (after
 * the opcode byte), PDU category and handler. Looked up linearly by
 * bt_att_recv(); unlisted opcodes are rejected there.
 */
static const struct att_handler {
	uint8_t op;		/* ATT opcode */
	uint8_t expect_len;	/* Minimum payload length for this opcode */
	att_type_t type;	/* PDU category (request/response/...) */
	uint8_t (*func)(struct bt_att_chan *chan, struct net_buf *buf);
} handlers[] = {
	{ BT_ATT_OP_MTU_REQ,
		sizeof(struct bt_att_exchange_mtu_req),
		ATT_REQUEST,
		att_mtu_req },
	{ BT_ATT_OP_FIND_INFO_REQ,
		sizeof(struct bt_att_find_info_req),
		ATT_REQUEST,
		att_find_info_req },
	{ BT_ATT_OP_FIND_TYPE_REQ,
		sizeof(struct bt_att_find_type_req),
		ATT_REQUEST,
		att_find_type_req },
	{ BT_ATT_OP_READ_TYPE_REQ,
		sizeof(struct bt_att_read_type_req),
		ATT_REQUEST,
		att_read_type_req },
	{ BT_ATT_OP_READ_REQ,
		sizeof(struct bt_att_read_req),
		ATT_REQUEST,
		att_read_req },
	{ BT_ATT_OP_READ_BLOB_REQ,
		sizeof(struct bt_att_read_blob_req),
		ATT_REQUEST,
		att_read_blob_req },
#if defined(CONFIG_BT_GATT_READ_MULTIPLE)
	{ BT_ATT_OP_READ_MULT_REQ,
		BT_ATT_READ_MULT_MIN_LEN_REQ,
		ATT_REQUEST,
		att_read_mult_req },
#endif /* CONFIG_BT_GATT_READ_MULTIPLE */
#if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
	{ BT_ATT_OP_READ_MULT_VL_REQ,
		BT_ATT_READ_MULT_MIN_LEN_REQ,
		ATT_REQUEST,
		att_read_mult_vl_req },
#endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
	{ BT_ATT_OP_READ_GROUP_REQ,
		sizeof(struct bt_att_read_group_req),
		ATT_REQUEST,
		att_read_group_req },
	{ BT_ATT_OP_WRITE_REQ,
		sizeof(struct bt_att_write_req),
		ATT_REQUEST,
		att_write_req },
	{ BT_ATT_OP_PREPARE_WRITE_REQ,
		sizeof(struct bt_att_prepare_write_req),
		ATT_REQUEST,
		att_prepare_write_req },
	{ BT_ATT_OP_EXEC_WRITE_REQ,
		sizeof(struct bt_att_exec_write_req),
		ATT_REQUEST,
		att_exec_write_req },
	{ BT_ATT_OP_CONFIRM,
		0,
		ATT_CONFIRMATION,
		att_confirm },
	{ BT_ATT_OP_WRITE_CMD,
		sizeof(struct bt_att_write_cmd),
		ATT_COMMAND,
		att_write_cmd },
#if defined(CONFIG_BT_SIGNING)
	{ BT_ATT_OP_SIGNED_WRITE_CMD,
		(sizeof(struct bt_att_write_cmd) +
		 sizeof(struct bt_att_signature)),
		ATT_COMMAND,
		att_signed_write_cmd },
#endif /* CONFIG_BT_SIGNING */
#if defined(CONFIG_BT_GATT_CLIENT)
	{ BT_ATT_OP_ERROR_RSP,
		sizeof(struct bt_att_error_rsp),
		ATT_RESPONSE,
		att_error_rsp },
	{ BT_ATT_OP_MTU_RSP,
		sizeof(struct bt_att_exchange_mtu_rsp),
		ATT_RESPONSE,
		att_mtu_rsp },
	{ BT_ATT_OP_FIND_INFO_RSP,
		sizeof(struct bt_att_find_info_rsp),
		ATT_RESPONSE,
		att_handle_find_info_rsp },
	{ BT_ATT_OP_FIND_TYPE_RSP,
		sizeof(struct bt_att_handle_group),
		ATT_RESPONSE,
		att_handle_find_type_rsp },
	{ BT_ATT_OP_READ_TYPE_RSP,
		sizeof(struct bt_att_read_type_rsp),
		ATT_RESPONSE,
		att_handle_read_type_rsp },
	{ BT_ATT_OP_READ_RSP,
		0,
		ATT_RESPONSE,
		att_handle_read_rsp },
	{ BT_ATT_OP_READ_BLOB_RSP,
		0,
		ATT_RESPONSE,
		att_handle_read_blob_rsp },
#if defined(CONFIG_BT_GATT_READ_MULTIPLE)
	{ BT_ATT_OP_READ_MULT_RSP,
		0,
		ATT_RESPONSE,
		att_handle_read_mult_rsp },
#endif /* CONFIG_BT_GATT_READ_MULTIPLE */
#if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
	{ BT_ATT_OP_READ_MULT_VL_RSP,
		sizeof(struct bt_att_read_mult_vl_rsp),
		ATT_RESPONSE,
		att_handle_read_mult_vl_rsp },
#endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
	{ BT_ATT_OP_READ_GROUP_RSP,
		sizeof(struct bt_att_read_group_rsp),
		ATT_RESPONSE,
		att_handle_read_group_rsp },
	{ BT_ATT_OP_WRITE_RSP,
		0,
		ATT_RESPONSE,
		att_handle_write_rsp },
	{ BT_ATT_OP_PREPARE_WRITE_RSP,
		sizeof(struct bt_att_prepare_write_rsp),
		ATT_RESPONSE,
		att_handle_prepare_write_rsp },
	{ BT_ATT_OP_EXEC_WRITE_RSP,
		0,
		ATT_RESPONSE,
		att_handle_exec_write_rsp },
	{ BT_ATT_OP_NOTIFY,
		sizeof(struct bt_att_notify),
		ATT_NOTIFICATION,
		att_notify },
	{ BT_ATT_OP_INDICATE,
		sizeof(struct bt_att_indicate),
		ATT_INDICATION,
		att_indicate },
	{ BT_ATT_OP_NOTIFY_MULT,
		sizeof(struct bt_att_notify_mult),
		ATT_NOTIFICATION,
		att_notify_mult },
#endif /* CONFIG_BT_GATT_CLIENT */
};
2879
/* Classify an ATT opcode into its PDU category.
 *
 * Known opcodes are mapped explicitly; any other opcode with the command
 * bit (0x40) set is treated as a command (and thus silently ignored by the
 * caller), everything else is ATT_UNKNOWN.
 */
static att_type_t att_op_get_type(uint8_t op)
{
	switch (op) {
	case BT_ATT_OP_MTU_REQ:
	case BT_ATT_OP_FIND_INFO_REQ:
	case BT_ATT_OP_FIND_TYPE_REQ:
	case BT_ATT_OP_READ_TYPE_REQ:
	case BT_ATT_OP_READ_REQ:
	case BT_ATT_OP_READ_BLOB_REQ:
	case BT_ATT_OP_READ_MULT_REQ:
	case BT_ATT_OP_READ_MULT_VL_REQ:
	case BT_ATT_OP_READ_GROUP_REQ:
	case BT_ATT_OP_WRITE_REQ:
	case BT_ATT_OP_PREPARE_WRITE_REQ:
	case BT_ATT_OP_EXEC_WRITE_REQ:
		return ATT_REQUEST;
	case BT_ATT_OP_CONFIRM:
		return ATT_CONFIRMATION;
	case BT_ATT_OP_WRITE_CMD:
	case BT_ATT_OP_SIGNED_WRITE_CMD:
		return ATT_COMMAND;
	case BT_ATT_OP_ERROR_RSP:
	case BT_ATT_OP_MTU_RSP:
	case BT_ATT_OP_FIND_INFO_RSP:
	case BT_ATT_OP_FIND_TYPE_RSP:
	case BT_ATT_OP_READ_TYPE_RSP:
	case BT_ATT_OP_READ_RSP:
	case BT_ATT_OP_READ_BLOB_RSP:
	case BT_ATT_OP_READ_MULT_RSP:
	case BT_ATT_OP_READ_MULT_VL_RSP:
	case BT_ATT_OP_READ_GROUP_RSP:
	case BT_ATT_OP_WRITE_RSP:
	case BT_ATT_OP_PREPARE_WRITE_RSP:
	case BT_ATT_OP_EXEC_WRITE_RSP:
		return ATT_RESPONSE;
	case BT_ATT_OP_NOTIFY:
	case BT_ATT_OP_NOTIFY_MULT:
		return ATT_NOTIFICATION;
	case BT_ATT_OP_INDICATE:
		return ATT_INDICATION;
	}

	if (op & ATT_CMD_MASK) {
		return ATT_COMMAND;
	}

	return ATT_UNKNOWN;
}
2928
get_conn(struct bt_att_chan * att_chan)2929 static struct bt_conn *get_conn(struct bt_att_chan *att_chan)
2930 {
2931 return att_chan->chan.chan.conn;
2932 }
2933
/* L2CAP recv callback: validate and dispatch an incoming ATT PDU.
 *
 * Always returns 0 (the PDU is consumed); protocol errors are reported to
 * the peer with an ATT Error Response where the PDU category allows it.
 */
static int bt_att_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	struct bt_att_chan *att_chan = ATT_CHAN(chan);
	struct bt_conn *conn = get_conn(att_chan);
	struct bt_att_hdr *hdr;
	const struct att_handler *handler;
	uint8_t err;
	size_t i;

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Too small ATT PDU received");
		return 0;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	LOG_DBG("Received ATT chan %p code 0x%02x len %zu", att_chan, hdr->code,
		net_buf_frags_len(buf));

	/* Drop PDUs racing against disconnection */
	if (conn->state != BT_CONN_CONNECTED) {
		LOG_DBG("not connected: conn %p state %u", conn, conn->state);
		return 0;
	}

	if (!att_chan->att) {
		LOG_DBG("Ignore recv on detached ATT chan");
		return 0;
	}

	/* Linear lookup in the opcode dispatch table */
	for (i = 0, handler = NULL; i < ARRAY_SIZE(handlers); i++) {
		if (hdr->code == handlers[i].op) {
			handler = &handlers[i];
			break;
		}
	}

	if (!handler) {
		LOG_WRN("Unhandled ATT code 0x%02x", hdr->code);
		/* Commands and indications never get an error response */
		if (att_op_get_type(hdr->code) != ATT_COMMAND &&
		    att_op_get_type(hdr->code) != ATT_INDICATION) {
			send_err_rsp(att_chan, hdr->code, 0,
				     BT_ATT_ERR_NOT_SUPPORTED);
		}
		return 0;
	}

	if (buf->len < handler->expect_len) {
		LOG_ERR("Invalid len %u for code 0x%02x", buf->len, hdr->code);
		err = BT_ATT_ERR_INVALID_PDU;
	} else {
		err = handler->func(att_chan, buf);
	}

	/* Only requests may be answered with an error response here */
	if (handler->type == ATT_REQUEST && err) {
		LOG_DBG("ATT error 0x%02x", err);
		send_err_rsp(att_chan, hdr->code, 0, err);
	}

	return 0;
}
2993
att_get(struct bt_conn * conn)2994 static struct bt_att *att_get(struct bt_conn *conn)
2995 {
2996 struct bt_l2cap_chan *chan;
2997 struct bt_att_chan *att_chan;
2998
2999 if (conn->state != BT_CONN_CONNECTED) {
3000 LOG_WRN("Not connected");
3001 return NULL;
3002 }
3003
3004 chan = bt_l2cap_le_lookup_rx_cid(conn, BT_L2CAP_CID_ATT);
3005 if (!chan) {
3006 LOG_ERR("Unable to find ATT channel");
3007 return NULL;
3008 }
3009
3010 att_chan = ATT_CHAN(chan);
3011 if (!atomic_test_bit(att_chan->flags, ATT_CONNECTED)) {
3012 LOG_ERR("ATT channel not connected");
3013 return NULL;
3014 }
3015
3016 return att_chan->att;
3017 }
3018
/* Allocate a PDU on the first bearer whose MTU fits opcode + len bytes.
 *
 * This allocator must not be used for responses; those are created on the
 * bearer the request arrived on (see bt_att_create_rsp_pdu).
 */
struct net_buf *bt_att_create_pdu(struct bt_conn *conn, uint8_t op, size_t len)
{
	struct bt_att_chan *ch, *next;
	struct bt_att *att = att_get(conn);
	size_t needed = len + sizeof(op);

	if (att == NULL) {
		return NULL;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, ch, next, node) {
		if (bt_att_mtu(ch) >= needed) {
			return bt_att_chan_create_pdu(ch, op, len);
		}
	}

	LOG_WRN("No ATT channel for MTU %zu", needed);

	return NULL;
}
3042
/* Allocate a response PDU bound to the bearer the request came in on.
 *
 * Reserves L2CAP headroom (plus the SDU header on enhanced bearers) and
 * pre-fills the ATT opcode. Returns NULL if no buffer is available within
 * BT_ATT_TIMEOUT.
 */
struct net_buf *bt_att_create_rsp_pdu(struct bt_att_chan *chan, uint8_t op)
{
	size_t headroom;
	struct bt_att_hdr *hdr;
	struct bt_att_tx_meta_data *data;
	struct net_buf *buf;

	buf = net_buf_alloc(&att_pool, BT_ATT_TIMEOUT);
	if (!buf) {
		LOG_ERR("Unable to allocate buffer for op 0x%02x", op);
		return NULL;
	}

	headroom = BT_L2CAP_BUF_SIZE(0);

	/* EATT bearers carry an additional SDU length header */
	if (bt_att_is_enhanced(chan)) {
		headroom += BT_L2CAP_SDU_HDR_SIZE;
	}

	net_buf_reserve(buf, headroom);

	/* Pin the response to the requesting bearer via TX metadata */
	data = bt_att_get_tx_meta_data(buf);
	data->att_chan = chan;

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = op;

	return buf;
}
3072
/* Tear down a bt_att instance: flush all queues, fail outstanding requests
 * with -ECONNRESET and return the instance to its slab.
 */
static void att_reset(struct bt_att *att)
{
	struct net_buf *buf;

#if CONFIG_BT_ATT_PREPARE_COUNT > 0
	/* Discard queued buffers */
	while ((buf = net_buf_slist_get(&att->prep_queue))) {
		net_buf_unref(buf);
	}
#endif /* CONFIG_BT_ATT_PREPARE_COUNT > 0 */

#if defined(CONFIG_BT_EATT)
	struct k_work_sync sync;

	/* Stop any pending EATT connection attempt before freeing */
	(void)k_work_cancel_delayable_sync(&att->eatt.connection_work, &sync);
#endif /* CONFIG_BT_EATT */

	/* Drop any PDUs still waiting for a bearer */
	while ((buf = k_fifo_get(&att->tx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Notify pending requests */
	while (!sys_slist_is_empty(&att->reqs)) {
		struct bt_att_req *req;
		sys_snode_t *node;

		node = sys_slist_get_not_empty(&att->reqs);
		req = CONTAINER_OF(node, struct bt_att_req, node);
		if (req->func) {
			req->func(att->conn, -ECONNRESET, NULL, 0,
				  req->user_data);
		}

		bt_att_req_free(req);
	}

	/* FIXME: `att->conn` is not reference counted. Consider using `bt_conn_ref`
	 * and `bt_conn_unref` to follow convention.
	 */
	att->conn = NULL;
	k_mem_slab_free(&att_slab, (void *)att);
}
3115
/* Detach a bearer from its bt_att instance: unlink it, drop queued TX
 * buffers and fail any outstanding request with -ECONNRESET.
 */
static void att_chan_detach(struct bt_att_chan *chan)
{
	struct net_buf *buf;

	LOG_DBG("chan %p", chan);

	sys_slist_find_and_remove(&chan->att->chans, &chan->node);

	/* Release pending buffers */
	while ((buf = k_fifo_get(&chan->tx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	if (chan->req) {
		/* Notify outstanding request */
		att_handle_rsp(chan, NULL, 0, -ECONNRESET);
	}

	chan->att = NULL;
	atomic_clear_bit(chan->flags, ATT_CONNECTED);
}
3137
/* Delayed-work handler fired when an ATT transaction exceeds the 30 s
 * transaction timeout: tear down the bearer and disconnect the link.
 */
static void att_timeout(struct k_work *work)
{
	char addr[BT_ADDR_LE_STR_LEN];
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct bt_att_chan *chan = CONTAINER_OF(dwork, struct bt_att_chan,
						timeout_work);
	int err;

	bt_addr_le_to_str(bt_conn_get_dst(chan->att->conn), addr, sizeof(addr));
	LOG_ERR("ATT Timeout for device %s. Disconnecting...", addr);

	/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part F] page 480:
	 *
	 * A transaction not completed within 30 seconds shall time out. Such a
	 * transaction shall be considered to have failed and the local higher
	 * layers shall be informed of this failure. No more attribute protocol
	 * requests, commands, indications or notifications shall be sent to the
	 * target device on this ATT Bearer.
	 */
	bt_att_disconnected(&chan->chan.chan);

	/* The timeout state is local and can block new ATT operations, but does not affect the
	 * remote side. Disconnecting the GATT connection upon ATT timeout simplifies error handling
	 * for developers. This reduces rare failure conditions to a common one, allowing developers
	 * to handle unexpected disconnections without needing special cases for ATT timeouts.
	 */
	err = bt_conn_disconnect(chan->chan.chan.conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
	if (err) {
		LOG_ERR("Disconnecting failed (err %d)", err);
	}
}
3169
att_get_fixed_chan(struct bt_conn * conn)3170 static struct bt_att_chan *att_get_fixed_chan(struct bt_conn *conn)
3171 {
3172 struct bt_l2cap_chan *chan;
3173
3174 chan = bt_l2cap_le_lookup_tx_cid(conn, BT_L2CAP_CID_ATT);
3175 __ASSERT(chan, "No ATT channel found");
3176
3177 return ATT_CHAN(chan);
3178 }
3179
att_chan_attach(struct bt_att * att,struct bt_att_chan * chan)3180 static void att_chan_attach(struct bt_att *att, struct bt_att_chan *chan)
3181 {
3182 LOG_DBG("att %p chan %p flags %lu", att, chan, atomic_get(chan->flags));
3183
3184 if (sys_slist_is_empty(&att->chans)) {
3185 /* Init general queues when attaching the first channel */
3186 k_fifo_init(&att->tx_queue);
3187 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
3188 sys_slist_init(&att->prep_queue);
3189 #endif
3190 }
3191
3192 sys_slist_prepend(&att->chans, &chan->node);
3193 }
3194
/* L2CAP connected callback for an ATT bearer (UATT fixed channel or EATT
 * dynamic channel). Marks the channel usable and notifies GATT.
 */
static void bt_att_connected(struct bt_l2cap_chan *chan)
{
	struct bt_att_chan *att_chan = ATT_CHAN(chan);
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->tx.cid);

	atomic_set_bit(att_chan->flags, ATT_CONNECTED);

	/* A new bearer may change the connection-wide maximum MTU. */
	att_chan_mtu_updated(att_chan);

	k_work_init_delayable(&att_chan->timeout_work, att_timeout);

	bt_gatt_connected(le_chan->chan.conn);
}
3210
/* L2CAP disconnected callback. Detaches the channel from its ATT context
 * and, once the last bearer is gone, resets the context and notifies GATT.
 */
static void bt_att_disconnected(struct bt_l2cap_chan *chan)
{
	struct bt_att_chan *att_chan = ATT_CHAN(chan);
	struct bt_att *att = att_chan->att;
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->tx.cid);

	/* att_chan_detach() NULLs chan->att, so a second disconnect event on
	 * the same channel is ignored here.
	 */
	if (!att_chan->att) {
		LOG_DBG("Ignore disconnect on detached ATT chan");
		return;
	}

	att_chan_detach(att_chan);

	/* Don't reset if there are still channels to be used */
	if (!sys_slist_is_empty(&att->chans)) {
		return;
	}

	att_reset(att);

	bt_gatt_disconnected(le_chan->chan.conn);
}
3235
3236 #if defined(CONFIG_BT_SMP)
/* Re-encode and resend the request currently outstanding on @p att_chan
 * (used after a security upgrade). Returns BT_ATT_ERR_SUCCESS or an ATT
 * error code to be reported to the requester.
 */
static uint8_t att_req_retry(struct bt_att_chan *att_chan)
{
	struct bt_att_req *req = att_chan->req;
	struct net_buf *buf;

	/* Resend buffer */
	if (!req->encode) {
		/* This request does not support resending */
		return BT_ATT_ERR_AUTHENTICATION;
	}

	buf = bt_att_chan_create_pdu(att_chan, req->att_op, req->len);
	if (!buf) {
		return BT_ATT_ERR_UNLIKELY;
	}

	/* Rebuild the PDU payload from the stored request parameters. */
	if (req->encode(buf, req->len, req->user_data)) {
		net_buf_unref(buf);
		return BT_ATT_ERR_UNLIKELY;
	}

	if (chan_send(att_chan, buf)) {
		net_buf_unref(buf);
		return BT_ATT_ERR_UNLIKELY;
	}

	return BT_ATT_ERR_SUCCESS;
}
3266
/* L2CAP encrypt_change callback: on security failure fail a retrying
 * request, on success notify GATT and resend a request that was waiting
 * for elevated security.
 */
static void bt_att_encrypt_change(struct bt_l2cap_chan *chan,
				  uint8_t hci_status)
{
	struct bt_att_chan *att_chan = ATT_CHAN(chan);
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
	struct bt_conn *conn = le_chan->chan.conn;
	uint8_t err;

	LOG_DBG("chan %p conn %p handle %u sec_level 0x%02x status 0x%02x %s", le_chan, conn,
		conn->handle, conn->sec_level, hci_status, bt_hci_err_to_str(hci_status));

	if (!att_chan->att) {
		LOG_DBG("Ignore encrypt change on detached ATT chan");
		return;
	}

	/*
	 * If status (HCI status of security procedure) is non-zero, notify
	 * outstanding request about security failure.
	 */
	if (hci_status) {
		if (att_chan->req && att_chan->req->retrying) {
			att_handle_rsp(att_chan, NULL, 0,
				       BT_ATT_ERR_AUTHENTICATION);
		}

		return;
	}

	bt_gatt_encrypt_change(conn);

	/* No security upgrade happened; nothing to retry. */
	if (conn->sec_level == BT_SECURITY_L1) {
		return;
	}

	if (!(att_chan->req && att_chan->req->retrying)) {
		return;
	}

	LOG_DBG("Retrying");

	err = att_req_retry(att_chan);
	if (err) {
		LOG_DBG("Retry failed (%d)", err);
		att_handle_rsp(att_chan, NULL, 0, err);
	}
}
3314 #endif /* CONFIG_BT_SMP */
3315
/* L2CAP status callback: when the bearer becomes able to transmit
 * (BT_L2CAP_STATUS_OUT) and has no outstanding request, dequeue the next
 * pending request and send it.
 */
static void bt_att_status(struct bt_l2cap_chan *ch, atomic_t *status)
{
	struct bt_att_chan *chan = ATT_CHAN(ch);
	sys_snode_t *node;

	LOG_DBG("chan %p status %p", ch, status);

	if (!atomic_test_bit(status, BT_L2CAP_STATUS_OUT)) {
		return;
	}

	if (!chan->att) {
		LOG_DBG("Ignore status on detached ATT chan");
		return;
	}

	/* If there is a request pending don't attempt to send */
	if (chan->req) {
		return;
	}

	/* Pull next request from the list */
	node = sys_slist_get(&chan->att->reqs);
	if (!node) {
		return;
	}

	if (bt_att_chan_req_send(chan, ATT_REQ(node)) >= 0) {
		return;
	}

	/* Prepend back to the list as it could not be sent */
	sys_slist_prepend(&chan->att->reqs, node);
}
3350
bt_att_released(struct bt_l2cap_chan * ch)3351 static void bt_att_released(struct bt_l2cap_chan *ch)
3352 {
3353 struct bt_att_chan *chan = ATT_CHAN(ch);
3354
3355 LOG_DBG("chan %p", chan);
3356
3357 k_mem_slab_free(&chan_slab, (void *)chan);
3358 }
3359
3360 #if defined(CONFIG_BT_EATT)
/* L2CAP reconfigured callback: the EATT channel MTU may have changed, so
 * recompute the connection-wide maximum.
 */
static void bt_att_reconfigured(struct bt_l2cap_chan *l2cap_chan)
{
	struct bt_att_chan *chan = ATT_CHAN(l2cap_chan);

	LOG_DBG("chan %p", chan);

	att_chan_mtu_updated(chan);
}
3369 #endif /* CONFIG_BT_EATT */
3370
/* Allocate a new ATT channel, attach it to @p att and set up its initial
 * MTUs. Enforces the ATT_CHAN_MAX per-connection channel limit. Returns
 * NULL when the limit is reached or the slab is exhausted.
 */
static struct bt_att_chan *att_chan_new(struct bt_att *att, atomic_val_t flags)
{
	int quota = 0;
	/* Shared ops table for every ATT channel (UATT and EATT alike). */
	static struct bt_l2cap_chan_ops ops = {
		.connected = bt_att_connected,
		.disconnected = bt_att_disconnected,
		.recv = bt_att_recv,
		.sent = bt_att_sent,
		.status = bt_att_status,
#if defined(CONFIG_BT_SMP)
		.encrypt_change = bt_att_encrypt_change,
#endif /* CONFIG_BT_SMP */
		.released = bt_att_released,
#if defined(CONFIG_BT_EATT)
		.reconfigured = bt_att_reconfigured,
#endif /* CONFIG_BT_EATT */
	};
	struct bt_att_chan *chan;

	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
		if (chan->att == att) {
			quota++;
		}

		if (quota == ATT_CHAN_MAX) {
			LOG_DBG("Maximum number of channels reached: %d", quota);
			return NULL;
		}
	}

	if (k_mem_slab_alloc(&chan_slab, (void **)&chan, K_NO_WAIT)) {
		LOG_WRN("No available ATT channel for conn %p", att->conn);
		return NULL;
	}

	(void)memset(chan, 0, sizeof(*chan));
	chan->chan.chan.ops = &ops;
	k_fifo_init(&chan->tx_queue);
	atomic_set(chan->flags, flags);
	chan->att = att;
	att_chan_attach(att, chan);

	if (bt_att_is_enhanced(chan)) {
		/* EATT: The MTU will be sent in the ECRED conn req/rsp PDU. The
		 * TX MTU is received on L2CAP-level.
		 */
		chan->chan.rx.mtu = BT_LOCAL_ATT_MTU_EATT;
	} else {
		/* UATT: L2CAP Basic is not able to communicate the L2CAP MTU
		 * without help. ATT has to manage the MTU. The initial MTU is
		 * defined by spec.
		 */
		chan->chan.tx.mtu = BT_ATT_DEFAULT_LE_MTU;
		chan->chan.rx.mtu = BT_ATT_DEFAULT_LE_MTU;
	}

	return chan;
}
3429
3430 #if defined(CONFIG_BT_EATT)
bt_eatt_count(struct bt_conn * conn)3431 size_t bt_eatt_count(struct bt_conn *conn)
3432 {
3433 struct bt_att *att;
3434 struct bt_att_chan *chan;
3435 size_t eatt_count = 0;
3436
3437 if (!conn) {
3438 return 0;
3439 }
3440
3441 att = att_get(conn);
3442 if (!att) {
3443 return 0;
3444 }
3445
3446 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3447 if (bt_att_is_enhanced(chan) &&
3448 atomic_test_bit(chan->flags, ATT_CONNECTED)) {
3449 eatt_count++;
3450 }
3451 }
3452
3453 return eatt_count;
3454 }
3455
att_enhanced_connection_work_handler(struct k_work * work)3456 static void att_enhanced_connection_work_handler(struct k_work *work)
3457 {
3458 const struct k_work_delayable *dwork = k_work_delayable_from_work(work);
3459 const struct bt_att *att = CONTAINER_OF(dwork, struct bt_att, eatt.connection_work);
3460 const int err = bt_eatt_connect(att->conn, att->eatt.chans_to_connect);
3461
3462 if (err == -ENOMEM) {
3463 LOG_DBG("Failed to connect %d EATT channels, central has probably "
3464 "already established some.",
3465 att->eatt.chans_to_connect);
3466 } else if (err < 0) {
3467 LOG_WRN("Failed to connect %d EATT channels (err: %d)", att->eatt.chans_to_connect,
3468 err);
3469 }
3470
3471 }
3472 #endif /* CONFIG_BT_EATT */
3473
bt_att_accept(struct bt_conn * conn,struct bt_l2cap_chan ** ch)3474 static int bt_att_accept(struct bt_conn *conn, struct bt_l2cap_chan **ch)
3475 {
3476 struct bt_att *att;
3477 struct bt_att_chan *chan;
3478
3479 LOG_DBG("conn %p handle %u", conn, conn->handle);
3480
3481 if (k_mem_slab_alloc(&att_slab, (void **)&att, K_NO_WAIT)) {
3482 LOG_ERR("No available ATT context for conn %p", conn);
3483 return -ENOMEM;
3484 }
3485
3486 att_handle_rsp_thread = k_current_get();
3487
3488 (void)memset(att, 0, sizeof(*att));
3489 att->conn = conn;
3490 sys_slist_init(&att->reqs);
3491 sys_slist_init(&att->chans);
3492
3493 #if defined(CONFIG_BT_EATT)
3494 k_work_init_delayable(&att->eatt.connection_work,
3495 att_enhanced_connection_work_handler);
3496 #endif /* CONFIG_BT_EATT */
3497
3498 chan = att_chan_new(att, 0);
3499 if (!chan) {
3500 return -ENOMEM;
3501 }
3502
3503 *ch = &chan->chan.chan;
3504
3505 return 0;
3506 }
3507
/* The L2CAP channel section is sorted lexicographically. Make sure that ATT fixed channel will be
 * placed as the last one to ensure that SMP channel is properly initialized before bt_att_connected
 * tries to send security request.
 * (The "z_" name prefix is what makes this entry sort last.)
 */
BT_L2CAP_CHANNEL_DEFINE(z_att_fixed_chan, BT_L2CAP_CID_ATT, bt_att_accept, NULL);
3513
3514 #if defined(CONFIG_BT_EATT)
/* Compute the collision-mitigation delay before (re)trying an EATT
 * credit-based connection: none for the central, at least 100 ms (plus a
 * small random jitter) for the peripheral.
 */
static k_timeout_t credit_based_connection_delay(struct bt_conn *conn)
{
	/*
	 * 5.3 Vol 3, Part G, Section 5.4 L2CAP COLLISION MITIGATION
	 * ... In this situation, the Central may retry
	 * immediately but the Peripheral shall wait a minimum of 100 ms before retrying;
	 * on LE connections, the Peripheral shall wait at least 2 *
	 * (connPeripheralLatency + 1) * connInterval if that is longer.
	 */

	if (IS_ENABLED(CONFIG_BT_CENTRAL) && conn->role == BT_CONN_ROLE_CENTRAL) {
		return K_NO_WAIT;
	} else if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
		uint8_t random;
		int err;

		err = bt_rand(&random, sizeof(random));
		if (err) {
			/* Fall back to no jitter on RNG failure. */
			random = 0;
		}

		const uint8_t rand_delay = random & 0x7; /* Small random delay for IOP */
		/* The maximum value of (latency + 1) * 2 multiplied with the
		 * maximum connection interval has a maximum value of
		 * 4000000000 which can be stored in 32-bits, so this won't
		 * result in an overflow
		 */
		const uint32_t calculated_delay_us =
			2 * (conn->le.latency + 1) * BT_CONN_INTERVAL_TO_US(conn->le.interval);
		const uint32_t calculated_delay_ms = calculated_delay_us / USEC_PER_MSEC;

		return K_MSEC(MAX(100, calculated_delay_ms + rand_delay));
	}

	/* Must be either central or peripheral */
	__ASSERT_NO_MSG(false);
	CODE_UNREACHABLE;
}
3553
/* Schedule the EATT connection work item to bring up @p chans_to_connect
 * bearers after the collision-mitigation delay. Returns -ENOTCONN when no
 * ATT context exists, otherwise the k_work_reschedule() result.
 */
static int att_schedule_eatt_connect(struct bt_conn *conn, uint8_t chans_to_connect)
{
	struct bt_att *att = att_get(conn);

	if (att == NULL) {
		return -ENOTCONN;
	}

	att->eatt.chans_to_connect = chans_to_connect;

	return k_work_reschedule(&att->eatt.connection_work,
				 credit_based_connection_delay(conn));
}
3567
/* Detect an ECRED connection request collision (both our request and the
 * peer's were answered with NO_RESOURCES) and schedule a retry for the
 * channels that were not established.
 */
static void handle_potential_collision(struct bt_att *att)
{
	__ASSERT_NO_MSG(att);

	int err;
	size_t to_connect = att->eatt.prev_conn_req_missing_chans;

	if (att->eatt.prev_conn_rsp_result == BT_L2CAP_LE_ERR_NO_RESOURCES &&
	    att->eatt.prev_conn_req_result == BT_L2CAP_LE_ERR_NO_RESOURCES) {
		LOG_DBG("Credit based connection request collision detected");

		/* Reset to not keep retrying on repeated failures */
		att->eatt.prev_conn_rsp_result = 0;
		att->eatt.prev_conn_req_result = 0;
		att->eatt.prev_conn_req_missing_chans = 0;

		if (to_connect == 0) {
			return;
		}

		err = att_schedule_eatt_connect(att->conn, to_connect);
		if (err < 0) {
			LOG_ERR("Failed to schedule EATT connection retry (err: %d)", err);
		}
	}
}
3594
/* ECRED connection request callback: record the result we answered with
 * so a request/response collision can be detected.
 */
static void ecred_connect_req_cb(struct bt_conn *conn, uint16_t result, uint16_t psm)
{
	struct bt_att *att = att_get(conn);

	if (att == NULL) {
		return;
	}

	if (psm != BT_EATT_PSM) {
		/* Collision mitigation is only a requirement on the EATT PSM */
		return;
	}

	att->eatt.prev_conn_rsp_result = result;
	handle_potential_collision(att);
}
3612
/* ECRED connection response callback: record the peer's answer and how
 * many of our requested channels were not established, then check for a
 * collision.
 */
static void ecred_connect_rsp_cb(struct bt_conn *conn, uint16_t result,
				 uint8_t attempted_to_connect, uint8_t succeeded_to_connect,
				 uint16_t psm)
{
	struct bt_att *att = att_get(conn);

	if (att == NULL) {
		return;
	}

	if (psm != BT_EATT_PSM) {
		/* Collision mitigation is only a requirement on the EATT PSM */
		return;
	}

	att->eatt.prev_conn_req_result = result;
	att->eatt.prev_conn_req_missing_chans =
		attempted_to_connect - succeeded_to_connect;

	handle_potential_collision(att);
}
3634
/* Establish up to @p num_channels EATT bearers on @p conn: allocate the
 * channels first, then issue ECRED connection requests in batches of
 * BT_L2CAP_ECRED_CHAN_MAX_PER_REQ. Requires an encrypted link.
 * Returns 0 on success, -EINVAL on bad arguments, -EPERM without
 * encryption, -ENOMEM when no channel could be allocated, or the first
 * L2CAP connect error.
 */
int bt_eatt_connect(struct bt_conn *conn, size_t num_channels)
{
	struct bt_att_chan *att_chan;
	struct bt_att *att;
	/* +1 so the array is always NULL-terminated for the L2CAP API. */
	struct bt_l2cap_chan *chan[CONFIG_BT_EATT_MAX + 1] = {};
	size_t offset = 0;
	size_t i = 0;
	int err;

	if (!conn) {
		return -EINVAL;
	}

	/* Check the encryption level for EATT */
	if (bt_conn_get_security(conn) < BT_SECURITY_L2) {
		/* Vol 3, Part G, Section 5.3.2 Channel Requirements states:
		 * The channel shall be encrypted.
		 */
		return -EPERM;
	}

	if (num_channels > CONFIG_BT_EATT_MAX || num_channels == 0) {
		return -EINVAL;
	}

	att_chan = att_get_fixed_chan(conn);
	att = att_chan->att;

	/* Allocate as many channels as possible; fewer than requested is
	 * tolerated as long as at least one was obtained.
	 */
	while (num_channels--) {
		att_chan = att_chan_new(att, BIT(ATT_ENHANCED));
		if (!att_chan) {
			break;
		}

		chan[i] = &att_chan->chan.chan;
		i++;
	}

	if (!i) {
		return -ENOMEM;
	}

	while (offset < i) {
		/* bt_l2cap_ecred_chan_connect() uses the first BT_L2CAP_ECRED_CHAN_MAX_PER_REQ
		 * elements of the array or until a null-terminator is reached.
		 */
		err = bt_l2cap_ecred_chan_connect(conn, &chan[offset], BT_EATT_PSM);
		if (err < 0) {
			return err;
		}

		offset += BT_L2CAP_ECRED_CHAN_MAX_PER_REQ;
	}

	return 0;
}
3691
3692 #if defined(CONFIG_BT_EATT_AUTO_CONNECT)
/* security_changed callback: once the link is encrypted and only the
 * fixed channel exists, schedule automatic EATT bearer creation.
 */
static void eatt_auto_connect(struct bt_conn *conn, bt_security_t level,
			      enum bt_security_err err)
{
	int ret;

	if ((err != 0) || (level < BT_SECURITY_L2) ||
	    !bt_att_fixed_chan_only(conn)) {
		return;
	}

	ret = att_schedule_eatt_connect(conn, CONFIG_BT_EATT_MAX);
	if (ret < 0) {
		LOG_WRN("Automatic creation of EATT bearers failed on "
			"connection %s with error %d",
			bt_addr_le_str(bt_conn_get_dst(conn)), ret);
	}
}
3709
/* Hook automatic EATT bearer creation into connection security updates. */
BT_CONN_CB_DEFINE(conn_callbacks) = {
	.security_changed = eatt_auto_connect,
};
3713
3714 #endif /* CONFIG_BT_EATT_AUTO_CONNECT */
3715
/* Request disconnection of every EATT bearer on @p conn.
 * Returns the result of the last disconnect attempt, or -ENOTCONN when no
 * enhanced bearer was found (-EINVAL for a NULL connection).
 */
int bt_eatt_disconnect(struct bt_conn *conn)
{
	struct bt_att_chan *chan;
	struct bt_att *att;
	int err = -ENOTCONN;

	if (!conn) {
		return -EINVAL;
	}

	chan = att_get_fixed_chan(conn);
	att = chan->att;

	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
		if (bt_att_is_enhanced(chan)) {
			err = bt_l2cap_chan_disconnect(&chan->chan.chan);
		}
	}

	return err;
}
3737
3738 #if defined(CONFIG_BT_TESTING)
bt_eatt_disconnect_one(struct bt_conn * conn)3739 int bt_eatt_disconnect_one(struct bt_conn *conn)
3740 {
3741 struct bt_att *att;
3742 struct bt_att_chan *chan;
3743
3744 if (!conn) {
3745 return -EINVAL;
3746 }
3747
3748 chan = att_get_fixed_chan(conn);
3749 att = chan->att;
3750
3751 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3752 if (bt_att_is_enhanced(chan)) {
3753 return bt_l2cap_chan_disconnect(&chan->chan.chan);
3754 }
3755 }
3756
3757 return -ENOTCONN;
3758 }
3759
/* Test helper: reconfigure all EATT bearers on @p conn to @p mtu, issuing
 * ECRED reconfigure requests in batches of BT_L2CAP_ECRED_CHAN_MAX_PER_REQ.
 * Returns 0 on success or the first L2CAP error.
 */
int bt_eatt_reconfigure(struct bt_conn *conn, uint16_t mtu)
{
	struct bt_att_chan *att_chan = att_get_fixed_chan(conn);
	struct bt_att *att = att_chan->att;
	/* +1 so the array is always NULL-terminated for the L2CAP API. */
	struct bt_l2cap_chan *chans[CONFIG_BT_EATT_MAX + 1] = {};
	size_t offset = 0;
	size_t i = 0;
	int err;

	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, att_chan, node) {
		if (bt_att_is_enhanced(att_chan)) {
			chans[i] = &att_chan->chan.chan;
			i++;
		}
	}

	while (offset < i) {
		/* bt_l2cap_ecred_chan_reconfigure() uses the first BT_L2CAP_ECRED_CHAN_MAX_PER_REQ
		 * elements of the array or until a null-terminator is reached.
		 */
		err = bt_l2cap_ecred_chan_reconfigure(&chans[offset], mtu);
		if (err < 0) {
			return err;
		}

		offset += BT_L2CAP_ECRED_CHAN_MAX_PER_REQ;
	}

	return 0;
}
3790 #endif /* CONFIG_BT_TESTING */
3791 #endif /* CONFIG_BT_EATT */
3792
bt_eatt_accept(struct bt_conn * conn,struct bt_l2cap_server * server,struct bt_l2cap_chan ** chan)3793 static int bt_eatt_accept(struct bt_conn *conn, struct bt_l2cap_server *server,
3794 struct bt_l2cap_chan **chan)
3795 {
3796 struct bt_att_chan *att_chan = att_get_fixed_chan(conn);
3797 struct bt_att *att = att_chan->att;
3798
3799 LOG_DBG("conn %p handle %u", conn, conn->handle);
3800
3801 att_chan = att_chan_new(att, BIT(ATT_ENHANCED));
3802 if (att_chan) {
3803 *chan = &att_chan->chan.chan;
3804 return 0;
3805 }
3806
3807 return -ENOMEM;
3808 }
3809
/* Register the EATT L2CAP server and the ECRED collision-mitigation
 * callbacks. Safe to call more than once: registration is skipped when the
 * server is already known for the PSM.
 */
static void bt_eatt_init(void)
{
	int err;
	static struct bt_l2cap_server eatt_l2cap = {
		.psm = BT_EATT_PSM,
		.sec_level = BT_SECURITY_L2,
		.accept = bt_eatt_accept,
	};
	struct bt_l2cap_server *registered_server;

	LOG_DBG("");

	/* Check if eatt_l2cap server has already been registered. */
	registered_server = bt_l2cap_server_lookup_psm(eatt_l2cap.psm);
	if (registered_server != &eatt_l2cap) {
		err = bt_l2cap_server_register(&eatt_l2cap);
		if (err < 0) {
			LOG_ERR("EATT Server registration failed %d", err);
		}
	}

#if defined(CONFIG_BT_EATT)
	static const struct bt_l2cap_ecred_cb cb = {
		.ecred_conn_rsp = ecred_connect_rsp_cb,
		.ecred_conn_req = ecred_connect_req_cb,
	};

	bt_l2cap_register_ecred_cb(&cb);
#endif /* CONFIG_BT_EATT */
}
3840
/* Initialize the ATT layer: bring up GATT and, when enabled, EATT. */
void bt_att_init(void)
{
	bt_gatt_init();

	if (IS_ENABLED(CONFIG_BT_EATT)) {
		bt_eatt_init();
	}
}
3849
bt_att_get_mtu(struct bt_conn * conn)3850 uint16_t bt_att_get_mtu(struct bt_conn *conn)
3851 {
3852 struct bt_att_chan *chan, *tmp;
3853 struct bt_att *att;
3854 uint16_t mtu = 0;
3855
3856 att = att_get(conn);
3857 if (!att) {
3858 return 0;
3859 }
3860
3861 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
3862 if (bt_att_mtu(chan) > mtu) {
3863 mtu = bt_att_mtu(chan);
3864 }
3865 }
3866
3867 return mtu;
3868 }
3869
bt_att_get_uatt_mtu(struct bt_conn * conn)3870 uint16_t bt_att_get_uatt_mtu(struct bt_conn *conn)
3871 {
3872 struct bt_att_chan *chan, *tmp;
3873 struct bt_att *att;
3874
3875 att = att_get(conn);
3876 if (!att) {
3877 return 0;
3878 }
3879
3880 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
3881 if (!bt_att_is_enhanced(chan)) {
3882 return bt_att_mtu(chan);
3883 }
3884 }
3885
3886 LOG_WRN("No UATT channel found in %p", conn);
3887
3888 return 0;
3889 }
3890
/* Called when @p updated_chan has a (possibly) new MTU. Recomputes the
 * connection-wide maximum TX/RX MTUs and notifies GATT only when the
 * updated channel now defines one of the maxima.
 */
static void att_chan_mtu_updated(struct bt_att_chan *updated_chan)
{
	struct bt_att *att = updated_chan->att;
	struct bt_att_chan *chan, *tmp;
	uint16_t max_tx = 0, max_rx = 0;

	/* Get maximum MTU's of other channels */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (chan == updated_chan) {
			continue;
		}
		max_tx = MAX(max_tx, chan->chan.tx.mtu);
		max_rx = MAX(max_rx, chan->chan.rx.mtu);
	}

	/* If either maximum MTU has changed */
	if ((updated_chan->chan.tx.mtu > max_tx) ||
	    (updated_chan->chan.rx.mtu > max_rx)) {
		max_tx = MAX(max_tx, updated_chan->chan.tx.mtu);
		max_rx = MAX(max_rx, updated_chan->chan.rx.mtu);
		bt_gatt_att_max_mtu_changed(att->conn, max_tx, max_rx);
	}
}
3914
/* Allocate a zero-initialized ATT request from the request slab, waiting
 * up to @p timeout. The wait is suppressed on the response-handling thread
 * to avoid deadlock. Returns NULL when no request is available.
 */
struct bt_att_req *bt_att_req_alloc(k_timeout_t timeout)
{
	struct bt_att_req *req = NULL;

	if (k_current_get() == att_handle_rsp_thread) {
		/* No req will be fulfilled while blocking on the bt_recv thread.
		 * Blocking would cause deadlock.
		 */
		LOG_DBG("Timeout discarded. No blocking on bt_recv thread.");
		timeout = K_NO_WAIT;
	}

	if (k_mem_slab_alloc(&req_slab, (void **)&req, timeout) != 0) {
		LOG_DBG("No space for req");
		return NULL;
	}

	LOG_DBG("req %p", req);

	(void)memset(req, 0, sizeof(*req));

	return req;
}
3939
bt_att_req_free(struct bt_att_req * req)3940 void bt_att_req_free(struct bt_att_req *req)
3941 {
3942 LOG_DBG("req %p", req);
3943
3944 if (req->buf) {
3945 net_buf_unref(req->buf);
3946 req->buf = NULL;
3947 }
3948
3949 k_mem_slab_free(&req_slab, (void *)req);
3950 }
3951
bt_att_send(struct bt_conn * conn,struct net_buf * buf)3952 int bt_att_send(struct bt_conn *conn, struct net_buf *buf)
3953 {
3954 struct bt_att *att;
3955
3956 __ASSERT_NO_MSG(conn);
3957 __ASSERT_NO_MSG(buf);
3958
3959 att = att_get(conn);
3960 if (!att) {
3961 net_buf_unref(buf);
3962 return -ENOTCONN;
3963 }
3964
3965 k_fifo_put(&att->tx_queue, buf);
3966 att_send_process(att);
3967
3968 return 0;
3969 }
3970
/* Append @p req to the connection's pending-request list and trigger
 * request processing. Returns -ENOTCONN when no ATT context exists.
 */
int bt_att_req_send(struct bt_conn *conn, struct bt_att_req *req)
{
	struct bt_att *att;

	LOG_DBG("conn %p req %p", conn, req);

	__ASSERT_NO_MSG(conn);
	__ASSERT_NO_MSG(req);

	/* NOTE(review): the scheduler lock presumably keeps the list append
	 * and att_req_send_process() atomic w.r.t. other threads queuing or
	 * consuming requests — confirm the exact race this guards.
	 */
	k_sched_lock();

	att = att_get(conn);
	if (!att) {
		k_sched_unlock();
		return -ENOTCONN;
	}

	sys_slist_append(&att->reqs, &req->node);
	att_req_send_process(att);

	k_sched_unlock();

	return 0;
}
3995
/* Cancel @p req if it is the request currently outstanding on @p chan and
 * free it. Returns false when the channel holds a different request.
 * The slot is replaced with the &cancel sentinel — NOTE(review):
 * presumably so the eventual response is consumed without invoking a user
 * callback; confirm against att_handle_rsp().
 */
static bool bt_att_chan_req_cancel(struct bt_att_chan *chan,
				   struct bt_att_req *req)
{
	if (chan->req != req) {
		return false;
	}

	chan->req = &cancel;

	bt_att_req_free(req);

	return true;
}
4009
/* Cancel @p req on @p conn: if it is outstanding on some channel it is
 * cancelled there, otherwise it is removed from the pending list. The
 * request is freed in either case. NULL arguments are ignored.
 */
void bt_att_req_cancel(struct bt_conn *conn, struct bt_att_req *req)
{
	struct bt_att *att;
	struct bt_att_chan *chan, *tmp;

	LOG_DBG("req %p", req);

	if (!conn || !req) {
		return;
	}

	att = att_get(conn);
	if (!att) {
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		/* Check if request is outstanding */
		if (bt_att_chan_req_cancel(chan, req)) {
			return;
		}
	}

	/* Remove request from the list */
	sys_slist_find_and_remove(&att->reqs, &req->node);

	bt_att_req_free(req);
}
4038
bt_att_find_req_by_user_data(struct bt_conn * conn,const void * user_data)4039 struct bt_att_req *bt_att_find_req_by_user_data(struct bt_conn *conn, const void *user_data)
4040 {
4041 struct bt_att *att;
4042 struct bt_att_chan *chan;
4043 struct bt_att_req *req;
4044
4045 att = att_get(conn);
4046 if (!att) {
4047 return NULL;
4048 }
4049
4050 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
4051 if (chan->req->user_data == user_data) {
4052 return chan->req;
4053 }
4054 }
4055
4056 SYS_SLIST_FOR_EACH_CONTAINER(&att->reqs, req, node) {
4057 if (req->user_data == user_data) {
4058 return req;
4059 }
4060 }
4061
4062 return NULL;
4063 }
4064
/* Return true when only the fixed (UATT) ATT channel exists on @p conn,
 * i.e. no EATT bearer is currently connected.
 */
bool bt_att_fixed_chan_only(struct bt_conn *conn)
{
#if defined(CONFIG_BT_EATT)
	return bt_eatt_count(conn) == 0;
#else
	return true;
#endif /* CONFIG_BT_EATT */
}
4073
bt_att_clear_out_of_sync_sent(struct bt_conn * conn)4074 void bt_att_clear_out_of_sync_sent(struct bt_conn *conn)
4075 {
4076 struct bt_att *att = att_get(conn);
4077 struct bt_att_chan *chan;
4078
4079 if (!att) {
4080 return;
4081 }
4082
4083 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
4084 atomic_clear_bit(chan->flags, ATT_OUT_OF_SYNC_SENT);
4085 }
4086 }
4087
bt_att_out_of_sync_sent_on_fixed(struct bt_conn * conn)4088 bool bt_att_out_of_sync_sent_on_fixed(struct bt_conn *conn)
4089 {
4090 struct bt_l2cap_chan *l2cap_chan;
4091 struct bt_att_chan *att_chan;
4092
4093 l2cap_chan = bt_l2cap_le_lookup_rx_cid(conn, BT_L2CAP_CID_ATT);
4094 if (!l2cap_chan) {
4095 return false;
4096 }
4097
4098 att_chan = ATT_CHAN(l2cap_chan);
4099 return atomic_test_bit(att_chan->flags, ATT_OUT_OF_SYNC_SENT);
4100 }
4101
/* Initialize the TX metadata attached to @p buf: completion callback,
 * its user data, the channel-selection option, and an attribute count of
 * one.
 */
void bt_att_set_tx_meta_data(struct net_buf *buf, bt_gatt_complete_func_t func, void *user_data,
			     enum bt_att_chan_opt chan_opt)
{
	struct bt_att_tx_meta_data *meta = bt_att_get_tx_meta_data(buf);

	meta->func = func;
	meta->user_data = user_data;
	meta->attr_count = 1;
	meta->chan_opt = chan_opt;
}
4112
/* Add @p attr_count to the attribute count stored in the TX metadata of
 * @p buf.
 */
void bt_att_increment_tx_meta_data_attr_count(struct net_buf *buf, uint16_t attr_count)
{
	struct bt_att_tx_meta_data *meta = bt_att_get_tx_meta_data(buf);

	meta->attr_count += attr_count;
}
4119
bt_att_tx_meta_data_match(const struct net_buf * buf,bt_gatt_complete_func_t func,const void * user_data,enum bt_att_chan_opt chan_opt)4120 bool bt_att_tx_meta_data_match(const struct net_buf *buf, bt_gatt_complete_func_t func,
4121 const void *user_data, enum bt_att_chan_opt chan_opt)
4122 {
4123 const struct bt_att_tx_meta_data *meta = bt_att_get_tx_meta_data(buf);
4124
4125 return ((meta->func == func) &&
4126 (meta->user_data == user_data) &&
4127 (meta->chan_opt == chan_opt));
4128 }
4129
bt_att_chan_opt_valid(struct bt_conn * conn,enum bt_att_chan_opt chan_opt)4130 bool bt_att_chan_opt_valid(struct bt_conn *conn, enum bt_att_chan_opt chan_opt)
4131 {
4132 if ((chan_opt & (BT_ATT_CHAN_OPT_ENHANCED_ONLY | BT_ATT_CHAN_OPT_UNENHANCED_ONLY)) ==
4133 (BT_ATT_CHAN_OPT_ENHANCED_ONLY | BT_ATT_CHAN_OPT_UNENHANCED_ONLY)) {
4134 /* Enhanced and Unenhanced are mutually exclusive */
4135 return false;
4136 }
4137
4138 /* Choosing EATT requires EATT channels connected and encryption enabled */
4139 if (chan_opt & BT_ATT_CHAN_OPT_ENHANCED_ONLY) {
4140 return (bt_conn_get_security(conn) > BT_SECURITY_L1) &&
4141 !bt_att_fixed_chan_only(conn);
4142 }
4143
4144 return true;
4145 }
4146
bt_gatt_authorization_cb_register(const struct bt_gatt_authorization_cb * cb)4147 int bt_gatt_authorization_cb_register(const struct bt_gatt_authorization_cb *cb)
4148 {
4149 if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
4150 return -ENOSYS;
4151 }
4152
4153 if (!cb) {
4154 authorization_cb = NULL;
4155 return 0;
4156 }
4157
4158 if (authorization_cb) {
4159 return -EALREADY;
4160 }
4161
4162 authorization_cb = cb;
4163
4164 return 0;
4165 }
4166