1 /* att.c - Attribute protocol handling */
2
3 /*
4 * Copyright (c) 2015-2016 Intel Corporation
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 #include <zephyr/kernel.h>
10 #include <string.h>
11 #include <errno.h>
12 #include <stdbool.h>
13 #include <zephyr/sys/atomic.h>
14 #include <zephyr/sys/byteorder.h>
15 #include <zephyr/sys/util.h>
16
17 #include <zephyr/bluetooth/hci.h>
18 #include <zephyr/bluetooth/bluetooth.h>
19 #include <zephyr/bluetooth/uuid.h>
20 #include <zephyr/bluetooth/att.h>
21 #include <zephyr/bluetooth/gatt.h>
22 #include <zephyr/drivers/bluetooth/hci_driver.h>
23
24 #include "common/bt_str.h"
25
26 #include "hci_core.h"
27 #include "conn_internal.h"
28 #include "l2cap_internal.h"
29 #include "smp.h"
30 #include "att_internal.h"
31 #include "gatt_internal.h"
32
33 #define LOG_LEVEL CONFIG_BT_ATT_LOG_LEVEL
34 #include <zephyr/logging/log.h>
35 LOG_MODULE_REGISTER(bt_att);
36
37 #define ATT_CHAN(_ch) CONTAINER_OF(_ch, struct bt_att_chan, chan.chan)
38 #define ATT_REQ(_node) CONTAINER_OF(_node, struct bt_att_req, node)
39
40 #define ATT_CMD_MASK 0x40
41
42 #if defined(CONFIG_BT_EATT)
43 #define ATT_CHAN_MAX (CONFIG_BT_EATT_MAX + 1)
44 #else
45 #define ATT_CHAN_MAX 1
46 #endif /* CONFIG_BT_EATT */
47
/* Coarse classification of ATT opcodes, used to choose flow control and
 * "sent" callback behaviour (see att_op_get_type() and att_on_sent_cb()).
 */
typedef enum __packed {
	ATT_COMMAND,
	ATT_REQUEST,
	ATT_RESPONSE,
	ATT_NOTIFICATION,
	ATT_CONFIRMATION,
	ATT_INDICATION,
	ATT_UNKNOWN,
} att_type_t;
57
58 static att_type_t att_op_get_type(uint8_t op);
59
60 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
/* Per-buffer user data for buffers from prep_pool (queued prepare writes):
 * the target attribute handle and the write offset within its value.
 */
struct bt_attr_data {
	uint16_t handle;
	uint16_t offset;
};
65
66 /* Pool for incoming ATT packets */
67 NET_BUF_POOL_DEFINE(prep_pool, CONFIG_BT_ATT_PREPARE_COUNT, BT_ATT_BUF_SIZE,
68 sizeof(struct bt_attr_data), NULL);
69 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
70
71 K_MEM_SLAB_DEFINE(req_slab, sizeof(struct bt_att_req),
72 CONFIG_BT_ATT_TX_COUNT, __alignof__(struct bt_att_req));
73
/* Flag bits for bt_att_chan.flags */
enum {
	/* Bearer is connected and usable for sending */
	ATT_CONNECTED,
	/* Bearer is an enhanced (EATT) channel */
	ATT_ENHANCED,
	/* An SDU is in flight and its `sent` callback is still pending */
	ATT_PENDING_SENT,
	/* NOTE(review): presumably set once a "database out of sync" error
	 * has been reported on this bearer; the users are outside this chunk
	 * -- verify against the rest of the file.
	 */
	ATT_OUT_OF_SYNC_SENT,

	/* Total number of flags - must be at the end of the enum */
	ATT_NUM_FLAGS,
};
83
84 struct bt_att_tx_meta_data;
85 typedef void (*bt_att_tx_cb_t)(struct bt_conn *conn,
86 struct bt_att_tx_meta_data *user_data);
87
/* Out-of-band TX metadata for buffers allocated from att_pool. The storage
 * lives in tx_meta_data_storage[] and is paired with a buffer by index, so
 * the metadata shares the buffer's lifetime (see bt_att_get_tx_meta_data()).
 */
struct bt_att_tx_meta_data {
	/* Result of the last failed send attempt, 0 otherwise */
	int err;
	/* First octet of the PDU; stays 0 until chan_send() stamps it, which
	 * is how att_tx_destroy() tells a sent buffer from an abandoned one.
	 */
	uint8_t opcode;
	/* Number of times to invoke func (see chan_req_notif_sent()) */
	uint16_t attr_count;
	/* Bearer the buffer was (last) handed to */
	struct bt_att_chan *att_chan;
	/* Optional completion callback and its argument */
	bt_gatt_complete_func_t func;
	void *user_data;
	/* Caller's bearer preference (none/unenhanced-only/enhanced-only) */
	enum bt_att_chan_opt chan_opt;
};

struct bt_att_tx_meta {
	struct bt_att_tx_meta_data *data;
};
101
102 /* ATT channel specific data */
struct bt_att_chan {
	/* Connection this channel is associated with */
	struct bt_att *att;
	/* Underlying L2CAP channel state (rx/tx MTU, CID, status) */
	struct bt_l2cap_le_chan chan;
	/* ATT_* flag bits, see the enum above */
	ATOMIC_DEFINE(flags, ATT_NUM_FLAGS);
	/* Outstanding request on this bearer (at most one at a time) */
	struct bt_att_req *req;
	/* PDUs that must go out on this specific bearer */
	struct k_fifo tx_queue;
	/* ATT transaction timeout tracker (armed while a request is in
	 * flight, cancelled in att_handle_rsp())
	 */
	struct k_work_delayable timeout_work;
	/* Node in bt_att.chans */
	sys_snode_t node;
};
113
bt_att_is_enhanced(struct bt_att_chan * chan)114 static bool bt_att_is_enhanced(struct bt_att_chan *chan)
115 {
116 /* Optimization. */
117 if (!IS_ENABLED(CONFIG_BT_EATT)) {
118 return false;
119 }
120
121 return atomic_test_bit(chan->flags, ATT_ENHANCED);
122 }
123
bt_att_mtu(struct bt_att_chan * chan)124 static uint16_t bt_att_mtu(struct bt_att_chan *chan)
125 {
126 /* Core v5.3 Vol 3 Part F 3.4.2:
127 *
128 * The server and client shall set ATT_MTU to the minimum of the
129 * Client Rx MTU and the Server Rx MTU.
130 */
131 return MIN(chan->chan.rx.mtu, chan->chan.tx.mtu);
132 }
133
134 /* Descriptor of application-specific authorization callbacks that are used
135 * with the CONFIG_BT_GATT_AUTHORIZATION_CUSTOM Kconfig enabled.
136 */
137 const static struct bt_gatt_authorization_cb *authorization_cb;
138
139 /* ATT connection specific data */
struct bt_att {
	/* Connection this ATT context belongs to */
	struct bt_conn *conn;
	/* Shared request queue */
	sys_slist_t reqs;
	/* PDUs that may be sent on any suitable bearer */
	struct k_fifo tx_queue;
#if CONFIG_BT_ATT_PREPARE_COUNT > 0
	/* Queued prepare-write buffers (allocated from prep_pool) */
	sys_slist_t prep_queue;
#endif
	/* Contains bt_att_chan instance(s) */
	sys_slist_t chans;
#if defined(CONFIG_BT_EATT)
	struct {
		/* Deferred work driving EATT bearer establishment */
		struct k_work_delayable connection_work;
		/* Number of EATT bearers still to be connected */
		uint8_t chans_to_connect;

		/* Outcome of the previous EATT connection attempt.
		 * NOTE(review): consumers are outside this chunk -- verify
		 * the exact retry semantics against the rest of the file.
		 */
		uint16_t prev_conn_rsp_result;
		uint16_t prev_conn_req_result;
		uint8_t prev_conn_req_missing_chans;
	} eatt;
#endif /* CONFIG_BT_EATT */
};
161
162 K_MEM_SLAB_DEFINE(att_slab, sizeof(struct bt_att),
163 CONFIG_BT_MAX_CONN, __alignof__(struct bt_att));
164 K_MEM_SLAB_DEFINE(chan_slab, sizeof(struct bt_att_chan),
165 CONFIG_BT_MAX_CONN * ATT_CHAN_MAX,
166 __alignof__(struct bt_att_chan));
167 static struct bt_att_req cancel;
168
169 /** The thread ATT response handlers likely run on.
170 *
171 * Blocking this thread while waiting for an ATT request to resolve can cause a
172 * deadlock.
173 *
174 * This can happen if the application queues ATT requests in the context of a
175 * callback from the Bluetooth stack. This is because queuing an ATT request
176 * will block until a request-resource is available, and the callbacks run on
177 * the same thread as the ATT response handler that frees request-resources.
178 *
179 * The intended use of this value is to detect the above situation.
180 */
181 static k_tid_t att_handle_rsp_thread;
182
183 static struct bt_att_tx_meta_data tx_meta_data_storage[CONFIG_BT_ATT_TX_COUNT];
184
185 struct bt_att_tx_meta_data *bt_att_get_tx_meta_data(const struct net_buf *buf);
186 static void att_on_sent_cb(struct bt_att_tx_meta_data *meta);
187
/* net_buf destroy callback for att_pool buffers: snapshot the TX metadata,
 * return the buffer to the pool, then run the "sent" hooks from the snapshot.
 * The order matters -- the callback may allocate from the same pool.
 */
static void att_tx_destroy(struct net_buf *buf)
{
	struct bt_att_tx_meta_data *p_meta = bt_att_get_tx_meta_data(buf);
	struct bt_att_tx_meta_data meta;

	LOG_DBG("%p", buf);

	/* Destroy the buffer first, as the callback may attempt to allocate a
	 * new one for another operation.
	 */
	meta = *p_meta;

	/* Clear the meta storage. This might help catch illegal
	 * "use-after-free"s. An initial memset is not necessary, as the
	 * metadata storage array is `static`.
	 */
	memset(p_meta, 0x00, sizeof(*p_meta));

	/* After this point, p_meta doesn't belong to us.
	 * The user data will be memset to 0 on allocation.
	 */
	net_buf_destroy(buf);

	/* ATT opcode 0 is invalid. If we get here, that means the buffer got
	 * destroyed before it was ready to be sent. Hopefully nobody sets the
	 * opcode and then destroys the buffer without sending it. :'(
	 */
	if (meta.opcode != 0) {
		att_on_sent_cb(&meta);
	}
}
219
220 NET_BUF_POOL_DEFINE(att_pool, CONFIG_BT_ATT_TX_COUNT,
221 BT_L2CAP_SDU_BUF_SIZE(BT_ATT_BUF_SIZE),
222 CONFIG_BT_CONN_TX_USER_DATA_SIZE, att_tx_destroy);
223
/* Return the TX metadata slot belonging to @p buf.
 *
 * Buffers from att_pool and slots in tx_meta_data_storage[] are paired by
 * buffer index, so the slot is valid exactly as long as the buffer is.
 */
struct bt_att_tx_meta_data *bt_att_get_tx_meta_data(const struct net_buf *buf)
{
	__ASSERT_NO_MSG(net_buf_pool_get(buf->pool_id) == &att_pool);

	/* Metadata lifetime is implicitly tied to the buffer lifetime.
	 * Treat it as part of the buffer itself.
	 */
	return &tx_meta_data_storage[net_buf_id((struct net_buf *)buf)];
}
233
234 static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf);
235
236 static void att_chan_mtu_updated(struct bt_att_chan *updated_chan);
237 static void bt_att_disconnected(struct bt_l2cap_chan *chan);
238
239 struct net_buf *bt_att_create_rsp_pdu(struct bt_att_chan *chan, uint8_t op);
240
241 static void bt_att_sent(struct bt_l2cap_chan *ch);
242
/* "Sent" continuation for UATT PDUs, reached via att_on_sent_cb() once a PDU
 * has been handed off. Invokes bt_att_sent() manually because static LE
 * L2CAP channels never deliver an SDU-level `sent` callback.
 */
static void att_sent(void *user_data)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *att_chan = data->att_chan;
	struct bt_conn *conn = att_chan->att->conn;
	struct bt_l2cap_chan *chan = &att_chan->chan.chan;

	/* Must only be reached for unenhanced bearers */
	__ASSERT_NO_MSG(!bt_att_is_enhanced(att_chan));

	LOG_DBG("conn %p chan %p", conn, chan);

	/* For EATT, `bt_att_sent` is assigned to the `.sent` L2 callback.
	 * L2CAP will then call it once the SDU has finished sending.
	 *
	 * For UATT, this won't happen, as static LE l2cap channels don't have
	 * SDUs. Call it manually instead.
	 */
	bt_att_sent(chan);
}
262
263 /* In case of success the ownership of the buffer is transferred to the stack
264 * which takes care of releasing it when it completes transmitting to the
265 * controller.
266 *
267 * In case bt_l2cap_send_cb fails the buffer state and ownership are retained
268 * so the buffer can be safely pushed back to the queue to be processed later.
269 */
static int chan_send(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_hdr *hdr;
	struct net_buf_simple_state state;
	int err;
	struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);
	/* Remembered so metadata can be rolled back if the send fails */
	struct bt_att_chan *prev_chan = data->att_chan;

	hdr = (void *)buf->data;

	LOG_DBG("code 0x%02x", hdr->code);

	if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
		LOG_ERR("ATT channel not connected");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_BT_EATT) && hdr->code == BT_ATT_OP_MTU_REQ &&
	    chan->chan.tx.cid != BT_L2CAP_CID_ATT) {
		/* The Exchange MTU sub-procedure shall only be supported on
		 * the LE Fixed Channel Unenhanced ATT bearer
		 */
		return -ENOTSUP;
	}

	__ASSERT_NO_MSG(buf->len >= sizeof(struct bt_att_hdr));
	/* Stamp the opcode before handing the buffer off: att_tx_destroy()
	 * uses a non-zero opcode to decide whether to run the "sent" hooks.
	 */
	data->opcode = buf->data[0];
	data->err = 0;

	if (IS_ENABLED(CONFIG_BT_EATT) && bt_att_is_enhanced(chan)) {
		/* Check if sent is pending already, if it does it cannot be
		 * modified so the operation will need to be queued.
		 */
		if (atomic_test_bit(chan->flags, ATT_PENDING_SENT)) {
			return -EAGAIN;
		}

		/* Signed writes are only defined for the unenhanced bearer */
		if (hdr->code == BT_ATT_OP_SIGNED_WRITE_CMD) {
			return -ENOTSUP;
		}

		/* Check if the channel is ready to send in case of a request */
		if (att_op_get_type(hdr->code) == ATT_REQUEST &&
		    !atomic_test_bit(chan->chan.chan.status,
				     BT_L2CAP_STATUS_OUT)) {
			return -EAGAIN;
		}

		atomic_set_bit(chan->flags, ATT_PENDING_SENT);
		data->att_chan = chan;

		/* bt_l2cap_chan_send does actually return the number of bytes
		 * that could be sent immediately.
		 */
		err = bt_l2cap_chan_send(&chan->chan.chan, buf);
		if (err < 0) {
			/* Roll back so the caller can requeue or retry the
			 * buffer, possibly on a different bearer.
			 */
			data->att_chan = prev_chan;
			atomic_clear_bit(chan->flags, ATT_PENDING_SENT);
			data->err = err;

			return err;
		} else {
			/* On success, the almighty scheduler might already have
			 * run the destroy cb on the buffer. In that case, buf
			 * and its metadata are dangling pointers.
			 */
			buf = NULL;
			data = NULL;
		}

		return 0;
	}

	if (hdr->code == BT_ATT_OP_SIGNED_WRITE_CMD) {
		err = bt_smp_sign(chan->att->conn, buf);
		if (err) {
			LOG_ERR("Error signing data");
			net_buf_unref(buf);
			return err;
		}
	}

	/* Save the parse state so it can be restored on failure */
	net_buf_simple_save(&buf->b, &state);

	data->att_chan = chan;

	err = bt_l2cap_send(chan->att->conn, BT_L2CAP_CID_ATT, buf);
	if (err) {
		if (err == -ENOBUFS) {
			LOG_ERR("Ran out of TX buffers or contexts.");
		}
		/* In case of an error has occurred restore the buffer state */
		net_buf_simple_restore(&buf->b, &state);
		data->att_chan = prev_chan;
		data->err = err;
	}

	return err;
}
369
att_chan_matches_chan_opt(struct bt_att_chan * chan,enum bt_att_chan_opt chan_opt)370 static bool att_chan_matches_chan_opt(struct bt_att_chan *chan, enum bt_att_chan_opt chan_opt)
371 {
372 __ASSERT_NO_MSG(chan_opt <= BT_ATT_CHAN_OPT_ENHANCED_ONLY);
373
374 if (chan_opt == BT_ATT_CHAN_OPT_NONE) {
375 return true;
376 }
377
378 if (bt_att_is_enhanced(chan)) {
379 return (chan_opt & BT_ATT_CHAN_OPT_ENHANCED_ONLY);
380 } else {
381 return (chan_opt & BT_ATT_CHAN_OPT_UNENHANCED_ONLY);
382 }
383 }
384
/* Pop the first buffer in @p fifo whose channel-option preference allows it
 * to go out on @p chan, preserving the relative order of all other buffers.
 * Returns NULL if nothing queued matches.
 */
static struct net_buf *get_first_buf_matching_chan(struct k_fifo *fifo, struct bt_att_chan *chan)
{
	if (IS_ENABLED(CONFIG_BT_EATT)) {
		struct k_fifo skipped;
		struct net_buf *buf;
		struct net_buf *ret = NULL;
		struct bt_att_tx_meta_data *meta;

		k_fifo_init(&skipped);

		/* Drain the fifo, keeping the first match and parking every
		 * other buffer on a temporary fifo.
		 */
		while ((buf = net_buf_get(fifo, K_NO_WAIT))) {
			meta = bt_att_get_tx_meta_data(buf);
			if (!ret &&
			    att_chan_matches_chan_opt(chan, meta->chan_opt)) {
				ret = buf;
			} else {
				net_buf_put(&skipped, buf);
			}
		}

		__ASSERT_NO_MSG(k_fifo_is_empty(fifo));

		/* Restore the skipped buffers in their original order */
		while ((buf = net_buf_get(&skipped, K_NO_WAIT))) {
			net_buf_put(fifo, buf);
		}

		__ASSERT_NO_MSG(k_fifo_is_empty(&skipped));

		return ret;
	} else {
		/* Without EATT there is only the unenhanced bearer, so any
		 * queued buffer is a match.
		 */
		return net_buf_get(fifo, K_NO_WAIT);
	}
}
418
/* Remove and return the first queued request whose channel-option preference
 * allows it to be sent on @p chan, or NULL if none matches.
 */
static struct bt_att_req *get_first_req_matching_chan(sys_slist_t *reqs, struct bt_att_chan *chan)
{
	if (IS_ENABLED(CONFIG_BT_EATT)) {
		sys_snode_t *curr, *prev = NULL;
		struct bt_att_tx_meta_data *meta = NULL;

		/* Walk the list tracking the predecessor so the match can be
		 * unlinked with sys_slist_remove().
		 */
		SYS_SLIST_FOR_EACH_NODE(reqs, curr) {
			meta = bt_att_get_tx_meta_data(ATT_REQ(curr)->buf);
			if (att_chan_matches_chan_opt(chan, meta->chan_opt)) {
				break;
			}

			prev = curr;
		}

		if (curr) {
			sys_slist_remove(reqs, prev, curr);

			return ATT_REQ(curr);
		}

		return NULL;
	}

	/* Without EATT any request matches; just take the head */
	sys_snode_t *node = sys_slist_get(reqs);

	if (node) {
		return ATT_REQ(node);
	} else {
		return NULL;
	}
}
451
process_queue(struct bt_att_chan * chan,struct k_fifo * queue)452 static int process_queue(struct bt_att_chan *chan, struct k_fifo *queue)
453 {
454 struct net_buf *buf;
455 int err;
456
457 buf = get_first_buf_matching_chan(queue, chan);
458 if (buf) {
459 err = bt_att_chan_send(chan, buf);
460 if (err) {
461 /* Push it back if it could not be send */
462 k_queue_prepend(&queue->_queue, buf);
463 return err;
464 }
465
466 return 0;
467 }
468
469 return -ENOENT;
470 }
471
/* Send request @p req on @p chan without taking tx_sem.
 *
 * On success, ownership of req->buf passes to the stack and chan->req points
 * at the in-flight request; on failure both are rolled back so the caller
 * can requeue the request.
 */
static int chan_req_send(struct bt_att_chan *chan, struct bt_att_req *req)
{
	struct net_buf *buf;
	int err;

	/* The request PDU must fit into a single ATT_MTU */
	if (bt_att_mtu(chan) < net_buf_frags_len(req->buf)) {
		return -EMSGSIZE;
	}

	LOG_DBG("chan %p req %p len %zu", chan, req, net_buf_frags_len(req->buf));

	chan->req = req;

	/* Release since bt_l2cap_send_cb takes ownership of the buffer */
	buf = req->buf;
	req->buf = NULL;

	/* This lock makes sure the value of `bt_att_mtu(chan)` does not
	 * change.
	 */
	k_sched_lock();
	err = bt_att_chan_send(chan, buf);
	if (err) {
		/* We still have the ownership of the buffer */
		req->buf = buf;
		chan->req = NULL;
	} else {
		/* Remember the MTU the request was encoded against */
		bt_gatt_req_set_mtu(req, bt_att_mtu(chan));
	}
	k_sched_unlock();

	return err;
}
506
/* Bearer became free to send: L2CAP `sent` callback for EATT, called
 * manually via att_sent() for UATT. Tries, in order: the shared request
 * queue, this bearer's own queue, then the shared PDU queue.
 */
static void bt_att_sent(struct bt_l2cap_chan *ch)
{
	struct bt_att_chan *chan = ATT_CHAN(ch);
	struct bt_att *att = chan->att;
	int err;

	LOG_DBG("chan %p", chan);

	atomic_clear_bit(chan->flags, ATT_PENDING_SENT);

	if (!att) {
		LOG_DBG("Ignore sent on detached ATT chan");
		return;
	}

	/* Process pending requests first since they require a response they
	 * can only be processed one at time while if other queues were
	 * processed before they may always contain a buffer starving the
	 * request queue.
	 */
	if (!chan->req && !sys_slist_is_empty(&att->reqs)) {
		sys_snode_t *node = sys_slist_get(&att->reqs);

		if (chan_req_send(chan, ATT_REQ(node)) >= 0) {
			return;
		}

		/* Prepend back to the list as it could not be sent */
		sys_slist_prepend(&att->reqs, node);
	}

	/* Process channel queue */
	err = process_queue(chan, &chan->tx_queue);
	if (!err) {
		return;
	}

	/* Process global queue */
	(void)process_queue(chan, &att->tx_queue);
}
547
chan_rebegin_att_timeout(struct bt_att_tx_meta_data * user_data)548 static void chan_rebegin_att_timeout(struct bt_att_tx_meta_data *user_data)
549 {
550 struct bt_att_tx_meta_data *data = user_data;
551 struct bt_att_chan *chan = data->att_chan;
552
553 LOG_DBG("chan %p chan->req %p", chan, chan->req);
554
555 if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
556 LOG_ERR("ATT channel not connected");
557 return;
558 }
559
560 /* Start timeout work. Only if we are sure that the request is really
561 * in-flight.
562 */
563 if (chan->req) {
564 k_work_reschedule(&chan->timeout_work, BT_ATT_TIMEOUT);
565 }
566 }
567
chan_req_notif_sent(struct bt_att_tx_meta_data * user_data)568 static void chan_req_notif_sent(struct bt_att_tx_meta_data *user_data)
569 {
570 struct bt_att_tx_meta_data *data = user_data;
571 struct bt_att_chan *chan = data->att_chan;
572 struct bt_conn *conn = chan->att->conn;
573 bt_gatt_complete_func_t func = data->func;
574 uint16_t attr_count = data->attr_count;
575 void *ud = data->user_data;
576
577 LOG_DBG("chan %p CID 0x%04X", chan, chan->chan.tx.cid);
578
579 if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
580 LOG_ERR("ATT channel not connected");
581 return;
582 }
583
584 if (func) {
585 for (uint16_t i = 0; i < attr_count; i++) {
586 func(conn, ud);
587 }
588 }
589 }
590
/* Dispatch post-transmit work for a PDU based on its opcode class: restart
 * the transaction timer for requests/indications, run completion callbacks
 * for commands/notifications, and kick the UATT send state machine.
 */
static void att_on_sent_cb(struct bt_att_tx_meta_data *meta)
{
	const att_type_t op_type = att_op_get_type(meta->opcode);

	LOG_DBG("opcode 0x%x", meta->opcode);

	/* The bearer may have been torn down while the PDU was in flight */
	if (!meta->att_chan ||
	    !meta->att_chan->att ||
	    !meta->att_chan->att->conn) {
		LOG_DBG("Bearer not connected, dropping ATT cb");
		return;
	}

	if (meta->err) {
		LOG_ERR("Got err %d, not calling ATT cb", meta->err);
		return;
	}

	if (!bt_att_is_enhanced(meta->att_chan)) {
		/* For EATT, L2CAP will call it after the SDU is fully sent. */
		LOG_DBG("UATT bearer, calling att_sent");
		att_sent(meta);
	}

	switch (op_type) {
	case ATT_RESPONSE:
		return;
	case ATT_CONFIRMATION:
		return;
	case ATT_REQUEST:
	case ATT_INDICATION:
		chan_rebegin_att_timeout(meta);
		return;
	case ATT_COMMAND:
	case ATT_NOTIFICATION:
		chan_req_notif_sent(meta);
		return;
	default:
		__ASSERT(false, "Unknown op type 0x%02X", op_type);
		return;
	}
}
633
/* Allocate a PDU buffer for opcode @p op with room for @p len payload bytes,
 * write the ATT header, and claim the matching TX metadata slot. Returns
 * NULL if the PDU would exceed the bearer's ATT_MTU or no buffer is
 * available within the (opcode-dependent) allocation timeout.
 */
static struct net_buf *bt_att_chan_create_pdu(struct bt_att_chan *chan, uint8_t op, size_t len)
{
	struct bt_att_hdr *hdr;
	struct net_buf *buf;
	struct bt_att_tx_meta_data *data;
	k_timeout_t timeout;

	if (len + sizeof(op) > bt_att_mtu(chan)) {
		LOG_WRN("ATT MTU exceeded, max %u, wanted %zu", bt_att_mtu(chan),
			len + sizeof(op));
		return NULL;
	}

	switch (att_op_get_type(op)) {
	case ATT_RESPONSE:
	case ATT_CONFIRMATION:
		/* Use a timeout only when responding/confirming */
		timeout = BT_ATT_TIMEOUT;
		break;
	default:
		timeout = K_FOREVER;
	}

	/* This will reserve headspace for lower layers */
	buf = bt_l2cap_create_pdu_timeout(&att_pool, 0, timeout);
	if (!buf) {
		LOG_ERR("Unable to allocate buffer for op 0x%02x", op);
		return NULL;
	}

	/* If we got a buf from `att_pool`, then the metadata slot at its index
	 * is officially ours to use.
	 */
	data = bt_att_get_tx_meta_data(buf);

	if (IS_ENABLED(CONFIG_BT_EATT)) {
		/* Extra headroom for the EATT SDU length field */
		net_buf_reserve(buf, BT_L2CAP_SDU_BUF_SIZE(0));
	}

	data->att_chan = chan;

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = op;

	return buf;
}
680
bt_att_chan_send(struct bt_att_chan * chan,struct net_buf * buf)681 static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf)
682 {
683 LOG_DBG("chan %p flags %lu code 0x%02x", chan, atomic_get(chan->flags),
684 ((struct bt_att_hdr *)buf->data)->code);
685
686 if (IS_ENABLED(CONFIG_BT_EATT) &&
687 !att_chan_matches_chan_opt(chan, bt_att_get_tx_meta_data(buf)->chan_opt)) {
688 return -EINVAL;
689 }
690
691 return chan_send(chan, buf);
692 }
693
/* Try to flush one PDU from the shared TX queue over any bearer, stopping at
 * the first success.
 */
static void att_send_process(struct bt_att *att)
{
	struct bt_att_chan *chan, *tmp, *prev = NULL;
	int err = 0;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (err == -ENOENT && prev &&
		    (bt_att_is_enhanced(chan) == bt_att_is_enhanced(prev))) {
			/* If there was nothing to send for the previous channel and the current
			 * channel has the same "enhancedness", there will be nothing to send for
			 * this channel either.
			 */
			continue;
		}

		err = process_queue(chan, &att->tx_queue);
		if (!err) {
			/* Success */
			return;
		}

		prev = chan;
	}
}
718
bt_att_chan_send_rsp(struct bt_att_chan * chan,struct net_buf * buf)719 static void bt_att_chan_send_rsp(struct bt_att_chan *chan, struct net_buf *buf)
720 {
721 int err;
722
723 err = chan_send(chan, buf);
724 if (err) {
725 /* Responses need to be sent back using the same channel */
726 net_buf_put(&chan->tx_queue, buf);
727 }
728 }
729
send_err_rsp(struct bt_att_chan * chan,uint8_t req,uint16_t handle,uint8_t err)730 static void send_err_rsp(struct bt_att_chan *chan, uint8_t req, uint16_t handle,
731 uint8_t err)
732 {
733 struct bt_att_error_rsp *rsp;
734 struct net_buf *buf;
735
736 /* Ignore opcode 0x00 */
737 if (!req) {
738 return;
739 }
740
741 buf = bt_att_chan_create_pdu(chan, BT_ATT_OP_ERROR_RSP, sizeof(*rsp));
742 if (!buf) {
743 return;
744 }
745
746 rsp = net_buf_add(buf, sizeof(*rsp));
747 rsp->request = req;
748 rsp->handle = sys_cpu_to_le16(handle);
749 rsp->error = err;
750
751 bt_att_chan_send_rsp(chan, buf);
752 }
753
/* Handle an incoming ATT_EXCHANGE_MTU_REQ: validate the client's MTU, reply
 * with our server MTU, and apply the negotiated value to the bearer.
 * Returns 0 or a BT_ATT_ERR_* code for the caller to turn into an error rsp.
 */
static uint8_t att_mtu_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_exchange_mtu_req *req;
	struct bt_att_exchange_mtu_rsp *rsp;
	struct net_buf *pdu;
	uint16_t mtu_client, mtu_server;

	/* Exchange MTU sub-procedure shall only be supported on the
	 * LE Fixed Channel Unenhanced ATT bearer.
	 */
	if (bt_att_is_enhanced(chan)) {
		return BT_ATT_ERR_NOT_SUPPORTED;
	}

	req = (void *)buf->data;

	mtu_client = sys_le16_to_cpu(req->mtu);

	LOG_DBG("Client MTU %u", mtu_client);

	/* Check if MTU is valid */
	if (mtu_client < BT_ATT_DEFAULT_LE_MTU) {
		return BT_ATT_ERR_INVALID_PDU;
	}

	pdu = bt_att_create_rsp_pdu(chan, BT_ATT_OP_MTU_RSP);
	if (!pdu) {
		return BT_ATT_ERR_UNLIKELY;
	}

	mtu_server = BT_LOCAL_ATT_MTU_UATT;

	LOG_DBG("Server MTU %u", mtu_server);

	rsp = net_buf_add(pdu, sizeof(*rsp));
	rsp->mtu = sys_cpu_to_le16(mtu_server);

	bt_att_chan_send_rsp(chan, pdu);

	/* The ATT_EXCHANGE_MTU_REQ/RSP is just an alternative way of
	 * communicating the L2CAP MTU.
	 */
	chan->chan.rx.mtu = mtu_server;
	chan->chan.tx.mtu = mtu_client;

	LOG_DBG("Negotiated MTU %u", bt_att_mtu(chan));

#if defined(CONFIG_BT_GATT_CLIENT)
	/* Mark the MTU Exchange as complete.
	 * This will skip sending ATT Exchange MTU from our side.
	 *
	 * Core 5.3 | Vol 3, Part F 3.4.2.2:
	 * If MTU is exchanged in one direction, that is sufficient for both directions.
	 */
	atomic_set_bit(chan->att->conn->flags, BT_CONN_ATT_MTU_EXCHANGED);
#endif /* CONFIG_BT_GATT_CLIENT */

	att_chan_mtu_updated(chan);

	return 0;
}
815
bt_att_chan_req_send(struct bt_att_chan * chan,struct bt_att_req * req)816 static int bt_att_chan_req_send(struct bt_att_chan *chan,
817 struct bt_att_req *req)
818 {
819 __ASSERT_NO_MSG(chan);
820 __ASSERT_NO_MSG(req);
821 __ASSERT_NO_MSG(req->func);
822 __ASSERT_NO_MSG(!chan->req);
823
824 LOG_DBG("req %p", req);
825
826 return chan_req_send(chan, req);
827 }
828
/* Try to dispatch the next pending request from the shared queue on an idle
 * bearer, stopping at the first successful send.
 */
static void att_req_send_process(struct bt_att *att)
{
	struct bt_att_req *req = NULL;
	struct bt_att_chan *chan, *tmp, *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		/* If there is an ongoing transaction, do not use the channel */
		if (chan->req) {
			continue;
		}

		if (!req && prev && (bt_att_is_enhanced(chan) == bt_att_is_enhanced(prev))) {
			/* If there was nothing to send for the previous channel and the current
			 * channel has the same "enhancedness", there will be nothing to send for
			 * this channel either.
			 */
			continue;
		}

		prev = chan;

		/* Pull next request from the list */
		req = get_first_req_matching_chan(&att->reqs, chan);
		if (!req) {
			continue;
		}

		if (bt_att_chan_req_send(chan, req) >= 0) {
			return;
		}

		/* Prepend back to the list as it could not be sent */
		sys_slist_prepend(&att->reqs, &req->node);
	}
}
864
/* Complete the outstanding request on @p chan with response payload
 * @p pdu / @p len (or error @p err), free the request, then kick the request
 * queue and finally run the user's callback. Always returns 0.
 */
static uint8_t att_handle_rsp(struct bt_att_chan *chan, void *pdu, uint16_t len,
			      int err)
{
	bt_att_func_t func = NULL;
	void *params;

	LOG_DBG("chan %p err %d len %u: %s", chan, err, len, bt_hex(pdu, len));

	/* Cancel timeout if ongoing */
	k_work_cancel_delayable(&chan->timeout_work);

	if (!chan->req) {
		LOG_WRN("No pending ATT request");
		goto process;
	}

	/* Check if request has been cancelled */
	if (chan->req == &cancel) {
		chan->req = NULL;
		goto process;
	}

	/* Reset func so it can be reused by the callback */
	func = chan->req->func;
	chan->req->func = NULL;
	params = chan->req->user_data;

	/* free allocated request so its memory can be reused */
	bt_att_req_free(chan->req);
	chan->req = NULL;

process:
	/* Process pending requests */
	att_req_send_process(chan->att);
	/* func stays NULL (and the callback is skipped) when there was no
	 * pending request or it had been cancelled.
	 */
	if (func) {
		func(chan->att->conn, err, pdu, len, params);
	}

	return 0;
}
905
906 #if defined(CONFIG_BT_GATT_CLIENT)
/* Handle an ATT_EXCHANGE_MTU_RSP: validate the server's MTU, apply the
 * negotiated values to the bearer, and complete the pending request.
 */
static uint8_t att_mtu_rsp(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_exchange_mtu_rsp *rsp;
	uint16_t mtu;

	rsp = (void *)buf->data;

	mtu = sys_le16_to_cpu(rsp->mtu);

	LOG_DBG("Server MTU %u", mtu);

	/* Check if MTU is valid */
	if (mtu < BT_ATT_DEFAULT_LE_MTU) {
		return att_handle_rsp(chan, NULL, 0, BT_ATT_ERR_INVALID_PDU);
	}

	/* The following must equal the value we sent in the req. We assume this
	 * is a rsp to `gatt_exchange_mtu_encode`.
	 */
	chan->chan.rx.mtu = BT_LOCAL_ATT_MTU_UATT;
	/* The ATT_EXCHANGE_MTU_REQ/RSP is just an alternative way of
	 * communicating the L2CAP MTU.
	 */

	chan->chan.tx.mtu = mtu;

	LOG_DBG("Negotiated MTU %u", bt_att_mtu(chan));

	att_chan_mtu_updated(chan);

	return att_handle_rsp(chan, rsp, buf->len, 0);
}
939 #endif /* CONFIG_BT_GATT_CLIENT */
940
/* Validate an attribute handle range [start, end].
 *
 * Returns true when the range is usable. On failure, if @p err is non-NULL
 * it receives the offending handle: 0 when either bound is the invalid
 * handle 0, otherwise @p start for an inverted range.
 */
static bool range_is_valid(uint16_t start, uint16_t end, uint16_t *err)
{
	uint16_t bad_handle;

	if (start == 0U || end == 0U) {
		/* Handle 0 is invalid */
		bad_handle = 0U;
	} else if (start > end) {
		/* Inverted range */
		bad_handle = start;
	} else {
		return true;
	}

	if (err) {
		*err = bad_handle;
	}

	return false;
}
961
/* Iteration state for building a Find Information response (find_info_cb) */
struct find_info_data {
	struct bt_att_chan *chan;
	/* Response PDU being filled */
	struct net_buf *buf;
	/* Response header; NULL until the first attribute is appended */
	struct bt_att_find_info_rsp *rsp;
	union {
		/* Cursor for the next entry, 16-bit or 128-bit UUID format */
		struct bt_att_info_16 *info16;
		struct bt_att_info_128 *info128;
	};
};
971
/* bt_gatt_foreach_attr() callback for Find Information: append one
 * handle/UUID pair per attribute, locking the response format (16- or
 * 128-bit) to the first attribute's UUID type and stopping when the format
 * changes or the ATT_MTU would be exceeded.
 */
static uint8_t find_info_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct find_info_data *data = user_data;
	struct bt_att_chan *chan = data->chan;

	LOG_DBG("handle 0x%04x", handle);

	/* Initialize rsp at first entry */
	if (!data->rsp) {
		data->rsp = net_buf_add(data->buf, sizeof(*data->rsp));
		data->rsp->format = (attr->uuid->type == BT_UUID_TYPE_16) ?
				    BT_ATT_INFO_16 : BT_ATT_INFO_128;
	}

	switch (data->rsp->format) {
	case BT_ATT_INFO_16:
		/* A single response uses one uniform format; stop on change */
		if (attr->uuid->type != BT_UUID_TYPE_16) {
			return BT_GATT_ITER_STOP;
		}

		/* Fast forward to next item position */
		data->info16 = net_buf_add(data->buf, sizeof(*data->info16));
		data->info16->handle = sys_cpu_to_le16(handle);
		data->info16->uuid = sys_cpu_to_le16(BT_UUID_16(attr->uuid)->val);

		/* Continue only while another full entry still fits */
		if (bt_att_mtu(chan) - data->buf->len >
		    sizeof(*data->info16)) {
			return BT_GATT_ITER_CONTINUE;
		}

		break;
	case BT_ATT_INFO_128:
		if (attr->uuid->type != BT_UUID_TYPE_128) {
			return BT_GATT_ITER_STOP;
		}

		/* Fast forward to next item position */
		data->info128 = net_buf_add(data->buf, sizeof(*data->info128));
		data->info128->handle = sys_cpu_to_le16(handle);
		memcpy(data->info128->uuid, BT_UUID_128(attr->uuid)->val,
		       sizeof(data->info128->uuid));

		if (bt_att_mtu(chan) - data->buf->len >
		    sizeof(*data->info128)) {
			return BT_GATT_ITER_CONTINUE;
		}
	}

	return BT_GATT_ITER_STOP;
}
1023
/* Build and send a Find Information response for the given handle range, or
 * an ATT error response if no attribute was found in the range.
 */
static uint8_t att_find_info_rsp(struct bt_att_chan *chan, uint16_t start_handle,
				 uint16_t end_handle)
{
	struct find_info_data data;

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_FIND_INFO_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	bt_gatt_foreach_attr(start_handle, end_handle, find_info_cb, &data);

	/* data.rsp stays NULL when no attribute in range produced an entry */
	if (!data.rsp) {
		net_buf_unref(data.buf);
		/* Respond since handle is set */
		send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, start_handle,
			     BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
1051
/* Handle an incoming ATT_FIND_INFORMATION_REQ: parse and validate the handle
 * range, then delegate to att_find_info_rsp().
 */
static uint8_t att_find_info_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_find_info_req *req;
	uint16_t start_handle, end_handle, err_handle;

	req = (void *)buf->data;

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x", start_handle, end_handle);

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	return att_find_info_rsp(chan, start_handle, end_handle);
}
1072
/* Iteration state for building a Find By Type Value response (find_type_cb) */
struct find_type_data {
	struct bt_att_chan *chan;
	/* Response PDU (may grow extra fragments) */
	struct net_buf *buf;
	/* Handle group currently being extended; NULL when none is open */
	struct bt_att_handle_group *group;
	/* Attribute value to match, as received from the peer */
	const void *value;
	uint8_t value_len;
	/* Error to report if no matching service is found */
	uint8_t err;
};
1081
/* bt_gatt_foreach_attr() callback for Find By Type Value: for each primary
 * service whose UUID matches data->value, open a new handle group; extend the
 * open group's end_handle over the service's other attributes.
 */
static uint8_t find_type_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct find_type_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	int read;
	uint8_t uuid[16];
	struct net_buf *frag;
	size_t len;

	/* Skip secondary services */
	if (!bt_uuid_cmp(attr->uuid, BT_UUID_GATT_SECONDARY)) {
		goto skip;
	}

	/* Update group end_handle if not a primary service */
	if (bt_uuid_cmp(attr->uuid, BT_UUID_GATT_PRIMARY)) {
		if (data->group &&
		    handle > sys_le16_to_cpu(data->group->end_handle)) {
			data->group->end_handle = sys_cpu_to_le16(handle);
		}
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/* stop if there is no space left */
	if (bt_att_mtu(chan) - net_buf_frags_len(data->buf) <
	    sizeof(*data->group)) {
		return BT_GATT_ITER_STOP;
	}

	frag = net_buf_frag_last(data->buf);

	len = MIN(bt_att_mtu(chan) - net_buf_frags_len(data->buf),
		  net_buf_tailroom(frag));
	if (!len) {
		/* Current fragment is full; chain a new one from same pool */
		frag = net_buf_alloc(net_buf_pool_get(data->buf->pool_id),
				     K_NO_WAIT);
		/* If not buffer can be allocated immediately stop */
		if (!frag) {
			return BT_GATT_ITER_STOP;
		}

		net_buf_frag_add(data->buf, frag);
	}

	/* Read attribute value and store in the buffer */
	read = attr->read(conn, attr, uuid, sizeof(uuid), 0);
	if (read < 0) {
		/*
		 * Since we don't know if it is the service with requested UUID,
		 * we cannot respond with an error to this request.
		 */
		goto skip;
	}

	/* Check if data matches */
	if (read != data->value_len) {
		/* Use bt_uuid_cmp() to compare UUIDs of different form. */
		struct bt_uuid_128 ref_uuid;
		struct bt_uuid_128 recvd_uuid;

		if (!bt_uuid_create(&recvd_uuid.uuid, data->value, data->value_len)) {
			LOG_WRN("Unable to create UUID: size %u", data->value_len);
			goto skip;
		}
		if (!bt_uuid_create(&ref_uuid.uuid, uuid, read)) {
			LOG_WRN("Unable to create UUID: size %d", read);
			goto skip;
		}
		if (bt_uuid_cmp(&recvd_uuid.uuid, &ref_uuid.uuid)) {
			goto skip;
		}
	} else if (memcmp(data->value, uuid, read)) {
		/* Same length: raw byte comparison suffices */
		goto skip;
	}

	/* If service has been found, error should be cleared */
	data->err = 0x00;

	/* Fast forward to next item position */
	data->group = net_buf_add(frag, sizeof(*data->group));
	data->group->start_handle = sys_cpu_to_le16(handle);
	data->group->end_handle = sys_cpu_to_le16(handle);

	/* continue to find the end_handle */
	return BT_GATT_ITER_CONTINUE;

skip:
	/* Close any open group so later attributes don't extend it */
	data->group = NULL;
	return BT_GATT_ITER_CONTINUE;
}
1176
att_find_type_rsp(struct bt_att_chan * chan,uint16_t start_handle,uint16_t end_handle,const void * value,uint8_t value_len)1177 static uint8_t att_find_type_rsp(struct bt_att_chan *chan, uint16_t start_handle,
1178 uint16_t end_handle, const void *value,
1179 uint8_t value_len)
1180 {
1181 struct find_type_data data;
1182
1183 (void)memset(&data, 0, sizeof(data));
1184
1185 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_FIND_TYPE_RSP);
1186 if (!data.buf) {
1187 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1188 }
1189
1190 data.chan = chan;
1191 data.group = NULL;
1192 data.value = value;
1193 data.value_len = value_len;
1194
1195 /* Pre-set error in case no service will be found */
1196 data.err = BT_ATT_ERR_ATTRIBUTE_NOT_FOUND;
1197
1198 bt_gatt_foreach_attr(start_handle, end_handle, find_type_cb, &data);
1199
1200 /* If error has not been cleared, no service has been found */
1201 if (data.err) {
1202 net_buf_unref(data.buf);
1203 /* Respond since handle is set */
1204 send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
1205 data.err);
1206 return 0;
1207 }
1208
1209 bt_att_chan_send_rsp(chan, data.buf);
1210
1211 return 0;
1212 }
1213
att_find_type_req(struct bt_att_chan * chan,struct net_buf * buf)1214 static uint8_t att_find_type_req(struct bt_att_chan *chan, struct net_buf *buf)
1215 {
1216 struct bt_att_find_type_req *req;
1217 uint16_t start_handle, end_handle, err_handle, type;
1218 uint8_t *value;
1219
1220 req = net_buf_pull_mem(buf, sizeof(*req));
1221
1222 start_handle = sys_le16_to_cpu(req->start_handle);
1223 end_handle = sys_le16_to_cpu(req->end_handle);
1224 type = sys_le16_to_cpu(req->type);
1225 value = buf->data;
1226
1227 LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %u", start_handle, end_handle, type);
1228
1229 if (!range_is_valid(start_handle, end_handle, &err_handle)) {
1230 send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, err_handle,
1231 BT_ATT_ERR_INVALID_HANDLE);
1232 return 0;
1233 }
1234
1235 /* The Attribute Protocol Find By Type Value Request shall be used with
1236 * the Attribute Type parameter set to the UUID for "Primary Service"
1237 * and the Attribute Value set to the 16-bit Bluetooth UUID or 128-bit
1238 * UUID for the specific primary service.
1239 */
1240 if (bt_uuid_cmp(BT_UUID_DECLARE_16(type), BT_UUID_GATT_PRIMARY)) {
1241 send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
1242 BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
1243 return 0;
1244 }
1245
1246 return att_find_type_rsp(chan, start_handle, end_handle, value,
1247 buf->len);
1248 }
1249
err_to_att(int err)1250 static uint8_t err_to_att(int err)
1251 {
1252 LOG_DBG("%d", err);
1253
1254 if (err < 0 && err >= -0xff) {
1255 return -err;
1256 }
1257
1258 return BT_ATT_ERR_UNLIKELY;
1259 }
1260
/* Iterator state for building a Read By Type response */
struct read_type_data {
	struct bt_att_chan *chan;	/* Channel the response is built for */
	struct bt_uuid *uuid;		/* Requested attribute type */
	struct net_buf *buf;		/* Response PDU under construction */
	struct bt_att_read_type_rsp *rsp; /* Response header (holds fixed item size) */
	struct bt_att_data *item;	/* Current item; NULL signals a size mismatch */
	uint8_t err;			/* Pre-set error, cleared on first match */
};

/* Callback invoked after each partial attribute read while filling a
 * response; returning false makes att_chan_read() stop before the read
 * is committed to the buffer.
 */
typedef bool (*attr_read_cb)(struct net_buf *buf, ssize_t read,
			     void *user_data);
1272
attr_read_authorize(struct bt_conn * conn,const struct bt_gatt_attr * attr)1273 static bool attr_read_authorize(struct bt_conn *conn,
1274 const struct bt_gatt_attr *attr)
1275 {
1276 if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
1277 return true;
1278 }
1279
1280 if (!authorization_cb || !authorization_cb->read_authorize) {
1281 return true;
1282 }
1283
1284 return authorization_cb->read_authorize(conn, attr);
1285 }
1286
/* Per-read callback for Read By Type: enforces that every item in the
 * response has the same length, as the PDU carries a single item size.
 */
static bool attr_read_type_cb(struct net_buf *frag, ssize_t read,
			      void *user_data)
{
	struct read_type_data *data = user_data;

	if (!data->rsp->len) {
		/* First item: its size defines the fixed item length */
		data->rsp->len = read + sizeof(*data->item);
	} else if (data->rsp->len != read + sizeof(*data->item)) {
		/* Different size: drop the already-added item header and
		 * signal the caller to stop before committing this read.
		 */
		frag->len -= sizeof(*data->item);
		data->item = NULL;
		return false;
	}

	return true;
}
1304
/* Read an attribute's value into buf, chaining extra fragments as needed
 * so that up to bt_att_mtu(chan) octets fit, starting at the given value
 * offset.  The optional cb runs after each partial read and may return
 * false to stop before that read is committed.
 *
 * Returns the total number of octets read, or the negative attr->read()
 * error when nothing was read at all.
 */
static ssize_t att_chan_read(struct bt_att_chan *chan,
			     const struct bt_gatt_attr *attr,
			     struct net_buf *buf, uint16_t offset,
			     attr_read_cb cb, void *user_data)
{
	struct bt_conn *conn = chan->chan.chan.conn;
	ssize_t read;
	struct net_buf *frag;
	size_t len, total = 0;

	/* No room left within the MTU */
	if (bt_att_mtu(chan) <= net_buf_frags_len(buf)) {
		return 0;
	}

	frag = net_buf_frag_last(buf);

	/* Create necessary fragments if MTU is bigger than what a buffer can
	 * hold.
	 */
	do {
		len = MIN(bt_att_mtu(chan) - net_buf_frags_len(buf),
			  net_buf_tailroom(frag));
		if (!len) {
			frag = net_buf_alloc(net_buf_pool_get(buf->pool_id),
					     K_NO_WAIT);
			/* If no buffer can be allocated immediately, return
			 * what has been read so far.
			 */
			if (!frag) {
				return total;
			}

			net_buf_frag_add(buf, frag);

			len = MIN(bt_att_mtu(chan) - net_buf_frags_len(buf),
				  net_buf_tailroom(frag));
		}

		read = attr->read(conn, attr, frag->data + frag->len, len,
				  offset);
		if (read < 0) {
			/* Only propagate the error when no data was read */
			if (total) {
				return total;
			}

			return read;
		}

		if (cb && !cb(frag, read, user_data)) {
			break;
		}

		/* Commit the read octets and advance the value offset */
		net_buf_add(frag, read);
		total += read;
		offset += read;
	} while (bt_att_mtu(chan) > net_buf_frags_len(buf) && read == len);

	return total;
}
1362
/* bt_gatt_foreach_attr() callback for Read By Type: appends one
 * handle/value item per matching attribute until the MTU fills up or an
 * item of a different size is encountered.
 */
static uint8_t read_type_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct read_type_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	ssize_t read;

	/* Skip if doesn't match */
	if (bt_uuid_cmp(attr->uuid, data->uuid)) {
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If an attribute in the set of requested attributes would cause an
	 * Error Response then this attribute cannot be included in a
	 * Read By Type Response and the attributes before this attribute
	 * shall be returned
	 *
	 * If the first attribute in the set of requested attributes would
	 * cause an Error Response then no other attributes in the requested
	 * attributes can be considered.
	 */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		if (data->rsp->len) {
			/* Items already collected: send them instead of
			 * the error.
			 */
			data->err = 0x00;
		}
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_read_authorize(conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/*
	 * An attribute was found in the handle range, so the pre-set
	 * "attribute not found" error is cleared.
	 */
	data->err = 0x00;

	/* Fast forward to next item position */
	data->item = net_buf_add(net_buf_frag_last(data->buf),
				 sizeof(*data->item));
	data->item->handle = sys_cpu_to_le16(handle);

	read = att_chan_read(chan, attr, data->buf, 0, attr_read_type_cb, data);
	if (read < 0) {
		data->err = err_to_att(read);
		return BT_GATT_ITER_STOP;
	}

	/* attr_read_type_cb() cleared the item on a size mismatch */
	if (!data->item) {
		return BT_GATT_ITER_STOP;
	}

	/* Continue only if there is still space for more items */
	return bt_att_mtu(chan) - net_buf_frags_len(data->buf) >
	       data->rsp->len ? BT_GATT_ITER_CONTINUE : BT_GATT_ITER_STOP;
}
1427
att_read_type_rsp(struct bt_att_chan * chan,struct bt_uuid * uuid,uint16_t start_handle,uint16_t end_handle)1428 static uint8_t att_read_type_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
1429 uint16_t start_handle, uint16_t end_handle)
1430 {
1431 struct read_type_data data;
1432
1433 (void)memset(&data, 0, sizeof(data));
1434
1435 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_TYPE_RSP);
1436 if (!data.buf) {
1437 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1438 }
1439
1440 data.chan = chan;
1441 data.uuid = uuid;
1442 data.rsp = net_buf_add(data.buf, sizeof(*data.rsp));
1443 data.rsp->len = 0U;
1444
1445 /* Pre-set error if no attr will be found in handle */
1446 data.err = BT_ATT_ERR_ATTRIBUTE_NOT_FOUND;
1447
1448 bt_gatt_foreach_attr(start_handle, end_handle, read_type_cb, &data);
1449
1450 if (data.err) {
1451 net_buf_unref(data.buf);
1452 /* Response here since handle is set */
1453 send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, start_handle,
1454 data.err);
1455 return 0;
1456 }
1457
1458 bt_att_chan_send_rsp(chan, data.buf);
1459
1460 return 0;
1461 }
1462
att_read_type_req(struct bt_att_chan * chan,struct net_buf * buf)1463 static uint8_t att_read_type_req(struct bt_att_chan *chan, struct net_buf *buf)
1464 {
1465 struct bt_att_read_type_req *req;
1466 uint16_t start_handle, end_handle, err_handle;
1467 union {
1468 struct bt_uuid uuid;
1469 struct bt_uuid_16 u16;
1470 struct bt_uuid_128 u128;
1471 } u;
1472 uint8_t uuid_len = buf->len - sizeof(*req);
1473
1474 /* Type can only be UUID16 or UUID128 */
1475 if (uuid_len != 2 && uuid_len != 16) {
1476 return BT_ATT_ERR_INVALID_PDU;
1477 }
1478
1479 req = net_buf_pull_mem(buf, sizeof(*req));
1480
1481 start_handle = sys_le16_to_cpu(req->start_handle);
1482 end_handle = sys_le16_to_cpu(req->end_handle);
1483 if (!bt_uuid_create(&u.uuid, req->uuid, uuid_len)) {
1484 return BT_ATT_ERR_UNLIKELY;
1485 }
1486
1487 LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %s", start_handle, end_handle,
1488 bt_uuid_str(&u.uuid));
1489
1490 if (!range_is_valid(start_handle, end_handle, &err_handle)) {
1491 send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, err_handle,
1492 BT_ATT_ERR_INVALID_HANDLE);
1493 return 0;
1494 }
1495
1496 return att_read_type_rsp(chan, &u.uuid, start_handle, end_handle);
1497 }
1498
/* Shared state for read_cb()/read_vl_cb() iterations */
struct read_data {
	struct bt_att_chan *chan;	/* Channel the response is built for */
	uint16_t offset;		/* Value offset to start reading from */
	struct net_buf *buf;		/* Response PDU under construction */
	uint8_t err;			/* Pre-set error, updated per attribute */
};
1505
read_cb(const struct bt_gatt_attr * attr,uint16_t handle,void * user_data)1506 static uint8_t read_cb(const struct bt_gatt_attr *attr, uint16_t handle,
1507 void *user_data)
1508 {
1509 struct read_data *data = user_data;
1510 struct bt_att_chan *chan = data->chan;
1511 struct bt_conn *conn = chan->chan.chan.conn;
1512 int ret;
1513
1514 LOG_DBG("handle 0x%04x", handle);
1515
1516 /*
1517 * If any attribute is founded in handle range it means that error
1518 * should be changed from pre-set: invalid handle error to no error.
1519 */
1520 data->err = 0x00;
1521
1522 /* Check attribute permissions */
1523 data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
1524 if (data->err) {
1525 return BT_GATT_ITER_STOP;
1526 }
1527
1528 /* Check the attribute authorization logic */
1529 if (!attr_read_authorize(conn, attr)) {
1530 data->err = BT_ATT_ERR_AUTHORIZATION;
1531 return BT_GATT_ITER_STOP;
1532 }
1533
1534 /* Read attribute value and store in the buffer */
1535 ret = att_chan_read(chan, attr, data->buf, data->offset, NULL, NULL);
1536 if (ret < 0) {
1537 data->err = err_to_att(ret);
1538 return BT_GATT_ITER_STOP;
1539 }
1540
1541 return BT_GATT_ITER_CONTINUE;
1542 }
1543
/* Common handler for Read and Read Blob requests: reads the value of a
 * single attribute at the given offset and sends either the response PDU
 * or an Error Response.
 *
 * Returns 0 when a response (or error response) was sent here, otherwise
 * an ATT error code for the caller to report.
 */
static uint8_t att_read_rsp(struct bt_att_chan *chan, uint8_t op, uint8_t rsp,
			    uint16_t handle, uint16_t offset)
{
	struct read_data data;

	/* A change-unaware client gets the "database out of sync" error at
	 * most once per channel; afterwards the request yields no response.
	 */
	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	if (!handle) {
		return BT_ATT_ERR_INVALID_HANDLE;
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, rsp);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.offset = offset;

	/* Pre-set error if no attr will be found in handle */
	data.err = BT_ATT_ERR_INVALID_HANDLE;

	bt_gatt_foreach_attr(handle, handle, read_cb, &data);

	/* In case of error discard data and respond with an error */
	if (data.err) {
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, op, handle, data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
1588
att_read_req(struct bt_att_chan * chan,struct net_buf * buf)1589 static uint8_t att_read_req(struct bt_att_chan *chan, struct net_buf *buf)
1590 {
1591 struct bt_att_read_req *req;
1592 uint16_t handle;
1593
1594 req = (void *)buf->data;
1595
1596 handle = sys_le16_to_cpu(req->handle);
1597
1598 LOG_DBG("handle 0x%04x", handle);
1599
1600 return att_read_rsp(chan, BT_ATT_OP_READ_REQ, BT_ATT_OP_READ_RSP,
1601 handle, 0);
1602 }
1603
att_read_blob_req(struct bt_att_chan * chan,struct net_buf * buf)1604 static uint8_t att_read_blob_req(struct bt_att_chan *chan, struct net_buf *buf)
1605 {
1606 struct bt_att_read_blob_req *req;
1607 uint16_t handle, offset;
1608
1609 req = (void *)buf->data;
1610
1611 handle = sys_le16_to_cpu(req->handle);
1612 offset = sys_le16_to_cpu(req->offset);
1613
1614 LOG_DBG("handle 0x%04x offset %u", handle, offset);
1615
1616 return att_read_rsp(chan, BT_ATT_OP_READ_BLOB_REQ,
1617 BT_ATT_OP_READ_BLOB_RSP, handle, offset);
1618 }
1619
#if defined(CONFIG_BT_GATT_READ_MULTIPLE)
/* Handle an ATT Read Multiple request: concatenate the values of every
 * listed handle into one response, or send an Error Response on the
 * first failing handle.
 */
static uint8_t att_read_mult_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct read_data data;
	uint16_t handle;

	/* A change-unaware client gets the "database out of sync" error at
	 * most once per channel; afterwards the request yields no response.
	 */
	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_MULT_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;

	while (buf->len >= sizeof(uint16_t)) {
		handle = net_buf_pull_le16(buf);

		LOG_DBG("handle 0x%04x ", handle);

		/* An Error Response shall be sent by the server in response to
		 * the Read Multiple Request [....] if a read operation is not
		 * permitted on any of the Characteristic Values.
		 *
		 * If handle is not valid then return invalid handle error.
		 * If handle is found error will be cleared by read_cb.
		 */
		data.err = BT_ATT_ERR_INVALID_HANDLE;

		bt_gatt_foreach_attr(handle, handle, read_cb, &data);

		/* Stop reading in case of error */
		if (data.err) {
			net_buf_unref(data.buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, BT_ATT_OP_READ_MULT_REQ, handle,
				     data.err);
			return 0;
		}
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
#endif /* CONFIG_BT_GATT_READ_MULTIPLE */
1674
1675 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
read_vl_cb(const struct bt_gatt_attr * attr,uint16_t handle,void * user_data)1676 static uint8_t read_vl_cb(const struct bt_gatt_attr *attr, uint16_t handle,
1677 void *user_data)
1678 {
1679 struct read_data *data = user_data;
1680 struct bt_att_chan *chan = data->chan;
1681 struct bt_conn *conn = chan->chan.chan.conn;
1682 struct bt_att_read_mult_vl_rsp *rsp;
1683 int read;
1684
1685 LOG_DBG("handle 0x%04x", handle);
1686
1687 /*
1688 * If any attribute is founded in handle range it means that error
1689 * should be changed from pre-set: invalid handle error to no error.
1690 */
1691 data->err = 0x00;
1692
1693 /* Check attribute permissions */
1694 data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
1695 if (data->err) {
1696 return BT_GATT_ITER_STOP;
1697 }
1698
1699 /* Check the attribute authorization logic */
1700 if (!attr_read_authorize(conn, attr)) {
1701 data->err = BT_ATT_ERR_AUTHORIZATION;
1702 return BT_GATT_ITER_STOP;
1703 }
1704
1705 /* The Length Value Tuple List may be truncated within the first two
1706 * octets of a tuple due to the size limits of the current ATT_MTU.
1707 */
1708 if (bt_att_mtu(chan) - data->buf->len < 2) {
1709 return BT_GATT_ITER_STOP;
1710 }
1711
1712 rsp = net_buf_add(data->buf, sizeof(*rsp));
1713
1714 read = att_chan_read(chan, attr, data->buf, data->offset, NULL, NULL);
1715 if (read < 0) {
1716 data->err = err_to_att(read);
1717 return BT_GATT_ITER_STOP;
1718 }
1719
1720 rsp->len = read;
1721
1722 return BT_GATT_ITER_CONTINUE;
1723 }
1724
/* Handle an ATT Read Multiple Variable Length request: append a
 * length-prefixed value tuple for every listed handle, or send an Error
 * Response on the first failing handle.
 */
static uint8_t att_read_mult_vl_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct read_data data;
	uint16_t handle;

	/* A change-unaware client gets the "database out of sync" error at
	 * most once per channel; afterwards the request yields no response.
	 */
	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_MULT_VL_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;

	while (buf->len >= sizeof(uint16_t)) {
		handle = net_buf_pull_le16(buf);

		LOG_DBG("handle 0x%04x ", handle);

		/* If handle is not valid then return invalid handle error.
		 * If handle is found error will be cleared by read_vl_cb.
		 */
		data.err = BT_ATT_ERR_INVALID_HANDLE;

		bt_gatt_foreach_attr(handle, handle, read_vl_cb, &data);

		/* Stop reading in case of error */
		if (data.err) {
			net_buf_unref(data.buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, BT_ATT_OP_READ_MULT_VL_REQ, handle,
				     data.err);
			return 0;
		}
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
#endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
1774
/* Iterator state for building a Read By Group Type response */
struct read_group_data {
	struct bt_att_chan *chan;	/* Channel the response is built for */
	struct bt_uuid *uuid;		/* Requested group type UUID */
	struct net_buf *buf;		/* Response PDU under construction */
	struct bt_att_read_group_rsp *rsp; /* Response header (holds fixed entry size) */
	struct bt_att_group_data *group; /* Currently open group entry, NULL if none */
};
1782
/* Per-read callback for Read By Group Type: enforces that every group
 * entry in the response has the same length, as the PDU carries a single
 * entry size.
 */
static bool attr_read_group_cb(struct net_buf *frag, ssize_t read,
			       void *user_data)
{
	struct read_group_data *data = user_data;

	if (!data->rsp->len) {
		/* Set len to be the first group found */
		data->rsp->len = read + sizeof(*data->group);
	} else if (data->rsp->len != read + sizeof(*data->group)) {
		/* Different size: drop the already-added group header and
		 * signal the caller to stop before committing this read.
		 */
		data->buf->len -= sizeof(*data->group);
		data->group = NULL;
		return false;
	}

	return true;
}
1800
/* bt_gatt_foreach_attr() callback for Read By Group Type: opens a group
 * entry at each matching service declaration and extends the open
 * group's end_handle over the service's remaining attributes.
 */
static uint8_t read_group_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			     void *user_data)
{
	struct read_group_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	int read;

	/* Update group end_handle if attribute is not a service */
	if (bt_uuid_cmp(attr->uuid, BT_UUID_GATT_PRIMARY) &&
	    bt_uuid_cmp(attr->uuid, BT_UUID_GATT_SECONDARY)) {
		if (data->group &&
		    handle > sys_le16_to_cpu(data->group->end_handle)) {
			data->group->end_handle = sys_cpu_to_le16(handle);
		}
		return BT_GATT_ITER_CONTINUE;
	}

	/* If the group type doesn't match, skip and close any open group */
	if (bt_uuid_cmp(attr->uuid, data->uuid)) {
		data->group = NULL;
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/* Stop if there is no space left */
	if (data->rsp->len &&
	    bt_att_mtu(chan) - data->buf->len < data->rsp->len) {
		return BT_GATT_ITER_STOP;
	}

	/* Fast forward to next group position */
	data->group = net_buf_add(data->buf, sizeof(*data->group));

	/* Initialize group handle range */
	data->group->start_handle = sys_cpu_to_le16(handle);
	data->group->end_handle = sys_cpu_to_le16(handle);

	/* Read attribute value and store in the buffer */
	read = att_chan_read(chan, attr, data->buf, 0, attr_read_group_cb,
			     data);
	if (read < 0) {
		/* TODO: Handle read errors */
		return BT_GATT_ITER_STOP;
	}

	/* attr_read_group_cb() cleared the group on a size mismatch */
	if (!data->group) {
		return BT_GATT_ITER_STOP;
	}

	/* Continue only if there is still space for more items */
	return BT_GATT_ITER_CONTINUE;
}
1854
att_read_group_rsp(struct bt_att_chan * chan,struct bt_uuid * uuid,uint16_t start_handle,uint16_t end_handle)1855 static uint8_t att_read_group_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
1856 uint16_t start_handle, uint16_t end_handle)
1857 {
1858 struct read_group_data data;
1859
1860 (void)memset(&data, 0, sizeof(data));
1861
1862 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_GROUP_RSP);
1863 if (!data.buf) {
1864 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1865 }
1866
1867 data.chan = chan;
1868 data.uuid = uuid;
1869 data.rsp = net_buf_add(data.buf, sizeof(*data.rsp));
1870 data.rsp->len = 0U;
1871 data.group = NULL;
1872
1873 bt_gatt_foreach_attr(start_handle, end_handle, read_group_cb, &data);
1874
1875 if (!data.rsp->len) {
1876 net_buf_unref(data.buf);
1877 /* Respond here since handle is set */
1878 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
1879 BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
1880 return 0;
1881 }
1882
1883 bt_att_chan_send_rsp(chan, data.buf);
1884
1885 return 0;
1886 }
1887
att_read_group_req(struct bt_att_chan * chan,struct net_buf * buf)1888 static uint8_t att_read_group_req(struct bt_att_chan *chan, struct net_buf *buf)
1889 {
1890 struct bt_att_read_group_req *req;
1891 uint16_t start_handle, end_handle, err_handle;
1892 union {
1893 struct bt_uuid uuid;
1894 struct bt_uuid_16 u16;
1895 struct bt_uuid_128 u128;
1896 } u;
1897 uint8_t uuid_len = buf->len - sizeof(*req);
1898
1899 /* Type can only be UUID16 or UUID128 */
1900 if (uuid_len != 2 && uuid_len != 16) {
1901 return BT_ATT_ERR_INVALID_PDU;
1902 }
1903
1904 req = net_buf_pull_mem(buf, sizeof(*req));
1905
1906 start_handle = sys_le16_to_cpu(req->start_handle);
1907 end_handle = sys_le16_to_cpu(req->end_handle);
1908
1909 if (!bt_uuid_create(&u.uuid, req->uuid, uuid_len)) {
1910 return BT_ATT_ERR_UNLIKELY;
1911 }
1912
1913 LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %s", start_handle, end_handle,
1914 bt_uuid_str(&u.uuid));
1915
1916 if (!range_is_valid(start_handle, end_handle, &err_handle)) {
1917 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, err_handle,
1918 BT_ATT_ERR_INVALID_HANDLE);
1919 return 0;
1920 }
1921
1922 /* Core v4.2, Vol 3, sec 2.5.3 Attribute Grouping:
1923 * Not all of the grouping attributes can be used in the ATT
1924 * Read By Group Type Request. The "Primary Service" and "Secondary
1925 * Service" grouping types may be used in the Read By Group Type
1926 * Request. The "Characteristic" grouping type shall not be used in
1927 * the ATT Read By Group Type Request.
1928 */
1929 if (bt_uuid_cmp(&u.uuid, BT_UUID_GATT_PRIMARY) &&
1930 bt_uuid_cmp(&u.uuid, BT_UUID_GATT_SECONDARY)) {
1931 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
1932 BT_ATT_ERR_UNSUPPORTED_GROUP_TYPE);
1933 return 0;
1934 }
1935
1936 return att_read_group_rsp(chan, &u.uuid, start_handle, end_handle);
1937 }
1938
/* State for a write iteration (request, command or execute) */
struct write_data {
	struct bt_conn *conn;	/* Connection the request arrived on */
	struct net_buf *buf;	/* Response PDU; only set when a response is required */
	uint8_t req;		/* Request opcode; 0 marks a Write Command */
	const void *value;	/* Value bytes to write */
	uint16_t len;		/* Length of value in octets */
	uint16_t offset;	/* Write offset */
	uint8_t err;		/* Pre-set error, cleared on success */
};
1948
attr_write_authorize(struct bt_conn * conn,const struct bt_gatt_attr * attr)1949 static bool attr_write_authorize(struct bt_conn *conn,
1950 const struct bt_gatt_attr *attr)
1951 {
1952 if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
1953 return true;
1954 }
1955
1956 if (!authorization_cb || !authorization_cb->write_authorize) {
1957 return true;
1958 }
1959
1960 return authorization_cb->write_authorize(conn, attr);
1961 }
1962
/* Write a single attribute's value, enforcing permissions and the custom
 * authorization hook.  Used as a bt_gatt_foreach_attr() callback for
 * Write Request, Write Command and Execute Write.
 */
static uint8_t write_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			void *user_data)
{
	struct write_data *data = user_data;
	int write;
	uint8_t flags = 0U;

	LOG_DBG("handle 0x%04x offset %u", handle, data->offset);

	/* Check attribute permissions */
	data->err = bt_gatt_check_perm(data->conn, attr,
				       BT_GATT_PERM_WRITE_MASK);
	if (data->err) {
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_write_authorize(data->conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/* Set command flag if not a request */
	if (!data->req) {
		flags |= BT_GATT_WRITE_FLAG_CMD;
	} else if (data->req == BT_ATT_OP_EXEC_WRITE_REQ) {
		flags |= BT_GATT_WRITE_FLAG_EXECUTE;
	}

	/* Write attribute value; a short write is treated as an error */
	write = attr->write(data->conn, attr, data->value, data->len,
			    data->offset, flags);
	if (write < 0 || write != data->len) {
		data->err = err_to_att(write);
		return BT_GATT_ITER_STOP;
	}

	data->err = 0U;

	return BT_GATT_ITER_CONTINUE;
}
2004
/* Common handler for Write Request, Write Command and Execute Write:
 * performs the write via write_cb() and, when rsp is non-zero, sends
 * the response PDU or an Error Response.
 *
 * Returns 0 when handling is complete; for Execute Write the attribute
 * error code is returned to the caller instead.
 */
static uint8_t att_write_rsp(struct bt_att_chan *chan, uint8_t req, uint8_t rsp,
			     uint16_t handle, uint16_t offset, const void *value,
			     uint16_t len)
{
	struct write_data data;

	/* req != 0 (a request, not a command) is forwarded to
	 * bt_gatt_change_aware(); the out-of-sync error is sent at most
	 * once per channel.
	 */
	if (!bt_gatt_change_aware(chan->att->conn, req ? true : false)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	if (!handle) {
		return BT_ATT_ERR_INVALID_HANDLE;
	}

	(void)memset(&data, 0, sizeof(data));

	/* Only allocate buf if required to respond */
	if (rsp) {
		data.buf = bt_att_chan_create_pdu(chan, rsp, 0);
		if (!data.buf) {
			return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
		}
	}

	data.conn = chan->att->conn;
	data.req = req;
	data.offset = offset;
	data.value = value;
	data.len = len;
	/* Pre-set error in case no attribute is found at handle */
	data.err = BT_ATT_ERR_INVALID_HANDLE;

	bt_gatt_foreach_attr(handle, handle, write_cb, &data);

	if (data.err) {
		/* In case of error discard data and respond with an error */
		if (rsp) {
			net_buf_unref(data.buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, req, handle, data.err);
		}
		/* Execute Write reports the error through the caller */
		return req == BT_ATT_OP_EXEC_WRITE_REQ ? data.err : 0;
	}

	if (data.buf) {
		bt_att_chan_send_rsp(chan, data.buf);
	}

	return 0;
}
2058
att_write_req(struct bt_att_chan * chan,struct net_buf * buf)2059 static uint8_t att_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2060 {
2061 uint16_t handle;
2062
2063 handle = net_buf_pull_le16(buf);
2064
2065 LOG_DBG("handle 0x%04x", handle);
2066
2067 return att_write_rsp(chan, BT_ATT_OP_WRITE_REQ, BT_ATT_OP_WRITE_RSP,
2068 handle, 0, buf->data, buf->len);
2069 }
2070
#if CONFIG_BT_ATT_PREPARE_COUNT > 0
/* State for queueing one Prepare Write request */
struct prep_data {
	struct bt_conn *conn;	/* Connection the request arrived on */
	struct net_buf *buf;	/* Queued prepare buffer (from prep_pool) */
	const void *value;	/* Value bytes from the request */
	uint16_t len;		/* Length of value in octets */
	uint16_t offset;	/* Write offset from the request */
	uint8_t err;		/* Pre-set error, cleared on success */
};
2080
/* Validate and queue one Prepare Write: checks permissions and the
 * authorization hook, optionally lets the handler vet the data when
 * BT_GATT_PERM_PREPARE_WRITE is set, then copies the value into a buffer
 * from prep_pool for later execution.
 */
static uint8_t prep_write_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			     void *user_data)
{
	struct prep_data *data = user_data;
	struct bt_attr_data *attr_data;
	int write;

	LOG_DBG("handle 0x%04x offset %u", handle, data->offset);

	/* Check attribute permissions */
	data->err = bt_gatt_check_perm(data->conn, attr,
				       BT_GATT_PERM_WRITE_MASK);
	if (data->err) {
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_write_authorize(data->conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/* Check if attribute requires handler to accept the data */
	if (!(attr->perm & BT_GATT_PERM_PREPARE_WRITE)) {
		goto append;
	}

	/* Write attribute value to check if device is authorized */
	write = attr->write(data->conn, attr, data->value, data->len,
			    data->offset, BT_GATT_WRITE_FLAG_PREPARE);
	if (write != 0) {
		data->err = err_to_att(write);
		return BT_GATT_ITER_STOP;
	}

append:
	/* Copy data into the outstanding queue */
	data->buf = net_buf_alloc(&prep_pool, K_NO_WAIT);
	if (!data->buf) {
		data->err = BT_ATT_ERR_PREPARE_QUEUE_FULL;
		return BT_GATT_ITER_STOP;
	}

	/* Stash handle and offset in the buffer's user data for execution */
	attr_data = net_buf_user_data(data->buf);
	attr_data->handle = handle;
	attr_data->offset = data->offset;

	net_buf_add_mem(data->buf, data->value, data->len);

	data->err = 0U;

	return BT_GATT_ITER_CONTINUE;
}
2134
/* Handle a single Prepare Write: validate and queue the data via
 * prep_write_cb(), then echo handle/offset/value back in the response.
 */
static uint8_t att_prep_write_rsp(struct bt_att_chan *chan, uint16_t handle,
				  uint16_t offset, const void *value, uint8_t len)
{
	struct prep_data data;
	struct bt_att_prepare_write_rsp *rsp;

	/* A change-unaware client gets the "database out of sync" error at
	 * most once per channel; afterwards the request yields no response.
	 */
	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	if (!handle) {
		return BT_ATT_ERR_INVALID_HANDLE;
	}

	(void)memset(&data, 0, sizeof(data));

	data.conn = chan->att->conn;
	data.offset = offset;
	data.value = value;
	data.len = len;
	/* Pre-set error in case no attribute is found at handle */
	data.err = BT_ATT_ERR_INVALID_HANDLE;

	bt_gatt_foreach_attr(handle, handle, prep_write_cb, &data);

	if (data.err) {
		/* Respond here since handle is set */
		send_err_rsp(chan, BT_ATT_OP_PREPARE_WRITE_REQ, handle,
			     data.err);
		return 0;
	}

	LOG_DBG("buf %p handle 0x%04x offset %u", data.buf, handle, offset);

	/* Store buffer in the outstanding queue */
	net_buf_slist_put(&chan->att->prep_queue, data.buf);

	/* Generate response: echo back handle, offset and value */
	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_PREPARE_WRITE_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	rsp = net_buf_add(data.buf, sizeof(*rsp));
	rsp->handle = sys_cpu_to_le16(handle);
	rsp->offset = sys_cpu_to_le16(offset);
	net_buf_add(data.buf, len);
	memcpy(rsp->value, value, len);

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
#endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2192
att_prepare_write_req(struct bt_att_chan * chan,struct net_buf * buf)2193 static uint8_t att_prepare_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2194 {
2195 #if CONFIG_BT_ATT_PREPARE_COUNT == 0
2196 return BT_ATT_ERR_NOT_SUPPORTED;
2197 #else
2198 struct bt_att_prepare_write_req *req;
2199 uint16_t handle, offset;
2200
2201 req = net_buf_pull_mem(buf, sizeof(*req));
2202
2203 handle = sys_le16_to_cpu(req->handle);
2204 offset = sys_le16_to_cpu(req->offset);
2205
2206 LOG_DBG("handle 0x%04x offset %u", handle, offset);
2207
2208 return att_prep_write_rsp(chan, handle, offset, buf->data, buf->len);
2209 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2210 }
2211
2212 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
/* Pop from @p list every queued prepare-write chunk matching @p handle and
 * append its payload to @p buf, validating that the chunks form one
 * contiguous value.
 *
 * @param handle Attribute handle being reassembled.
 * @param offset Offset of the data already placed in @p buf.
 * @param list   Prepare queue holding the remaining chunks.
 * @param buf    Destination for the reassembled value.
 *
 * @return BT_ATT_ERR_SUCCESS, or an ATT error to send in an Error Response.
 */
static uint8_t exec_write_reassemble(uint16_t handle, uint16_t offset,
				     sys_slist_t *list,
				     struct net_buf_simple *buf)
{
	struct net_buf *entry, *next;
	sys_snode_t *prev;

	/* prev tracks the last node left on the list so matching nodes can
	 * be unlinked with sys_slist_remove() while iterating safely.
	 */
	prev = NULL;
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(list, entry, next, node) {
		struct bt_attr_data *tmp_data = net_buf_user_data(entry);

		LOG_DBG("entry %p handle 0x%04x, offset %u", entry, tmp_data->handle,
			tmp_data->offset);

		if (tmp_data->handle == handle) {
			if (tmp_data->offset == 0) {
				/* Multiple writes to the same handle can occur
				 * in a prepare write queue. If the offset is 0,
				 * that should mean that it's a new write to the
				 * same handle, and we break to process the
				 * first write.
				 */

				LOG_DBG("tmp_data->offset == 0");
				break;
			}

			if (tmp_data->offset != buf->len + offset) {
				/* We require that the offset is increasing
				 * properly to avoid badly reassembled buffers
				 */

				LOG_DBG("Bad offset %u (%u, %u)", tmp_data->offset, buf->len,
					offset);

				return BT_ATT_ERR_INVALID_OFFSET;
			}

			/* Reject values that would overflow the destination */
			if (buf->len + entry->len > buf->size) {
				return BT_ATT_ERR_INVALID_ATTRIBUTE_LEN;
			}

			net_buf_simple_add_mem(buf, entry->data, entry->len);
			sys_slist_remove(list, prev, &entry->node);
			net_buf_unref(entry);
		} else {
			prev = &entry->node;
		}
	}

	return BT_ATT_ERR_SUCCESS;
}
2265
/* Execute (or cancel) all queued prepare writes for this bearer.
 *
 * Chunks sharing an attribute handle are reassembled into one contiguous
 * value and handed to the GATT layer; with flags != BT_ATT_FLAG_EXEC the
 * queued data is simply discarded.
 *
 * @param chan  ATT bearer the Execute Write Request arrived on.
 * @param flags Flags octet from the Execute Write Request.
 *
 * @return 0 when a response (success or error) has been sent, otherwise an
 *         ATT error code for the caller to turn into an Error Response.
 */
static uint8_t att_exec_write_rsp(struct bt_att_chan *chan, uint8_t flags)
{
	struct net_buf *buf;
	uint8_t err = 0U;

	/* The following code will iterate on all prepare writes in the
	 * prep_queue, and reassemble those that share the same handle.
	 * Once a handle has been ressembled, it is sent to the upper layers,
	 * and the next handle is processed
	 */
	while (!sys_slist_is_empty(&chan->att->prep_queue)) {
		struct bt_attr_data *data;
		uint16_t handle;

		NET_BUF_SIMPLE_DEFINE_STATIC(reassembled_data,
					     MIN(BT_ATT_MAX_ATTRIBUTE_LEN,
						 CONFIG_BT_ATT_PREPARE_COUNT * BT_ATT_BUF_SIZE));

		buf = net_buf_slist_get(&chan->att->prep_queue);
		data = net_buf_user_data(buf);
		handle = data->handle;

		LOG_DBG("buf %p handle 0x%04x offset %u", buf, handle, data->offset);

		net_buf_simple_reset(&reassembled_data);
		net_buf_simple_add_mem(&reassembled_data, buf->data, buf->len);

		err = exec_write_reassemble(handle, data->offset,
					    &chan->att->prep_queue,
					    &reassembled_data);
		if (err != BT_ATT_ERR_SUCCESS) {
			/* The buffer popped above is no longer on the queue:
			 * release our reference before bailing out so it is
			 * not leaked.
			 */
			net_buf_unref(buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ,
				     handle, err);
			return 0;
		}

		/* Just discard the data if an error was set */
		if (!err && flags == BT_ATT_FLAG_EXEC) {
			err = att_write_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ, 0,
					    handle, data->offset,
					    reassembled_data.data,
					    reassembled_data.len);
			if (err) {
				/* Respond here since handle is set */
				send_err_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ,
					     data->handle, err);
			}
		}

		net_buf_unref(buf);
	}

	if (err) {
		return 0;
	}

	/* Generate response */
	buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_EXEC_WRITE_RSP);
	if (!buf) {
		return BT_ATT_ERR_UNLIKELY;
	}

	bt_att_chan_send_rsp(chan, buf);

	return 0;
}
2332 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2333
2334
att_exec_write_req(struct bt_att_chan * chan,struct net_buf * buf)2335 static uint8_t att_exec_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2336 {
2337 #if CONFIG_BT_ATT_PREPARE_COUNT == 0
2338 return BT_ATT_ERR_NOT_SUPPORTED;
2339 #else
2340 struct bt_att_exec_write_req *req;
2341
2342 req = (void *)buf->data;
2343
2344 LOG_DBG("flags 0x%02x", req->flags);
2345
2346 return att_exec_write_rsp(chan, req->flags);
2347 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2348 }
2349
att_write_cmd(struct bt_att_chan * chan,struct net_buf * buf)2350 static uint8_t att_write_cmd(struct bt_att_chan *chan, struct net_buf *buf)
2351 {
2352 uint16_t handle;
2353
2354 handle = net_buf_pull_le16(buf);
2355
2356 LOG_DBG("handle 0x%04x", handle);
2357
2358 return att_write_rsp(chan, 0, 0, handle, 0, buf->data, buf->len);
2359 }
2360
2361 #if defined(CONFIG_BT_SIGNING)
/* Handle an ATT Signed Write Command: verify the signature over the full
 * PDU and, if valid, apply the write. Commands never generate a response.
 */
static uint8_t att_signed_write_cmd(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_conn *conn = chan->chan.chan.conn;
	struct bt_att_signed_write_cmd *req;
	uint16_t handle;
	int err;

	/* The Signed Write Without Response sub-procedure shall only be supported
	 * on the LE Fixed Channel Unenhanced ATT bearer.
	 */
	if (bt_att_is_enhanced(chan)) {
		/* No response for this command */
		return 0;
	}

	req = (void *)buf->data;

	handle = sys_le16_to_cpu(req->handle);

	LOG_DBG("handle 0x%04x", handle);

	/* Verifying data requires full buffer including attribute header */
	net_buf_push(buf, sizeof(struct bt_att_hdr));
	err = bt_smp_sign_verify(conn, buf);
	if (err) {
		LOG_ERR("Error verifying data");
		/* No response for this command */
		return 0;
	}

	/* Strip header and handle again; only the value (minus the trailing
	 * signature, excluded via the length below) reaches the write handler.
	 */
	net_buf_pull(buf, sizeof(struct bt_att_hdr));
	net_buf_pull(buf, sizeof(*req));

	return att_write_rsp(chan, 0, 0, handle, 0, buf->data,
			     buf->len - sizeof(struct bt_att_signature));
}
2398 #endif /* CONFIG_BT_SIGNING */
2399
2400 #if defined(CONFIG_BT_GATT_CLIENT)
2401 #if defined(CONFIG_BT_ATT_RETRY_ON_SEC_ERR)
att_change_security(struct bt_conn * conn,uint8_t err)2402 static int att_change_security(struct bt_conn *conn, uint8_t err)
2403 {
2404 bt_security_t sec;
2405
2406 switch (err) {
2407 case BT_ATT_ERR_INSUFFICIENT_ENCRYPTION:
2408 if (conn->sec_level >= BT_SECURITY_L2)
2409 return -EALREADY;
2410 sec = BT_SECURITY_L2;
2411 break;
2412 case BT_ATT_ERR_AUTHENTICATION:
2413 if (conn->sec_level < BT_SECURITY_L2) {
2414 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2415 * page 375:
2416 *
2417 * If an LTK is not available, the service request
2418 * shall be rejected with the error code 'Insufficient
2419 * Authentication'.
2420 * Note: When the link is not encrypted, the error code
2421 * "Insufficient Authentication" does not indicate that
2422 * MITM protection is required.
2423 */
2424 sec = BT_SECURITY_L2;
2425 } else if (conn->sec_level < BT_SECURITY_L3) {
2426 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2427 * page 375:
2428 *
2429 * If an authenticated pairing is required but only an
2430 * unauthenticated pairing has occurred and the link is
2431 * currently encrypted, the service request shall be
2432 * rejected with the error code 'Insufficient
2433 * Authentication'.
2434 * Note: When unauthenticated pairing has occurred and
2435 * the link is currently encrypted, the error code
2436 * 'Insufficient Authentication' indicates that MITM
2437 * protection is required.
2438 */
2439 sec = BT_SECURITY_L3;
2440 } else if (conn->sec_level < BT_SECURITY_L4) {
2441 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2442 * page 375:
2443 *
2444 * If LE Secure Connections authenticated pairing is
2445 * required but LE legacy pairing has occurred and the
2446 * link is currently encrypted, the service request
2447 * shall be rejected with the error code ''Insufficient
2448 * Authentication'.
2449 */
2450 sec = BT_SECURITY_L4;
2451 } else {
2452 return -EALREADY;
2453 }
2454 break;
2455 default:
2456 return -EINVAL;
2457 }
2458
2459 return bt_conn_set_security(conn, sec);
2460 }
2461 #endif /* CONFIG_BT_ATT_RETRY_ON_SEC_ERR */
2462
/* Client side: process an incoming ATT Error Response.
 *
 * May transparently retry the pending request after elevating link
 * security (CONFIG_BT_ATT_RETRY_ON_SEC_ERR); otherwise completes the
 * pending request with the received error code.
 */
static uint8_t att_error_rsp(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_error_rsp *rsp;
	uint8_t err;

	rsp = (void *)buf->data;

	LOG_DBG("request 0x%02x handle 0x%04x error 0x%02x", rsp->request,
		sys_le16_to_cpu(rsp->handle), rsp->error);

	/* Don't retry if there is no req pending or it has been cancelled.
	 *
	 * BLUETOOTH SPECIFICATION Version 5.2 [Vol 3, Part F]
	 * page 1423:
	 *
	 * If an error code is received in the ATT_ERROR_RSP PDU that is not
	 * understood by the client, for example an error code that was reserved
	 * for future use that is now being used in a future version of the
	 * specification, then the ATT_ERROR_RSP PDU shall still be considered to
	 * state that the given request cannot be performed for an unknown reason.
	 */
	if (!chan->req || chan->req == &cancel || !rsp->error) {
		err = BT_ATT_ERR_UNLIKELY;
		goto done;
	}

	err = rsp->error;

#if defined(CONFIG_BT_ATT_RETRY_ON_SEC_ERR)
	int ret;

	/* Check if error can be handled by elevating security. */
	ret = att_change_security(chan->chan.chan.conn, err);
	if (ret == 0 || ret == -EBUSY) {
		/* ATT timeout work is normally cancelled in att_handle_rsp.
		 * However retrying is special case, so the timeout shall
		 * be cancelled here.
		 */
		k_work_cancel_delayable(&chan->timeout_work);

		/* Request will be re-sent from bt_att_encrypt_change() once
		 * the security procedure completes.
		 */
		chan->req->retrying = true;
		return 0;
	}
#endif /* CONFIG_BT_ATT_RETRY_ON_SEC_ERR */

done:
	return att_handle_rsp(chan, NULL, 0, err);
}
2511
att_handle_find_info_rsp(struct bt_att_chan * chan,struct net_buf * buf)2512 static uint8_t att_handle_find_info_rsp(struct bt_att_chan *chan,
2513 struct net_buf *buf)
2514 {
2515 LOG_DBG("");
2516
2517 return att_handle_rsp(chan, buf->data, buf->len, 0);
2518 }
2519
att_handle_find_type_rsp(struct bt_att_chan * chan,struct net_buf * buf)2520 static uint8_t att_handle_find_type_rsp(struct bt_att_chan *chan,
2521 struct net_buf *buf)
2522 {
2523 LOG_DBG("");
2524
2525 return att_handle_rsp(chan, buf->data, buf->len, 0);
2526 }
2527
att_handle_read_type_rsp(struct bt_att_chan * chan,struct net_buf * buf)2528 static uint8_t att_handle_read_type_rsp(struct bt_att_chan *chan,
2529 struct net_buf *buf)
2530 {
2531 LOG_DBG("");
2532
2533 return att_handle_rsp(chan, buf->data, buf->len, 0);
2534 }
2535
att_handle_read_rsp(struct bt_att_chan * chan,struct net_buf * buf)2536 static uint8_t att_handle_read_rsp(struct bt_att_chan *chan,
2537 struct net_buf *buf)
2538 {
2539 LOG_DBG("");
2540
2541 return att_handle_rsp(chan, buf->data, buf->len, 0);
2542 }
2543
att_handle_read_blob_rsp(struct bt_att_chan * chan,struct net_buf * buf)2544 static uint8_t att_handle_read_blob_rsp(struct bt_att_chan *chan,
2545 struct net_buf *buf)
2546 {
2547 LOG_DBG("");
2548
2549 return att_handle_rsp(chan, buf->data, buf->len, 0);
2550 }
2551
2552 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
att_handle_read_mult_rsp(struct bt_att_chan * chan,struct net_buf * buf)2553 static uint8_t att_handle_read_mult_rsp(struct bt_att_chan *chan,
2554 struct net_buf *buf)
2555 {
2556 LOG_DBG("");
2557
2558 return att_handle_rsp(chan, buf->data, buf->len, 0);
2559 }
2560
2561 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
2562
2563 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
att_handle_read_mult_vl_rsp(struct bt_att_chan * chan,struct net_buf * buf)2564 static uint8_t att_handle_read_mult_vl_rsp(struct bt_att_chan *chan,
2565 struct net_buf *buf)
2566 {
2567 LOG_DBG("");
2568
2569 return att_handle_rsp(chan, buf->data, buf->len, 0);
2570 }
2571 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
2572
att_handle_read_group_rsp(struct bt_att_chan * chan,struct net_buf * buf)2573 static uint8_t att_handle_read_group_rsp(struct bt_att_chan *chan,
2574 struct net_buf *buf)
2575 {
2576 LOG_DBG("");
2577
2578 return att_handle_rsp(chan, buf->data, buf->len, 0);
2579 }
2580
att_handle_write_rsp(struct bt_att_chan * chan,struct net_buf * buf)2581 static uint8_t att_handle_write_rsp(struct bt_att_chan *chan,
2582 struct net_buf *buf)
2583 {
2584 LOG_DBG("");
2585
2586 return att_handle_rsp(chan, buf->data, buf->len, 0);
2587 }
2588
att_handle_prepare_write_rsp(struct bt_att_chan * chan,struct net_buf * buf)2589 static uint8_t att_handle_prepare_write_rsp(struct bt_att_chan *chan,
2590 struct net_buf *buf)
2591 {
2592 LOG_DBG("");
2593
2594 return att_handle_rsp(chan, buf->data, buf->len, 0);
2595 }
2596
att_handle_exec_write_rsp(struct bt_att_chan * chan,struct net_buf * buf)2597 static uint8_t att_handle_exec_write_rsp(struct bt_att_chan *chan,
2598 struct net_buf *buf)
2599 {
2600 LOG_DBG("");
2601
2602 return att_handle_rsp(chan, buf->data, buf->len, 0);
2603 }
2604
att_notify(struct bt_att_chan * chan,struct net_buf * buf)2605 static uint8_t att_notify(struct bt_att_chan *chan, struct net_buf *buf)
2606 {
2607 uint16_t handle;
2608
2609 handle = net_buf_pull_le16(buf);
2610
2611 LOG_DBG("chan %p handle 0x%04x", chan, handle);
2612
2613 bt_gatt_notification(chan->att->conn, handle, buf->data, buf->len);
2614
2615 return 0;
2616 }
2617
att_indicate(struct bt_att_chan * chan,struct net_buf * buf)2618 static uint8_t att_indicate(struct bt_att_chan *chan, struct net_buf *buf)
2619 {
2620 uint16_t handle;
2621
2622 handle = net_buf_pull_le16(buf);
2623
2624 LOG_DBG("chan %p handle 0x%04x", chan, handle);
2625
2626 bt_gatt_notification(chan->att->conn, handle, buf->data, buf->len);
2627
2628 buf = bt_att_chan_create_pdu(chan, BT_ATT_OP_CONFIRM, 0);
2629 if (!buf) {
2630 return 0;
2631 }
2632
2633 bt_att_chan_send_rsp(chan, buf);
2634
2635 return 0;
2636 }
2637
att_notify_mult(struct bt_att_chan * chan,struct net_buf * buf)2638 static uint8_t att_notify_mult(struct bt_att_chan *chan, struct net_buf *buf)
2639 {
2640 LOG_DBG("chan %p", chan);
2641
2642 bt_gatt_mult_notification(chan->att->conn, buf->data, buf->len);
2643
2644 return 0;
2645 }
2646 #endif /* CONFIG_BT_GATT_CLIENT */
2647
att_confirm(struct bt_att_chan * chan,struct net_buf * buf)2648 static uint8_t att_confirm(struct bt_att_chan *chan, struct net_buf *buf)
2649 {
2650 LOG_DBG("");
2651
2652 return att_handle_rsp(chan, buf->data, buf->len, 0);
2653 }
2654
/* Dispatch table mapping each ATT opcode to its minimum payload length
 * (after the opcode octet), its PDU category and its handler function.
 * Scanned linearly by bt_att_recv().
 */
static const struct att_handler {
	uint8_t op;         /* ATT opcode */
	uint8_t expect_len; /* Minimum payload length for this PDU */
	att_type_t type;    /* Request/response/command/... category */
	uint8_t (*func)(struct bt_att_chan *chan, struct net_buf *buf);
} handlers[] = {
	{ BT_ATT_OP_MTU_REQ,
		sizeof(struct bt_att_exchange_mtu_req),
		ATT_REQUEST,
		att_mtu_req },
	{ BT_ATT_OP_FIND_INFO_REQ,
		sizeof(struct bt_att_find_info_req),
		ATT_REQUEST,
		att_find_info_req },
	{ BT_ATT_OP_FIND_TYPE_REQ,
		sizeof(struct bt_att_find_type_req),
		ATT_REQUEST,
		att_find_type_req },
	{ BT_ATT_OP_READ_TYPE_REQ,
		sizeof(struct bt_att_read_type_req),
		ATT_REQUEST,
		att_read_type_req },
	{ BT_ATT_OP_READ_REQ,
		sizeof(struct bt_att_read_req),
		ATT_REQUEST,
		att_read_req },
	{ BT_ATT_OP_READ_BLOB_REQ,
		sizeof(struct bt_att_read_blob_req),
		ATT_REQUEST,
		att_read_blob_req },
#if defined(CONFIG_BT_GATT_READ_MULTIPLE)
	{ BT_ATT_OP_READ_MULT_REQ,
		BT_ATT_READ_MULT_MIN_LEN_REQ,
		ATT_REQUEST,
		att_read_mult_req },
#endif /* CONFIG_BT_GATT_READ_MULTIPLE */
#if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
	{ BT_ATT_OP_READ_MULT_VL_REQ,
		BT_ATT_READ_MULT_MIN_LEN_REQ,
		ATT_REQUEST,
		att_read_mult_vl_req },
#endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
	{ BT_ATT_OP_READ_GROUP_REQ,
		sizeof(struct bt_att_read_group_req),
		ATT_REQUEST,
		att_read_group_req },
	{ BT_ATT_OP_WRITE_REQ,
		sizeof(struct bt_att_write_req),
		ATT_REQUEST,
		att_write_req },
	{ BT_ATT_OP_PREPARE_WRITE_REQ,
		sizeof(struct bt_att_prepare_write_req),
		ATT_REQUEST,
		att_prepare_write_req },
	{ BT_ATT_OP_EXEC_WRITE_REQ,
		sizeof(struct bt_att_exec_write_req),
		ATT_REQUEST,
		att_exec_write_req },
	{ BT_ATT_OP_CONFIRM,
		0,
		ATT_CONFIRMATION,
		att_confirm },
	{ BT_ATT_OP_WRITE_CMD,
		sizeof(struct bt_att_write_cmd),
		ATT_COMMAND,
		att_write_cmd },
#if defined(CONFIG_BT_SIGNING)
	{ BT_ATT_OP_SIGNED_WRITE_CMD,
		(sizeof(struct bt_att_write_cmd) +
		 sizeof(struct bt_att_signature)),
		ATT_COMMAND,
		att_signed_write_cmd },
#endif /* CONFIG_BT_SIGNING */
#if defined(CONFIG_BT_GATT_CLIENT)
	{ BT_ATT_OP_ERROR_RSP,
		sizeof(struct bt_att_error_rsp),
		ATT_RESPONSE,
		att_error_rsp },
	{ BT_ATT_OP_MTU_RSP,
		sizeof(struct bt_att_exchange_mtu_rsp),
		ATT_RESPONSE,
		att_mtu_rsp },
	{ BT_ATT_OP_FIND_INFO_RSP,
		sizeof(struct bt_att_find_info_rsp),
		ATT_RESPONSE,
		att_handle_find_info_rsp },
	{ BT_ATT_OP_FIND_TYPE_RSP,
		sizeof(struct bt_att_handle_group),
		ATT_RESPONSE,
		att_handle_find_type_rsp },
	{ BT_ATT_OP_READ_TYPE_RSP,
		sizeof(struct bt_att_read_type_rsp),
		ATT_RESPONSE,
		att_handle_read_type_rsp },
	{ BT_ATT_OP_READ_RSP,
		0,
		ATT_RESPONSE,
		att_handle_read_rsp },
	{ BT_ATT_OP_READ_BLOB_RSP,
		0,
		ATT_RESPONSE,
		att_handle_read_blob_rsp },
#if defined(CONFIG_BT_GATT_READ_MULTIPLE)
	{ BT_ATT_OP_READ_MULT_RSP,
		0,
		ATT_RESPONSE,
		att_handle_read_mult_rsp },
#endif /* CONFIG_BT_GATT_READ_MULTIPLE */
#if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
	{ BT_ATT_OP_READ_MULT_VL_RSP,
		sizeof(struct bt_att_read_mult_vl_rsp),
		ATT_RESPONSE,
		att_handle_read_mult_vl_rsp },
#endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
	{ BT_ATT_OP_READ_GROUP_RSP,
		sizeof(struct bt_att_read_group_rsp),
		ATT_RESPONSE,
		att_handle_read_group_rsp },
	{ BT_ATT_OP_WRITE_RSP,
		0,
		ATT_RESPONSE,
		att_handle_write_rsp },
	{ BT_ATT_OP_PREPARE_WRITE_RSP,
		sizeof(struct bt_att_prepare_write_rsp),
		ATT_RESPONSE,
		att_handle_prepare_write_rsp },
	{ BT_ATT_OP_EXEC_WRITE_RSP,
		0,
		ATT_RESPONSE,
		att_handle_exec_write_rsp },
	{ BT_ATT_OP_NOTIFY,
		sizeof(struct bt_att_notify),
		ATT_NOTIFICATION,
		att_notify },
	{ BT_ATT_OP_INDICATE,
		sizeof(struct bt_att_indicate),
		ATT_INDICATION,
		att_indicate },
	{ BT_ATT_OP_NOTIFY_MULT,
		sizeof(struct bt_att_notify_mult),
		ATT_NOTIFICATION,
		att_notify_mult },
#endif /* CONFIG_BT_GATT_CLIENT */
};
2799
/* Classify an ATT opcode into its PDU category.
 *
 * Known opcodes are mapped explicitly. For unknown opcodes, a set
 * ATT_CMD_MASK bit (0x40, the Command Flag) makes the PDU a command;
 * anything else is ATT_UNKNOWN.
 */
static att_type_t att_op_get_type(uint8_t op)
{
	switch (op) {
	case BT_ATT_OP_MTU_REQ:
	case BT_ATT_OP_FIND_INFO_REQ:
	case BT_ATT_OP_FIND_TYPE_REQ:
	case BT_ATT_OP_READ_TYPE_REQ:
	case BT_ATT_OP_READ_REQ:
	case BT_ATT_OP_READ_BLOB_REQ:
	case BT_ATT_OP_READ_MULT_REQ:
	case BT_ATT_OP_READ_MULT_VL_REQ:
	case BT_ATT_OP_READ_GROUP_REQ:
	case BT_ATT_OP_WRITE_REQ:
	case BT_ATT_OP_PREPARE_WRITE_REQ:
	case BT_ATT_OP_EXEC_WRITE_REQ:
		return ATT_REQUEST;
	case BT_ATT_OP_CONFIRM:
		return ATT_CONFIRMATION;
	case BT_ATT_OP_WRITE_CMD:
	case BT_ATT_OP_SIGNED_WRITE_CMD:
		return ATT_COMMAND;
	case BT_ATT_OP_ERROR_RSP:
	case BT_ATT_OP_MTU_RSP:
	case BT_ATT_OP_FIND_INFO_RSP:
	case BT_ATT_OP_FIND_TYPE_RSP:
	case BT_ATT_OP_READ_TYPE_RSP:
	case BT_ATT_OP_READ_RSP:
	case BT_ATT_OP_READ_BLOB_RSP:
	case BT_ATT_OP_READ_MULT_RSP:
	case BT_ATT_OP_READ_MULT_VL_RSP:
	case BT_ATT_OP_READ_GROUP_RSP:
	case BT_ATT_OP_WRITE_RSP:
	case BT_ATT_OP_PREPARE_WRITE_RSP:
	case BT_ATT_OP_EXEC_WRITE_RSP:
		return ATT_RESPONSE;
	case BT_ATT_OP_NOTIFY:
	case BT_ATT_OP_NOTIFY_MULT:
		return ATT_NOTIFICATION;
	case BT_ATT_OP_INDICATE:
		return ATT_INDICATION;
	}

	if (op & ATT_CMD_MASK) {
		return ATT_COMMAND;
	}

	return ATT_UNKNOWN;
}
2848
get_conn(struct bt_att_chan * att_chan)2849 static struct bt_conn *get_conn(struct bt_att_chan *att_chan)
2850 {
2851 return att_chan->chan.chan.conn;
2852 }
2853
/* L2CAP receive callback for an ATT bearer: validate the PDU header,
 * look up the opcode handler, check the minimum payload length, dispatch,
 * and for requests turn a handler error into an Error Response.
 *
 * @return 0 always; malformed or unexpected PDUs are logged and dropped.
 */
static int bt_att_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	struct bt_att_chan *att_chan = ATT_CHAN(chan);
	struct bt_conn *conn = get_conn(att_chan);
	struct bt_att_hdr *hdr;
	const struct att_handler *handler;
	uint8_t err;
	size_t i;

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Too small ATT PDU received");
		return 0;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	LOG_DBG("Received ATT chan %p code 0x%02x len %zu", att_chan, hdr->code,
		net_buf_frags_len(buf));

	if (conn->state != BT_CONN_CONNECTED) {
		LOG_DBG("not connected: conn %p state %u", conn, conn->state);
		return 0;
	}

	if (!att_chan->att) {
		LOG_DBG("Ignore recv on detached ATT chan");
		return 0;
	}

	/* Linear scan of the opcode dispatch table */
	for (i = 0, handler = NULL; i < ARRAY_SIZE(handlers); i++) {
		if (hdr->code == handlers[i].op) {
			handler = &handlers[i];
			break;
		}
	}

	if (!handler) {
		LOG_WRN("Unhandled ATT code 0x%02x", hdr->code);
		/* Unsupported commands and indications are ignored without a
		 * response; anything else gets "Not Supported".
		 */
		if (att_op_get_type(hdr->code) != ATT_COMMAND &&
		    att_op_get_type(hdr->code) != ATT_INDICATION) {
			send_err_rsp(att_chan, hdr->code, 0,
				     BT_ATT_ERR_NOT_SUPPORTED);
		}
		return 0;
	}

	if (buf->len < handler->expect_len) {
		LOG_ERR("Invalid len %u for code 0x%02x", buf->len, hdr->code);
		err = BT_ATT_ERR_INVALID_PDU;
	} else {
		err = handler->func(att_chan, buf);
	}

	/* Only requests carry an error back to the peer; handle 0 is used
	 * because the failing handle may not have been parsed.
	 */
	if (handler->type == ATT_REQUEST && err) {
		LOG_DBG("ATT error 0x%02x", err);
		send_err_rsp(att_chan, hdr->code, 0, err);
	}

	return 0;
}
2913
att_get(struct bt_conn * conn)2914 static struct bt_att *att_get(struct bt_conn *conn)
2915 {
2916 struct bt_l2cap_chan *chan;
2917 struct bt_att_chan *att_chan;
2918
2919 if (conn->state != BT_CONN_CONNECTED) {
2920 LOG_WRN("Not connected");
2921 return NULL;
2922 }
2923
2924 chan = bt_l2cap_le_lookup_rx_cid(conn, BT_L2CAP_CID_ATT);
2925 if (!chan) {
2926 LOG_ERR("Unable to find ATT channel");
2927 return NULL;
2928 }
2929
2930 att_chan = ATT_CHAN(chan);
2931 if (!atomic_test_bit(att_chan->flags, ATT_CONNECTED)) {
2932 LOG_ERR("ATT channel not connected");
2933 return NULL;
2934 }
2935
2936 return att_chan->att;
2937 }
2938
/* Allocate a PDU on the first bearer whose MTU can carry opcode + len.
 * Returns NULL if no bearer is available or none has a large enough MTU.
 */
struct net_buf *bt_att_create_pdu(struct bt_conn *conn, uint8_t op, size_t len)
{
	struct bt_att_chan *chan, *tmp;
	struct bt_att *att = att_get(conn);
	const size_t pdu_len = len + sizeof(op);

	if (att == NULL) {
		return NULL;
	}

	/* This allocator should _not_ be used for RSPs. */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (pdu_len <= bt_att_mtu(chan)) {
			return bt_att_chan_create_pdu(chan, op, len);
		}
	}

	LOG_WRN("No ATT channel for MTU %zu", pdu_len);

	return NULL;
}
2962
/* Allocate a response PDU for @p chan with the opcode already written.
 * Reserves L2CAP headroom (plus SDU header for enhanced bearers) and tags
 * the buffer's TX metadata with the owning channel. Returns NULL on
 * allocation timeout.
 */
struct net_buf *bt_att_create_rsp_pdu(struct bt_att_chan *chan, uint8_t op)
{
	struct net_buf *buf;
	struct bt_att_hdr *hdr;
	struct bt_att_tx_meta_data *meta;
	size_t reserve = BT_L2CAP_BUF_SIZE(0);

	buf = net_buf_alloc(&att_pool, BT_ATT_TIMEOUT);
	if (buf == NULL) {
		LOG_ERR("Unable to allocate buffer for op 0x%02x", op);
		return NULL;
	}

	/* EATT responses travel as L2CAP SDUs and need extra headroom */
	if (bt_att_is_enhanced(chan)) {
		reserve += BT_L2CAP_SDU_HDR_SIZE;
	}

	net_buf_reserve(buf, reserve);

	meta = bt_att_get_tx_meta_data(buf);
	meta->att_chan = chan;

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = op;

	return buf;
}
2992
/* Tear down a bt_att object once its last bearer is gone: flush the
 * prepare and TX queues, fail every pending request with -ECONNRESET and
 * return the object to its slab.
 */
static void att_reset(struct bt_att *att)
{
	struct net_buf *buf;

#if CONFIG_BT_ATT_PREPARE_COUNT > 0
	/* Discard queued buffers */
	while ((buf = net_buf_slist_get(&att->prep_queue))) {
		net_buf_unref(buf);
	}
#endif /* CONFIG_BT_ATT_PREPARE_COUNT > 0 */

#if defined(CONFIG_BT_EATT)
	struct k_work_sync sync;

	/* Make sure no EATT connection attempt fires after teardown */
	(void)k_work_cancel_delayable_sync(&att->eatt.connection_work, &sync);
#endif /* CONFIG_BT_EATT */

	while ((buf = net_buf_get(&att->tx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Notify pending requests */
	while (!sys_slist_is_empty(&att->reqs)) {
		struct bt_att_req *req;
		sys_snode_t *node;

		node = sys_slist_get_not_empty(&att->reqs);
		req = CONTAINER_OF(node, struct bt_att_req, node);
		if (req->func) {
			req->func(att->conn, -ECONNRESET, NULL, 0,
				  req->user_data);
		}

		bt_att_req_free(req);
	}

	/* FIXME: `att->conn` is not reference counted. Consider using `bt_conn_ref`
	 * and `bt_conn_unref` to follow convention.
	 */
	att->conn = NULL;
	k_mem_slab_free(&att_slab, (void *)att);
}
3035
att_chan_detach(struct bt_att_chan * chan)3036 static void att_chan_detach(struct bt_att_chan *chan)
3037 {
3038 struct net_buf *buf;
3039
3040 LOG_DBG("chan %p", chan);
3041
3042 sys_slist_find_and_remove(&chan->att->chans, &chan->node);
3043
3044 /* Release pending buffers */
3045 while ((buf = net_buf_get(&chan->tx_queue, K_NO_WAIT))) {
3046 net_buf_unref(buf);
3047 }
3048
3049 if (chan->req) {
3050 /* Notify outstanding request */
3051 att_handle_rsp(chan, NULL, 0, -ECONNRESET);
3052 }
3053
3054 chan->att = NULL;
3055 atomic_clear_bit(chan->flags, ATT_CONNECTED);
3056 }
3057
/* Delayable-work handler fired when an ATT transaction exceeds the
 * transaction timeout; tears the bearer down as the spec requires.
 */
static void att_timeout(struct k_work *work)
{
	char addr[BT_ADDR_LE_STR_LEN];
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct bt_att_chan *chan = CONTAINER_OF(dwork, struct bt_att_chan,
						timeout_work);

	bt_addr_le_to_str(bt_conn_get_dst(chan->att->conn), addr, sizeof(addr));
	LOG_ERR("ATT Timeout for device %s", addr);

	/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part F] page 480:
	 *
	 * A transaction not completed within 30 seconds shall time out. Such a
	 * transaction shall be considered to have failed and the local higher
	 * layers shall be informed of this failure. No more attribute protocol
	 * requests, commands, indications or notifications shall be sent to the
	 * target device on this ATT Bearer.
	 */
	bt_att_disconnected(&chan->chan.chan);
}
3078
att_get_fixed_chan(struct bt_conn * conn)3079 static struct bt_att_chan *att_get_fixed_chan(struct bt_conn *conn)
3080 {
3081 struct bt_l2cap_chan *chan;
3082
3083 chan = bt_l2cap_le_lookup_tx_cid(conn, BT_L2CAP_CID_ATT);
3084 __ASSERT(chan, "No ATT channel found");
3085
3086 return ATT_CHAN(chan);
3087 }
3088
att_chan_attach(struct bt_att * att,struct bt_att_chan * chan)3089 static void att_chan_attach(struct bt_att *att, struct bt_att_chan *chan)
3090 {
3091 LOG_DBG("att %p chan %p flags %lu", att, chan, atomic_get(chan->flags));
3092
3093 if (sys_slist_is_empty(&att->chans)) {
3094 /* Init general queues when attaching the first channel */
3095 k_fifo_init(&att->tx_queue);
3096 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
3097 sys_slist_init(&att->prep_queue);
3098 #endif
3099 }
3100
3101 sys_slist_prepend(&att->chans, &chan->node);
3102 }
3103
/* L2CAP "connected" callback: mark the bearer usable, propagate its MTU,
 * arm the transaction timeout work and notify the GATT layer.
 */
static void bt_att_connected(struct bt_l2cap_chan *chan)
{
	struct bt_att_chan *att_chan = ATT_CHAN(chan);
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->tx.cid);

	/* Flag must be set before the MTU update is published */
	atomic_set_bit(att_chan->flags, ATT_CONNECTED);

	att_chan_mtu_updated(att_chan);

	k_work_init_delayable(&att_chan->timeout_work, att_timeout);

	bt_gatt_connected(le_chan->chan.conn);
}
3119
/* L2CAP "disconnected" callback: detach this bearer and, when it was the
 * last one, reset the whole bt_att object and notify GATT.
 */
static void bt_att_disconnected(struct bt_l2cap_chan *chan)
{
	struct bt_att_chan *att_chan = ATT_CHAN(chan);
	/* Keep a local pointer: att_chan->att is cleared by the detach below */
	struct bt_att *att = att_chan->att;
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);

	LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->tx.cid);

	if (!att_chan->att) {
		LOG_DBG("Ignore disconnect on detached ATT chan");
		return;
	}

	att_chan_detach(att_chan);

	/* Don't reset if there are still channels to be used */
	if (!sys_slist_is_empty(&att->chans)) {
		return;
	}

	att_reset(att);

	bt_gatt_disconnected(le_chan->chan.conn);
}
3144
3145 #if defined(CONFIG_BT_SMP)
att_req_retry(struct bt_att_chan * att_chan)3146 static uint8_t att_req_retry(struct bt_att_chan *att_chan)
3147 {
3148 struct bt_att_req *req = att_chan->req;
3149 struct net_buf *buf;
3150
3151 /* Resend buffer */
3152 if (!req->encode) {
3153 /* This request does not support resending */
3154 return BT_ATT_ERR_AUTHENTICATION;
3155 }
3156
3157
3158 buf = bt_att_chan_create_pdu(att_chan, req->att_op, req->len);
3159 if (!buf) {
3160 return BT_ATT_ERR_UNLIKELY;
3161 }
3162
3163 if (req->encode(buf, req->len, req->user_data)) {
3164 net_buf_unref(buf);
3165 return BT_ATT_ERR_UNLIKELY;
3166 }
3167
3168 if (chan_send(att_chan, buf)) {
3169 net_buf_unref(buf);
3170 return BT_ATT_ERR_UNLIKELY;
3171 }
3172
3173 return BT_ATT_ERR_SUCCESS;
3174 }
3175
/* L2CAP encryption-change callback: on security failure, fail a retrying
 * request; on success, notify GATT and resend the pending request that was
 * waiting for the security upgrade (see att_error_rsp).
 */
static void bt_att_encrypt_change(struct bt_l2cap_chan *chan,
				  uint8_t hci_status)
{
	struct bt_att_chan *att_chan = ATT_CHAN(chan);
	struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
	struct bt_conn *conn = le_chan->chan.conn;
	uint8_t err;

	LOG_DBG("chan %p conn %p handle %u sec_level 0x%02x status 0x%02x", le_chan, conn,
		conn->handle, conn->sec_level, hci_status);

	if (!att_chan->att) {
		LOG_DBG("Ignore encrypt change on detached ATT chan");
		return;
	}

	/*
	 * If status (HCI status of security procedure) is non-zero, notify
	 * outstanding request about security failure.
	 */
	if (hci_status) {
		if (att_chan->req && att_chan->req->retrying) {
			att_handle_rsp(att_chan, NULL, 0,
				       BT_ATT_ERR_AUTHENTICATION);
		}

		return;
	}

	bt_gatt_encrypt_change(conn);

	/* No security was actually established; nothing to retry */
	if (conn->sec_level == BT_SECURITY_L1) {
		return;
	}

	if (!(att_chan->req && att_chan->req->retrying)) {
		return;
	}

	LOG_DBG("Retrying");

	err = att_req_retry(att_chan);
	if (err) {
		LOG_DBG("Retry failed (%d)", err);
		att_handle_rsp(att_chan, NULL, 0, err);
	}
}
3223 #endif /* CONFIG_BT_SMP */
3224
/* L2CAP status callback: when the channel becomes able to transmit
 * (BT_L2CAP_STATUS_OUT) and no request is outstanding, dequeue the next
 * pending ATT request and attempt to send it.
 */
static void bt_att_status(struct bt_l2cap_chan *ch, atomic_t *status)
{
	struct bt_att_chan *att_chan = ATT_CHAN(ch);
	sys_snode_t *pending;

	LOG_DBG("chan %p status %p", ch, status);

	if (!atomic_test_bit(status, BT_L2CAP_STATUS_OUT)) {
		return;
	}

	if (!att_chan->att) {
		LOG_DBG("Ignore status on detached ATT chan");
		return;
	}

	/* An outstanding request blocks further sends on this channel */
	if (att_chan->req) {
		return;
	}

	/* Take the next queued request, if any */
	pending = sys_slist_get(&att_chan->att->reqs);
	if (pending == NULL) {
		return;
	}

	if (bt_att_chan_req_send(att_chan, ATT_REQ(pending)) < 0) {
		/* Could not be sent: put it back at the head of the queue */
		sys_slist_prepend(&att_chan->att->reqs, pending);
	}
}
3259
bt_att_released(struct bt_l2cap_chan * ch)3260 static void bt_att_released(struct bt_l2cap_chan *ch)
3261 {
3262 struct bt_att_chan *chan = ATT_CHAN(ch);
3263
3264 LOG_DBG("chan %p", chan);
3265
3266 k_mem_slab_free(&chan_slab, (void *)chan);
3267 }
3268
3269 #if defined(CONFIG_BT_EATT)
/* EATT reconfigure callback: propagate the channel's new MTU. */
static void bt_att_reconfigured(struct bt_l2cap_chan *l2cap_chan)
{
	struct bt_att_chan *chan = ATT_CHAN(l2cap_chan);

	LOG_DBG("chan %p", chan);

	att_chan_mtu_updated(chan);
}
3278 #endif /* CONFIG_BT_EATT */
3279
/* Allocate and attach a new ATT bearer (fixed or enhanced) for @att.
 *
 * Enforces the per-connection channel quota (ATT_CHAN_MAX), allocates a
 * channel from chan_slab, wires up the shared L2CAP ops table, applies
 * @flags (e.g. BIT(ATT_ENHANCED)) and the initial MTU, and links the
 * channel into att->chans via att_chan_attach().
 *
 * Returns the new channel, or NULL if the quota is reached or the slab
 * is exhausted.
 */
static struct bt_att_chan *att_chan_new(struct bt_att *att, atomic_val_t flags)
{
	int quota = 0;
	/* Shared between all ATT channels; callbacks dispatch on the chan */
	static struct bt_l2cap_chan_ops ops = {
		.connected = bt_att_connected,
		.disconnected = bt_att_disconnected,
		.recv = bt_att_recv,
		.sent = bt_att_sent,
		.status = bt_att_status,
#if defined(CONFIG_BT_SMP)
		.encrypt_change = bt_att_encrypt_change,
#endif /* CONFIG_BT_SMP */
		.released = bt_att_released,
#if defined(CONFIG_BT_EATT)
		.reconfigured = bt_att_reconfigured,
#endif /* CONFIG_BT_EATT */
	};
	struct bt_att_chan *chan;

	/* Count channels already attached to this ATT context and bail out
	 * once the configured maximum is reached.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
		if (chan->att == att) {
			quota++;
		}

		if (quota == ATT_CHAN_MAX) {
			LOG_DBG("Maximum number of channels reached: %d", quota);
			return NULL;
		}
	}

	if (k_mem_slab_alloc(&chan_slab, (void **)&chan, K_NO_WAIT)) {
		LOG_WRN("No available ATT channel for conn %p", att->conn);
		return NULL;
	}

	(void)memset(chan, 0, sizeof(*chan));
	chan->chan.chan.ops = &ops;
	k_fifo_init(&chan->tx_queue);
	atomic_set(chan->flags, flags);
	chan->att = att;
	att_chan_attach(att, chan);

	if (bt_att_is_enhanced(chan)) {
		/* EATT: The MTU will be sent in the ECRED conn req/rsp PDU. The
		 * TX MTU is received on L2CAP-level.
		 */
		chan->chan.rx.mtu = BT_LOCAL_ATT_MTU_EATT;
	} else {
		/* UATT: L2CAP Basic is not able to communicate the L2CAP MTU
		 * without help. ATT has to manage the MTU. The initial MTU is
		 * defined by spec.
		 */
		chan->chan.tx.mtu = BT_ATT_DEFAULT_LE_MTU;
		chan->chan.rx.mtu = BT_ATT_DEFAULT_LE_MTU;
	}

	return chan;
}
3338
3339 #if defined(CONFIG_BT_EATT)
bt_eatt_count(struct bt_conn * conn)3340 size_t bt_eatt_count(struct bt_conn *conn)
3341 {
3342 struct bt_att *att;
3343 struct bt_att_chan *chan;
3344 size_t eatt_count = 0;
3345
3346 if (!conn) {
3347 return 0;
3348 }
3349
3350 att = att_get(conn);
3351 if (!att) {
3352 return 0;
3353 }
3354
3355 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3356 if (bt_att_is_enhanced(chan) &&
3357 atomic_test_bit(chan->flags, ATT_CONNECTED)) {
3358 eatt_count++;
3359 }
3360 }
3361
3362 return eatt_count;
3363 }
3364
att_enhanced_connection_work_handler(struct k_work * work)3365 static void att_enhanced_connection_work_handler(struct k_work *work)
3366 {
3367 const struct k_work_delayable *dwork = k_work_delayable_from_work(work);
3368 const struct bt_att *att = CONTAINER_OF(dwork, struct bt_att, eatt.connection_work);
3369 const int err = bt_eatt_connect(att->conn, att->eatt.chans_to_connect);
3370
3371 if (err == -ENOMEM) {
3372 LOG_DBG("Failed to connect %d EATT channels, central has probably "
3373 "already established some.",
3374 att->eatt.chans_to_connect);
3375 } else if (err < 0) {
3376 LOG_WRN("Failed to connect %d EATT channels (err: %d)", att->eatt.chans_to_connect,
3377 err);
3378 }
3379
3380 }
3381 #endif /* CONFIG_BT_EATT */
3382
bt_att_accept(struct bt_conn * conn,struct bt_l2cap_chan ** ch)3383 static int bt_att_accept(struct bt_conn *conn, struct bt_l2cap_chan **ch)
3384 {
3385 struct bt_att *att;
3386 struct bt_att_chan *chan;
3387
3388 LOG_DBG("conn %p handle %u", conn, conn->handle);
3389
3390 if (k_mem_slab_alloc(&att_slab, (void **)&att, K_NO_WAIT)) {
3391 LOG_ERR("No available ATT context for conn %p", conn);
3392 return -ENOMEM;
3393 }
3394
3395 att_handle_rsp_thread = k_current_get();
3396
3397 (void)memset(att, 0, sizeof(*att));
3398 att->conn = conn;
3399 sys_slist_init(&att->reqs);
3400 sys_slist_init(&att->chans);
3401
3402 #if defined(CONFIG_BT_EATT)
3403 k_work_init_delayable(&att->eatt.connection_work,
3404 att_enhanced_connection_work_handler);
3405 #endif /* CONFIG_BT_EATT */
3406
3407 chan = att_chan_new(att, 0);
3408 if (!chan) {
3409 return -ENOMEM;
3410 }
3411
3412 *ch = &chan->chan.chan;
3413
3414 return 0;
3415 }
3416
/* The L2CAP channel section is sorted lexicographically. Make sure that ATT fixed channel will be
 * placed as the last one to ensure that SMP channel is properly initialized before bt_att_connected
 * tries to send security request.
 */
/* Registers the fixed ATT channel on BT_L2CAP_CID_ATT with bt_att_accept. */
BT_L2CAP_CHANNEL_DEFINE(z_att_fixed_chan, BT_L2CAP_CID_ATT, bt_att_accept, NULL);
3422
3423 #if defined(CONFIG_BT_EATT)
credit_based_connection_delay(struct bt_conn * conn)3424 static k_timeout_t credit_based_connection_delay(struct bt_conn *conn)
3425 {
3426 /*
3427 * 5.3 Vol 3, Part G, Section 5.4 L2CAP COLLISION MITIGATION
3428 * ... In this situation, the Central may retry
3429 * immediately but the Peripheral shall wait a minimum of 100 ms before retrying;
3430 * on LE connections, the Peripheral shall wait at least 2 *
3431 * (connPeripheralLatency + 1) * connInterval if that is longer.
3432 */
3433
3434 if (IS_ENABLED(CONFIG_BT_CENTRAL) && conn->role == BT_CONN_ROLE_CENTRAL) {
3435 return K_NO_WAIT;
3436 } else if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
3437 uint8_t random;
3438 int err;
3439
3440 err = bt_rand(&random, sizeof(random));
3441 if (err) {
3442 random = 0;
3443 }
3444
3445 const uint8_t rand_delay = random & 0x7; /* Small random delay for IOP */
3446 /* The maximum value of (latency + 1) * 2 multipled with the
3447 * maximum connection interval has a maximum value of
3448 * 4000000000 which can be stored in 32-bits, so this won't
3449 * result in an overflow
3450 */
3451 const uint32_t calculated_delay_us =
3452 2 * (conn->le.latency + 1) * BT_CONN_INTERVAL_TO_US(conn->le.interval);
3453 const uint32_t calculated_delay_ms = calculated_delay_us / USEC_PER_MSEC;
3454
3455 return K_MSEC(MAX(100, calculated_delay_ms + rand_delay));
3456 }
3457
3458 /* Must be either central or peripheral */
3459 __ASSERT_NO_MSG(false);
3460 CODE_UNREACHABLE;
3461 }
3462
/* Schedule creation of @chans_to_connect EATT bearers after the
 * collision-mitigation delay. Returns -ENOTCONN if no ATT context
 * exists, otherwise the k_work_reschedule() result.
 */
static int att_schedule_eatt_connect(struct bt_conn *conn, uint8_t chans_to_connect)
{
	struct bt_att *att = att_get(conn);

	if (att == NULL) {
		return -ENOTCONN;
	}

	att->eatt.chans_to_connect = chans_to_connect;

	return k_work_reschedule(&att->eatt.connection_work,
				 credit_based_connection_delay(conn));
}
3476
handle_potential_collision(struct bt_att * att)3477 static void handle_potential_collision(struct bt_att *att)
3478 {
3479 __ASSERT_NO_MSG(att);
3480
3481 int err;
3482 size_t to_connect = att->eatt.prev_conn_req_missing_chans;
3483
3484 if (att->eatt.prev_conn_rsp_result == BT_L2CAP_LE_ERR_NO_RESOURCES &&
3485 att->eatt.prev_conn_req_result == BT_L2CAP_LE_ERR_NO_RESOURCES) {
3486 LOG_DBG("Credit based connection request collision detected");
3487
3488 /* Reset to not keep retrying on repeated failures */
3489 att->eatt.prev_conn_rsp_result = 0;
3490 att->eatt.prev_conn_req_result = 0;
3491 att->eatt.prev_conn_req_missing_chans = 0;
3492
3493 if (to_connect == 0) {
3494 return;
3495 }
3496
3497 err = att_schedule_eatt_connect(att->conn, to_connect);
3498 if (err < 0) {
3499 LOG_ERR("Failed to schedule EATT connection retry (err: %d)", err);
3500 }
3501 }
3502 }
3503
/* ECRED connection-request callback: record the response result we sent
 * on the EATT PSM and check for a request collision.
 */
static void ecred_connect_req_cb(struct bt_conn *conn, uint16_t result, uint16_t psm)
{
	struct bt_att *att = att_get(conn);

	if (att == NULL) {
		return;
	}

	if (psm != BT_EATT_PSM) {
		/* Collision mitigation is only a requirement on the EATT PSM */
		return;
	}

	att->eatt.prev_conn_rsp_result = result;

	handle_potential_collision(att);
}
3521
/* ECRED connection-response callback: record how our own request on the
 * EATT PSM fared (result and number of channels that did not connect)
 * and check for a request collision.
 */
static void ecred_connect_rsp_cb(struct bt_conn *conn, uint16_t result,
				 uint8_t attempted_to_connect, uint8_t succeeded_to_connect,
				 uint16_t psm)
{
	struct bt_att *att = att_get(conn);

	if (att == NULL) {
		return;
	}

	if (psm != BT_EATT_PSM) {
		/* Collision mitigation is only a requirement on the EATT PSM */
		return;
	}

	att->eatt.prev_conn_req_result = result;
	att->eatt.prev_conn_req_missing_chans =
		attempted_to_connect - succeeded_to_connect;

	handle_potential_collision(att);
}
3543
/* Establish @num_channels enhanced ATT bearers on @conn.
 *
 * Requires an encrypted link (Vol 3, Part G, Section 5.3.2) and
 * 1..CONFIG_BT_EATT_MAX channels. Allocates as many channels as
 * possible, then issues ECRED connection requests in batches of
 * L2CAP_ECRED_CHAN_MAX_PER_REQ.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -EPERM if the link
 * is not encrypted, -ENOMEM if no channel could be allocated, or the
 * bt_l2cap_ecred_chan_connect() error.
 *
 * Fix: the NULL check for @conn now runs before bt_conn_get_security()
 * dereferences it (the original checked @conn only afterwards).
 */
int bt_eatt_connect(struct bt_conn *conn, size_t num_channels)
{
	struct bt_att_chan *att_chan;
	struct bt_att *att;
	struct bt_l2cap_chan *chan[CONFIG_BT_EATT_MAX + 1] = {};
	size_t offset = 0;
	size_t i = 0;
	int err;

	if (!conn) {
		return -EINVAL;
	}

	/* Check the encryption level for EATT */
	if (bt_conn_get_security(conn) < BT_SECURITY_L2) {
		/* Vol 3, Part G, Section 5.3.2 Channel Requirements states:
		 * The channel shall be encrypted.
		 */
		return -EPERM;
	}

	if (num_channels > CONFIG_BT_EATT_MAX || num_channels == 0) {
		return -EINVAL;
	}

	att_chan = att_get_fixed_chan(conn);
	att = att_chan->att;

	/* Allocate as many enhanced channels as requested (or available) */
	while (num_channels--) {
		att_chan = att_chan_new(att, BIT(ATT_ENHANCED));
		if (!att_chan) {
			break;
		}

		chan[i] = &att_chan->chan.chan;
		i++;
	}

	if (!i) {
		return -ENOMEM;
	}

	while (offset < i) {
		/* bt_l2cap_ecred_chan_connect() uses the first L2CAP_ECRED_CHAN_MAX_PER_REQ
		 * elements of the array or until a null-terminator is reached.
		 */
		err = bt_l2cap_ecred_chan_connect(conn, &chan[offset], BT_EATT_PSM);
		if (err < 0) {
			return err;
		}

		offset += L2CAP_ECRED_CHAN_MAX_PER_REQ;
	}

	return 0;
}
3600
3601 #if defined(CONFIG_BT_EATT_AUTO_CONNECT)
/* security_changed callback: once the link is encrypted and only the
 * fixed channel exists, schedule automatic creation of all EATT bearers.
 */
static void eatt_auto_connect(struct bt_conn *conn, bt_security_t level,
			      enum bt_security_err err)
{
	int eatt_err;

	if (err || level < BT_SECURITY_L2 || !bt_att_fixed_chan_only(conn)) {
		return;
	}

	eatt_err = att_schedule_eatt_connect(conn, CONFIG_BT_EATT_MAX);
	if (eatt_err < 0) {
		LOG_WRN("Automatic creation of EATT bearers failed on "
			"connection %s with error %d",
			bt_addr_le_str(bt_conn_get_dst(conn)), eatt_err);
	}
}
3618
/* Trigger automatic EATT bearer creation whenever link security changes. */
BT_CONN_CB_DEFINE(conn_callbacks) = {
	.security_changed = eatt_auto_connect,
};
3622
3623 #endif /* CONFIG_BT_EATT_AUTO_CONNECT */
3624
bt_eatt_disconnect(struct bt_conn * conn)3625 int bt_eatt_disconnect(struct bt_conn *conn)
3626 {
3627 struct bt_att_chan *chan;
3628 struct bt_att *att;
3629 int err = -ENOTCONN;
3630
3631 if (!conn) {
3632 return -EINVAL;
3633 }
3634
3635 chan = att_get_fixed_chan(conn);
3636 att = chan->att;
3637
3638 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3639 if (bt_att_is_enhanced(chan)) {
3640 err = bt_l2cap_chan_disconnect(&chan->chan.chan);
3641 }
3642 }
3643
3644 return err;
3645 }
3646
3647 #if defined(CONFIG_BT_TESTING)
bt_eatt_disconnect_one(struct bt_conn * conn)3648 int bt_eatt_disconnect_one(struct bt_conn *conn)
3649 {
3650 struct bt_att_chan *chan = att_get_fixed_chan(conn);
3651 struct bt_att *att = chan->att;
3652 int err = -ENOTCONN;
3653
3654 if (!conn) {
3655 return -EINVAL;
3656 }
3657
3658 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3659 if (bt_att_is_enhanced(chan)) {
3660 err = bt_l2cap_chan_disconnect(&chan->chan.chan);
3661 return err;
3662 }
3663 }
3664
3665 return err;
3666 }
3667
/* Reconfigure the MTU of every enhanced ATT bearer on @conn, issuing
 * ECRED reconfigure requests in batches of L2CAP_ECRED_CHAN_MAX_PER_REQ.
 * Returns 0 on success or the first bt_l2cap_ecred_chan_reconfigure()
 * error.
 */
int bt_eatt_reconfigure(struct bt_conn *conn, uint16_t mtu)
{
	struct bt_att_chan *att_chan = att_get_fixed_chan(conn);
	struct bt_att *att = att_chan->att;
	struct bt_l2cap_chan *chans[CONFIG_BT_EATT_MAX + 1] = {};
	size_t count = 0;

	/* Collect all enhanced bearers into a null-terminated array */
	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, att_chan, node) {
		if (bt_att_is_enhanced(att_chan)) {
			chans[count] = &att_chan->chan.chan;
			count++;
		}
	}

	for (size_t offset = 0; offset < count; offset += L2CAP_ECRED_CHAN_MAX_PER_REQ) {
		/* bt_l2cap_ecred_chan_reconfigure() uses the first L2CAP_ECRED_CHAN_MAX_PER_REQ
		 * elements of the array or until a null-terminator is reached.
		 */
		int err = bt_l2cap_ecred_chan_reconfigure(&chans[offset], mtu);

		if (err < 0) {
			return err;
		}
	}

	return 0;
}
3698 #endif /* CONFIG_BT_TESTING */
3699 #endif /* CONFIG_BT_EATT */
3700
bt_eatt_accept(struct bt_conn * conn,struct bt_l2cap_server * server,struct bt_l2cap_chan ** chan)3701 static int bt_eatt_accept(struct bt_conn *conn, struct bt_l2cap_server *server,
3702 struct bt_l2cap_chan **chan)
3703 {
3704 struct bt_att_chan *att_chan = att_get_fixed_chan(conn);
3705 struct bt_att *att = att_chan->att;
3706
3707 LOG_DBG("conn %p handle %u", conn, conn->handle);
3708
3709 att_chan = att_chan_new(att, BIT(ATT_ENHANCED));
3710 if (att_chan) {
3711 *chan = &att_chan->chan.chan;
3712 return 0;
3713 }
3714
3715 return -ENOMEM;
3716 }
3717
/* Register the EATT L2CAP server (once) and, when EATT is enabled,
 * install the ECRED callbacks used for collision mitigation.
 */
static void bt_eatt_init(void)
{
	int err;
	/* Static: the server must outlive this function once registered */
	static struct bt_l2cap_server eatt_l2cap = {
		.psm = BT_EATT_PSM,
		.sec_level = BT_SECURITY_L2,
		.accept = bt_eatt_accept,
	};
	struct bt_l2cap_server *registered_server;

	LOG_DBG("");

	/* Check if eatt_l2cap server has already been registered. */
	registered_server = bt_l2cap_server_lookup_psm(eatt_l2cap.psm);
	if (registered_server != &eatt_l2cap) {
		err = bt_l2cap_server_register(&eatt_l2cap);
		if (err < 0) {
			LOG_ERR("EATT Server registration failed %d", err);
		}
	}

#if defined(CONFIG_BT_EATT)
	static const struct bt_l2cap_ecred_cb cb = {
		.ecred_conn_rsp = ecred_connect_rsp_cb,
		.ecred_conn_req = ecred_connect_req_cb,
	};

	bt_l2cap_register_ecred_cb(&cb);
#endif /* CONFIG_BT_EATT */
}
3748
bt_att_init(void)3749 void bt_att_init(void)
3750 {
3751 bt_gatt_init();
3752
3753 if (IS_ENABLED(CONFIG_BT_EATT)) {
3754 bt_eatt_init();
3755 }
3756 }
3757
bt_att_get_mtu(struct bt_conn * conn)3758 uint16_t bt_att_get_mtu(struct bt_conn *conn)
3759 {
3760 struct bt_att_chan *chan, *tmp;
3761 struct bt_att *att;
3762 uint16_t mtu = 0;
3763
3764 att = att_get(conn);
3765 if (!att) {
3766 return 0;
3767 }
3768
3769 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
3770 if (bt_att_mtu(chan) > mtu) {
3771 mtu = bt_att_mtu(chan);
3772 }
3773 }
3774
3775 return mtu;
3776 }
3777
/* Notify GATT when @updated_chan's MTU change raises the per-connection
 * maximum TX or RX MTU.
 *
 * The maxima over the *other* channels are computed first; only if the
 * updated channel now exceeds either maximum did the connection-wide
 * maximum actually change, avoiding redundant notifications.
 */
static void att_chan_mtu_updated(struct bt_att_chan *updated_chan)
{
	struct bt_att *att = updated_chan->att;
	struct bt_att_chan *chan, *tmp;
	uint16_t max_tx = 0, max_rx = 0;

	/* Get maximum MTU's of other channels */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (chan == updated_chan) {
			continue;
		}
		max_tx = MAX(max_tx, chan->chan.tx.mtu);
		max_rx = MAX(max_rx, chan->chan.rx.mtu);
	}

	/* If either maximum MTU has changed */
	if ((updated_chan->chan.tx.mtu > max_tx) ||
	    (updated_chan->chan.rx.mtu > max_rx)) {
		max_tx = MAX(max_tx, updated_chan->chan.tx.mtu);
		max_rx = MAX(max_rx, updated_chan->chan.rx.mtu);
		bt_gatt_att_max_mtu_changed(att->conn, max_tx, max_rx);
	}
}
3801
/* Allocate a zeroed ATT request from the request slab.
 *
 * @timeout is reduced to K_NO_WAIT when called from the bt_recv thread,
 * since no request can complete while that thread blocks here (would
 * deadlock). Returns NULL when the slab is exhausted.
 */
struct bt_att_req *bt_att_req_alloc(k_timeout_t timeout)
{
	struct bt_att_req *req = NULL;

	if (k_current_get() == att_handle_rsp_thread) {
		/* No req will be fulfilled while blocking on the bt_recv thread.
		 * Blocking would cause deadlock.
		 */
		timeout = K_NO_WAIT;
	}

	/* Reserve space for request */
	if (k_mem_slab_alloc(&req_slab, (void **)&req, timeout) != 0) {
		LOG_DBG("No space for req");
		return NULL;
	}

	LOG_DBG("req %p", req);

	memset(req, 0, sizeof(*req));

	return req;
}
3825
bt_att_req_free(struct bt_att_req * req)3826 void bt_att_req_free(struct bt_att_req *req)
3827 {
3828 LOG_DBG("req %p", req);
3829
3830 if (req->buf) {
3831 net_buf_unref(req->buf);
3832 req->buf = NULL;
3833 }
3834
3835 k_mem_slab_free(&req_slab, (void *)req);
3836 }
3837
bt_att_send(struct bt_conn * conn,struct net_buf * buf)3838 int bt_att_send(struct bt_conn *conn, struct net_buf *buf)
3839 {
3840 struct bt_att *att;
3841
3842 __ASSERT_NO_MSG(conn);
3843 __ASSERT_NO_MSG(buf);
3844
3845 att = att_get(conn);
3846 if (!att) {
3847 net_buf_unref(buf);
3848 return -ENOTCONN;
3849 }
3850
3851 net_buf_put(&att->tx_queue, buf);
3852 att_send_process(att);
3853
3854 return 0;
3855 }
3856
bt_att_req_send(struct bt_conn * conn,struct bt_att_req * req)3857 int bt_att_req_send(struct bt_conn *conn, struct bt_att_req *req)
3858 {
3859 struct bt_att *att;
3860
3861 LOG_DBG("conn %p req %p", conn, req);
3862
3863 __ASSERT_NO_MSG(conn);
3864 __ASSERT_NO_MSG(req);
3865
3866 att = att_get(conn);
3867 if (!att) {
3868 return -ENOTCONN;
3869 }
3870
3871 sys_slist_append(&att->reqs, &req->node);
3872 att_req_send_process(att);
3873
3874 return 0;
3875 }
3876
bt_att_chan_req_cancel(struct bt_att_chan * chan,struct bt_att_req * req)3877 static bool bt_att_chan_req_cancel(struct bt_att_chan *chan,
3878 struct bt_att_req *req)
3879 {
3880 if (chan->req != req) {
3881 return false;
3882 }
3883
3884 chan->req = &cancel;
3885
3886 bt_att_req_free(req);
3887
3888 return true;
3889 }
3890
bt_att_req_cancel(struct bt_conn * conn,struct bt_att_req * req)3891 void bt_att_req_cancel(struct bt_conn *conn, struct bt_att_req *req)
3892 {
3893 struct bt_att *att;
3894 struct bt_att_chan *chan, *tmp;
3895
3896 LOG_DBG("req %p", req);
3897
3898 if (!conn || !req) {
3899 return;
3900 }
3901
3902 att = att_get(conn);
3903 if (!att) {
3904 return;
3905 }
3906
3907 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
3908 /* Check if request is outstanding */
3909 if (bt_att_chan_req_cancel(chan, req)) {
3910 return;
3911 }
3912 }
3913
3914 /* Remove request from the list */
3915 sys_slist_find_and_remove(&att->reqs, &req->node);
3916
3917 bt_att_req_free(req);
3918 }
3919
bt_att_find_req_by_user_data(struct bt_conn * conn,const void * user_data)3920 struct bt_att_req *bt_att_find_req_by_user_data(struct bt_conn *conn, const void *user_data)
3921 {
3922 struct bt_att *att;
3923 struct bt_att_chan *chan;
3924 struct bt_att_req *req;
3925
3926 att = att_get(conn);
3927 if (!att) {
3928 return NULL;
3929 }
3930
3931 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3932 if (chan->req->user_data == user_data) {
3933 return chan->req;
3934 }
3935 }
3936
3937 SYS_SLIST_FOR_EACH_CONTAINER(&att->reqs, req, node) {
3938 if (req->user_data == user_data) {
3939 return req;
3940 }
3941 }
3942
3943 return NULL;
3944 }
3945
bt_att_fixed_chan_only(struct bt_conn * conn)3946 bool bt_att_fixed_chan_only(struct bt_conn *conn)
3947 {
3948 #if defined(CONFIG_BT_EATT)
3949 return bt_eatt_count(conn) == 0;
3950 #else
3951 return true;
3952 #endif /* CONFIG_BT_EATT */
3953 }
3954
bt_att_clear_out_of_sync_sent(struct bt_conn * conn)3955 void bt_att_clear_out_of_sync_sent(struct bt_conn *conn)
3956 {
3957 struct bt_att *att = att_get(conn);
3958 struct bt_att_chan *chan;
3959
3960 if (!att) {
3961 return;
3962 }
3963
3964 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3965 atomic_clear_bit(chan->flags, ATT_OUT_OF_SYNC_SENT);
3966 }
3967 }
3968
bt_att_out_of_sync_sent_on_fixed(struct bt_conn * conn)3969 bool bt_att_out_of_sync_sent_on_fixed(struct bt_conn *conn)
3970 {
3971 struct bt_l2cap_chan *l2cap_chan;
3972 struct bt_att_chan *att_chan;
3973
3974 l2cap_chan = bt_l2cap_le_lookup_rx_cid(conn, BT_L2CAP_CID_ATT);
3975 if (!l2cap_chan) {
3976 return false;
3977 }
3978
3979 att_chan = ATT_CHAN(l2cap_chan);
3980 return atomic_test_bit(att_chan->flags, ATT_OUT_OF_SYNC_SENT);
3981 }
3982
/* Attach TX metadata (completion callback, user data, channel options)
 * to an outgoing ATT PDU. The attribute count starts at 1.
 */
void bt_att_set_tx_meta_data(struct net_buf *buf, bt_gatt_complete_func_t func, void *user_data,
			     enum bt_att_chan_opt chan_opt)
{
	struct bt_att_tx_meta_data *meta = bt_att_get_tx_meta_data(buf);

	meta->chan_opt = chan_opt;
	meta->user_data = user_data;
	meta->func = func;
	/* A freshly tagged PDU covers a single attribute */
	meta->attr_count = 1;
}
3993
/* Add @attr_count to the attribute count stored in @buf's TX metadata. */
void bt_att_increment_tx_meta_data_attr_count(struct net_buf *buf, uint16_t attr_count)
{
	struct bt_att_tx_meta_data *meta = bt_att_get_tx_meta_data(buf);

	meta->attr_count += attr_count;
}
4000
bt_att_tx_meta_data_match(const struct net_buf * buf,bt_gatt_complete_func_t func,const void * user_data,enum bt_att_chan_opt chan_opt)4001 bool bt_att_tx_meta_data_match(const struct net_buf *buf, bt_gatt_complete_func_t func,
4002 const void *user_data, enum bt_att_chan_opt chan_opt)
4003 {
4004 const struct bt_att_tx_meta_data *meta = bt_att_get_tx_meta_data(buf);
4005
4006 return ((meta->func == func) &&
4007 (meta->user_data == user_data) &&
4008 (meta->chan_opt == chan_opt));
4009 }
4010
bt_att_chan_opt_valid(struct bt_conn * conn,enum bt_att_chan_opt chan_opt)4011 bool bt_att_chan_opt_valid(struct bt_conn *conn, enum bt_att_chan_opt chan_opt)
4012 {
4013 if ((chan_opt & (BT_ATT_CHAN_OPT_ENHANCED_ONLY | BT_ATT_CHAN_OPT_UNENHANCED_ONLY)) ==
4014 (BT_ATT_CHAN_OPT_ENHANCED_ONLY | BT_ATT_CHAN_OPT_UNENHANCED_ONLY)) {
4015 /* Enhanced and Unenhanced are mutually exclusive */
4016 return false;
4017 }
4018
4019 /* Choosing EATT requires EATT channels connected and encryption enabled */
4020 if (chan_opt & BT_ATT_CHAN_OPT_ENHANCED_ONLY) {
4021 return (bt_conn_get_security(conn) > BT_SECURITY_L1) &&
4022 !bt_att_fixed_chan_only(conn);
4023 }
4024
4025 return true;
4026 }
4027
bt_gatt_authorization_cb_register(const struct bt_gatt_authorization_cb * cb)4028 int bt_gatt_authorization_cb_register(const struct bt_gatt_authorization_cb *cb)
4029 {
4030 if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
4031 return -ENOSYS;
4032 }
4033
4034 if (!cb) {
4035 authorization_cb = NULL;
4036 return 0;
4037 }
4038
4039 if (authorization_cb) {
4040 return -EALREADY;
4041 }
4042
4043 authorization_cb = cb;
4044
4045 return 0;
4046 }
4047