/* att.c - Attribute protocol handling */

/*
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>

#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/uuid.h>
#include <zephyr/bluetooth/att.h>
#include <zephyr/bluetooth/gatt.h>
#include <zephyr/drivers/bluetooth/hci_driver.h>

#include "common/bt_str.h"

#include "hci_core.h"
#include "conn_internal.h"
#include "l2cap_internal.h"
#include "smp.h"
#include "att_internal.h"
#include "gatt_internal.h"

#define LOG_LEVEL CONFIG_BT_ATT_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_att);

#define ATT_CHAN(_ch) CONTAINER_OF(_ch, struct bt_att_chan, chan.chan)
#define ATT_REQ(_node) CONTAINER_OF(_node, struct bt_att_req, node)

#define ATT_CMD_MASK 0x40

#if defined(CONFIG_BT_EATT)
#define ATT_CHAN_MAX (CONFIG_BT_EATT_MAX + 1)
#else
#define ATT_CHAN_MAX 1
#endif /* CONFIG_BT_EATT */

typedef enum __packed {
	ATT_COMMAND,
	ATT_REQUEST,
	ATT_RESPONSE,
	ATT_NOTIFICATION,
	ATT_CONFIRMATION,
	ATT_INDICATION,
	ATT_UNKNOWN,
} att_type_t;

static att_type_t att_op_get_type(uint8_t op);
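
/* An ATT opcode encodes its method type in the opcode value itself; bit 6
 * (ATT_CMD_MASK, 0x40) is the Command Flag. For illustration (not an
 * exhaustive list), att_op_get_type() is expected to classify e.g.:
 *
 *   att_op_get_type(BT_ATT_OP_MTU_REQ);   // 0x02 -> ATT_REQUEST
 *   att_op_get_type(BT_ATT_OP_MTU_RSP);   // 0x03 -> ATT_RESPONSE
 *   att_op_get_type(BT_ATT_OP_WRITE_CMD); // 0x52 -> ATT_COMMAND
 *   att_op_get_type(BT_ATT_OP_NOTIFY);    // 0x1b -> ATT_NOTIFICATION
 *   att_op_get_type(BT_ATT_OP_INDICATE);  // 0x1d -> ATT_INDICATION
 *   att_op_get_type(BT_ATT_OP_CONFIRM);   // 0x1e -> ATT_CONFIRMATION
 */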

#if CONFIG_BT_ATT_PREPARE_COUNT > 0
struct bt_attr_data {
	uint16_t handle;
	uint16_t offset;
};

/* Pool for incoming ATT packets */
NET_BUF_POOL_DEFINE(prep_pool, CONFIG_BT_ATT_PREPARE_COUNT, BT_ATT_BUF_SIZE,
		    sizeof(struct bt_attr_data), NULL);
#endif /* CONFIG_BT_ATT_PREPARE_COUNT */

K_MEM_SLAB_DEFINE(req_slab, sizeof(struct bt_att_req),
		  CONFIG_BT_L2CAP_TX_BUF_COUNT, __alignof__(struct bt_att_req));

enum {
	ATT_PENDING_RSP,
	ATT_PENDING_CFM,
	ATT_CONNECTED,
	ATT_ENHANCED,
	ATT_PENDING_SENT,
	ATT_OUT_OF_SYNC_SENT,

	/* Total number of flags - must be at the end of the enum */
	ATT_NUM_FLAGS,
};

struct bt_att_tx_meta_data {
	struct bt_att_chan *att_chan;
	uint16_t attr_count;
	bt_gatt_complete_func_t func;
	void *user_data;
	enum bt_att_chan_opt chan_opt;
};

struct bt_att_tx_meta {
	struct bt_att_tx_meta_data *data;
};

/* ATT channel specific data */
struct bt_att_chan {
	/* ATT instance this channel is associated with */
	struct bt_att *att;
	struct bt_l2cap_le_chan chan;
	ATOMIC_DEFINE(flags, ATT_NUM_FLAGS);
	struct bt_att_req *req;
	struct k_fifo tx_queue;
	struct net_buf *rsp_buf;
	struct bt_att_tx_meta_data rsp_meta;
	struct k_work_delayable timeout_work;
	sys_snode_t node;
};

static bool bt_att_is_enhanced(struct bt_att_chan *chan)
{
	/* Optimization. */
	if (!IS_ENABLED(CONFIG_BT_EATT)) {
		return false;
	}

	return atomic_test_bit(chan->flags, ATT_ENHANCED);
}

static uint16_t bt_att_mtu(struct bt_att_chan *chan)
{
	/* Core v5.3 Vol 3 Part F 3.4.2:
	 *
	 * The server and client shall set ATT_MTU to the minimum of the
	 * Client Rx MTU and the Server Rx MTU.
	 */
	return MIN(chan->chan.rx.mtu, chan->chan.tx.mtu);
}
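
/* Worked example: if our Rx MTU (chan.rx.mtu) is 247 and the peer's Rx MTU
 * (chan.tx.mtu) is 65, both sides must use ATT_MTU = MIN(247, 65) = 65 on
 * this bearer.
 */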

/* ATT connection specific data */
struct bt_att {
	struct bt_conn *conn;
	/* Shared request queue */
	sys_slist_t reqs;
	struct k_fifo tx_queue;
#if CONFIG_BT_ATT_PREPARE_COUNT > 0
	sys_slist_t prep_queue;
#endif
	/* Contains bt_att_chan instance(s) */
	sys_slist_t chans;
#if defined(CONFIG_BT_EATT)
	struct {
		struct k_work_delayable connection_work;
		uint8_t chans_to_connect;

		uint16_t prev_conn_rsp_result;
		uint16_t prev_conn_req_result;
		uint8_t prev_conn_req_missing_chans;
	} eatt;
#endif /* CONFIG_BT_EATT */
};

K_MEM_SLAB_DEFINE(att_slab, sizeof(struct bt_att),
		  CONFIG_BT_MAX_CONN, __alignof__(struct bt_att));
K_MEM_SLAB_DEFINE(chan_slab, sizeof(struct bt_att_chan),
		  CONFIG_BT_MAX_CONN * ATT_CHAN_MAX,
		  __alignof__(struct bt_att_chan));
static struct bt_att_req cancel;

/** The thread ATT response handlers likely run on.
 *
 * Blocking this thread while waiting for an ATT request to resolve can cause a
 * deadlock.
 *
 * This can happen if the application queues ATT requests in the context of a
 * callback from the Bluetooth stack. This is because queuing an ATT request
 * will block until a request-resource is available, and the callbacks run on
 * the same thread as the ATT response handler that frees request-resources.
 *
 * The intended use of this value is to detect the above situation.
 */
static k_tid_t att_handle_rsp_thread;

#define bt_att_tx_meta_data(buf) (((struct bt_att_tx_meta *)net_buf_user_data(buf))->data)

static struct bt_att_tx_meta_data tx_meta_data[CONFIG_BT_CONN_TX_MAX];
K_FIFO_DEFINE(free_att_tx_meta_data);

static struct bt_att_tx_meta_data *tx_meta_data_alloc(k_timeout_t timeout)
{
	/* The meta data always gets freed in the system workqueue,
	 * so if we're in the same workqueue but there are no immediate
	 * contexts available, there's no chance we'll get one by waiting.
	 */
	if (k_current_get() == &k_sys_work_q.thread) {
		return k_fifo_get(&free_att_tx_meta_data, K_NO_WAIT);
	}

	return k_fifo_get(&free_att_tx_meta_data, timeout);
}

static inline void tx_meta_data_free(struct bt_att_tx_meta_data *data)
{
	__ASSERT_NO_MSG(data);
	bool alloc_from_global = PART_OF_ARRAY(tx_meta_data, data);

	if (data == &data->att_chan->rsp_meta) {
		/* "Free-ness" is tracked by the remote: there can only ever be
		 * one transaction per bearer.
		 */
		__ASSERT_NO_MSG(!alloc_from_global);
	} else {
		__ASSERT_NO_MSG(alloc_from_global);
	}

	(void)memset(data, 0, sizeof(*data));

	if (alloc_from_global) {
		k_fifo_put(&free_att_tx_meta_data, data);
	}
}

static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf);
static bt_conn_tx_cb_t chan_cb(const struct net_buf *buf);
static bt_conn_tx_cb_t att_cb(const struct net_buf *buf);

static void att_chan_mtu_updated(struct bt_att_chan *updated_chan);
static void bt_att_disconnected(struct bt_l2cap_chan *chan);

struct net_buf *bt_att_create_rsp_pdu(struct bt_att_chan *chan,
				      uint8_t op, size_t len);

void att_sent(struct bt_conn *conn, void *user_data)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *att_chan = data->att_chan;
	struct bt_l2cap_chan *chan = &att_chan->chan.chan;

	LOG_DBG("conn %p chan %p", conn, chan);

	if (chan->ops->sent) {
		chan->ops->sent(chan);
	}
}

/* On success, ownership of the buffer is transferred to the stack, which
 * takes care of releasing it when it completes transmitting to the
 * controller.
 *
 * In case bt_l2cap_send_cb fails, the buffer state and ownership are
 * retained, so the buffer can safely be pushed back to the queue to be
 * processed later.
 */
static int chan_send(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_hdr *hdr;
	struct net_buf_simple_state state;
	int err;
	struct bt_att_tx_meta_data *data = bt_att_tx_meta_data(buf);
	struct bt_att_chan *prev_chan = data->att_chan;

	hdr = (void *)buf->data;

	LOG_DBG("code 0x%02x", hdr->code);

	if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
		LOG_ERR("ATT channel not connected");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_BT_EATT) && hdr->code == BT_ATT_OP_MTU_REQ &&
	    chan->chan.tx.cid != BT_L2CAP_CID_ATT) {
		/* The Exchange MTU sub-procedure shall only be supported on
		 * the LE Fixed Channel Unenhanced ATT bearer
		 */
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_BT_EATT) && bt_att_is_enhanced(chan)) {
		/* Check if a `sent` callback is already pending. If it is, it
		 * cannot be modified, so the operation needs to be queued.
		 */
		if (atomic_test_bit(chan->flags, ATT_PENDING_SENT)) {
			return -EAGAIN;
		}

		if (hdr->code == BT_ATT_OP_SIGNED_WRITE_CMD) {
			return -ENOTSUP;
		}

		/* Check if the channel is ready to send in case of a request */
		if (att_op_get_type(hdr->code) == ATT_REQUEST &&
		    !atomic_test_bit(chan->chan.chan.status,
				     BT_L2CAP_STATUS_OUT)) {
			return -EAGAIN;
		}

		atomic_set_bit(chan->flags, ATT_PENDING_SENT);
		data->att_chan = chan;

		/* bt_l2cap_chan_send_cb does actually return the number of
		 * bytes that could be sent immediately.
		 */
		err = bt_l2cap_chan_send_cb(&chan->chan.chan, buf, chan_cb(buf), data);
		if (err < 0) {
			data->att_chan = prev_chan;
			atomic_clear_bit(chan->flags, ATT_PENDING_SENT);
			return err;
		}

		return 0;
	}

	if (hdr->code == BT_ATT_OP_SIGNED_WRITE_CMD) {
		err = bt_smp_sign(chan->att->conn, buf);
		if (err) {
			LOG_ERR("Error signing data");
			tx_meta_data_free(bt_att_tx_meta_data(buf));
			net_buf_unref(buf);
			return err;
		}
	}

	net_buf_simple_save(&buf->b, &state);

	data->att_chan = chan;

	err = bt_l2cap_send_cb(chan->att->conn, BT_L2CAP_CID_ATT,
			       buf, att_cb(buf), data);
	if (err) {
		if (err == -ENOBUFS) {
			LOG_ERR("Ran out of TX buffers or contexts.");
		}
		/* In case an error occurred, restore the buffer state */
		net_buf_simple_restore(&buf->b, &state);
		data->att_chan = prev_chan;
	}

	return err;
}

static bool att_chan_matches_chan_opt(struct bt_att_chan *chan, enum bt_att_chan_opt chan_opt)
{
	__ASSERT_NO_MSG(chan_opt <= BT_ATT_CHAN_OPT_ENHANCED_ONLY);

	if (chan_opt == BT_ATT_CHAN_OPT_NONE) {
		return true;
	}

	if (bt_att_is_enhanced(chan)) {
		return (chan_opt & BT_ATT_CHAN_OPT_ENHANCED_ONLY);
	} else {
		return (chan_opt & BT_ATT_CHAN_OPT_UNENHANCED_ONLY);
	}
}
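
/* For illustration, the resulting bearer-selection matrix:
 *
 *   chan_opt                         EATT bearer   UATT bearer
 *   BT_ATT_CHAN_OPT_NONE             match         match
 *   BT_ATT_CHAN_OPT_ENHANCED_ONLY    match         no match
 *   BT_ATT_CHAN_OPT_UNENHANCED_ONLY  no match      match
 */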

static struct net_buf *get_first_buf_matching_chan(struct k_fifo *fifo, struct bt_att_chan *chan)
{
	if (IS_ENABLED(CONFIG_BT_EATT)) {
		struct k_fifo skipped;
		struct net_buf *buf;
		struct net_buf *ret = NULL;

		k_fifo_init(&skipped);

		while ((buf = net_buf_get(fifo, K_NO_WAIT))) {
			if (!ret &&
			    att_chan_matches_chan_opt(chan, bt_att_tx_meta_data(buf)->chan_opt)) {
				ret = buf;
			} else {
				net_buf_put(&skipped, buf);
			}
		}

		__ASSERT_NO_MSG(k_fifo_is_empty(fifo));

		while ((buf = net_buf_get(&skipped, K_NO_WAIT))) {
			net_buf_put(fifo, buf);
		}

		__ASSERT_NO_MSG(k_fifo_is_empty(&skipped));

		return ret;
	} else {
		return net_buf_get(fifo, K_NO_WAIT);
	}
}

static struct bt_att_req *get_first_req_matching_chan(sys_slist_t *reqs, struct bt_att_chan *chan)
{
	if (IS_ENABLED(CONFIG_BT_EATT)) {
		sys_snode_t *curr, *prev = NULL;

		SYS_SLIST_FOR_EACH_NODE(reqs, curr) {
			if (att_chan_matches_chan_opt(
				    chan, bt_att_tx_meta_data(ATT_REQ(curr)->buf)->chan_opt)) {
				break;
			}

			prev = curr;
		}

		if (curr) {
			sys_slist_remove(reqs, prev, curr);

			return ATT_REQ(curr);
		}

		return NULL;
	}

	sys_snode_t *node = sys_slist_get(reqs);

	if (node) {
		return ATT_REQ(node);
	} else {
		return NULL;
	}
}

static int process_queue(struct bt_att_chan *chan, struct k_fifo *queue)
{
	struct net_buf *buf;
	int err;

	buf = get_first_buf_matching_chan(queue, chan);
	if (buf) {
		err = bt_att_chan_send(chan, buf);
		if (err) {
			/* Push it back if it could not be sent */
			k_queue_prepend(&queue->_queue, buf);
			return err;
		}

		return 0;
	}

	return -ENOENT;
}

/* Send requests without taking tx_sem */
static int chan_req_send(struct bt_att_chan *chan, struct bt_att_req *req)
{
	struct net_buf *buf;
	int err;

	if (bt_att_mtu(chan) < net_buf_frags_len(req->buf)) {
		return -EMSGSIZE;
	}

	LOG_DBG("chan %p req %p len %zu", chan, req, net_buf_frags_len(req->buf));

	chan->req = req;

	/* Release since bt_l2cap_send_cb takes ownership of the buffer */
	buf = req->buf;
	req->buf = NULL;

	err = bt_att_chan_send(chan, buf);
	if (err) {
		/* We still have the ownership of the buffer */
		req->buf = buf;
		chan->req = NULL;
	}

	return err;
}

static void bt_att_sent(struct bt_l2cap_chan *ch)
{
	struct bt_att_chan *chan = ATT_CHAN(ch);
	struct bt_att *att = chan->att;
	int err;

	LOG_DBG("chan %p", chan);

	atomic_clear_bit(chan->flags, ATT_PENDING_SENT);

	if (!att) {
		LOG_DBG("Ignore sent on detached ATT chan");
		return;
	}

	/* Process pending requests first: since they require a response, they
	 * can only be processed one at a time. If the other queues were
	 * processed first, they might always contain a buffer, starving the
	 * request queue.
	 */
	if (!chan->req && !sys_slist_is_empty(&att->reqs)) {
		sys_snode_t *node = sys_slist_get(&att->reqs);

		if (chan_req_send(chan, ATT_REQ(node)) >= 0) {
			return;
		}

		/* Prepend back to the list as it could not be sent */
		sys_slist_prepend(&att->reqs, node);
	}

	/* Process channel queue */
	err = process_queue(chan, &chan->tx_queue);
	if (!err) {
		return;
	}

	/* Process global queue */
	(void)process_queue(chan, &att->tx_queue);
}

static void chan_cfm_sent(struct bt_conn *conn, void *user_data, int err)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *chan = data->att_chan;

	LOG_DBG("chan %p", chan);

	if (IS_ENABLED(CONFIG_BT_ATT_ENFORCE_FLOW)) {
		atomic_clear_bit(chan->flags, ATT_PENDING_CFM);
	}

	tx_meta_data_free(data);
}

static void chan_rsp_sent(struct bt_conn *conn, void *user_data, int err)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *chan = data->att_chan;

	LOG_DBG("chan %p", chan);

	if (IS_ENABLED(CONFIG_BT_ATT_ENFORCE_FLOW)) {
		atomic_clear_bit(chan->flags, ATT_PENDING_RSP);
	}

	tx_meta_data_free(data);
}

static void chan_req_sent(struct bt_conn *conn, void *user_data, int err)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *chan = data->att_chan;

	LOG_DBG("chan %p chan->req %p", chan, chan->req);

	/* Start timeout work */
	if (chan->req) {
		k_work_reschedule(&chan->timeout_work, BT_ATT_TIMEOUT);
	}

	tx_meta_data_free(user_data);
}

static void chan_tx_complete(struct bt_conn *conn, void *user_data, int err)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *chan = data->att_chan;
	bt_gatt_complete_func_t func = data->func;
	uint16_t attr_count = data->attr_count;
	void *ud = data->user_data;

	LOG_DBG("TX Complete chan %p CID 0x%04X", chan, chan->chan.tx.cid);

	tx_meta_data_free(data);

	if (!err && func) {
		for (uint16_t i = 0; i < attr_count; i++) {
			func(conn, ud);
		}
	}
}

static void chan_unknown(struct bt_conn *conn, void *user_data, int err)
{
	tx_meta_data_free(user_data);
}

static bt_conn_tx_cb_t chan_cb(const struct net_buf *buf)
{
	const att_type_t op_type = att_op_get_type(buf->data[0]);

	switch (op_type) {
	case ATT_RESPONSE:
		return chan_rsp_sent;
	case ATT_CONFIRMATION:
		return chan_cfm_sent;
	case ATT_REQUEST:
	case ATT_INDICATION:
		return chan_req_sent;
	case ATT_COMMAND:
	case ATT_NOTIFICATION:
		return chan_tx_complete;
	default:
		__ASSERT(false, "Unknown op type 0x%02X", op_type);
	}

	return chan_unknown;
}

static void att_cfm_sent(struct bt_conn *conn, void *user_data, int err)
{
	if (!err) {
		att_sent(conn, user_data);
	}

	chan_cfm_sent(conn, user_data, err);
}

static void att_rsp_sent(struct bt_conn *conn, void *user_data, int err)
{
	if (!err) {
		att_sent(conn, user_data);
	}

	chan_rsp_sent(conn, user_data, err);
}

static void att_req_sent(struct bt_conn *conn, void *user_data, int err)
{
	if (!err) {
		att_sent(conn, user_data);
	}

	chan_req_sent(conn, user_data, err);
}

static void att_tx_complete(struct bt_conn *conn, void *user_data, int err)
{
	if (!err) {
		att_sent(conn, user_data);
	}

	chan_tx_complete(conn, user_data, err);
}

static void att_unknown(struct bt_conn *conn, void *user_data, int err)
{
	if (!err) {
		att_sent(conn, user_data);
	}

	chan_unknown(conn, user_data, err);
}

static bt_conn_tx_cb_t att_cb(const struct net_buf *buf)
{
	const att_type_t op_type = att_op_get_type(buf->data[0]);

	switch (op_type) {
	case ATT_RESPONSE:
		return att_rsp_sent;
	case ATT_CONFIRMATION:
		return att_cfm_sent;
	case ATT_REQUEST:
	case ATT_INDICATION:
		return att_req_sent;
	case ATT_COMMAND:
	case ATT_NOTIFICATION:
		return att_tx_complete;
	default:
		__ASSERT(false, "Unknown op type 0x%02X", op_type);
	}

	return att_unknown;
}
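
/* Note: the att_* completion callbacks above mirror the chan_* ones but are
 * used on the unenhanced (fixed-channel) send path in chan_send(). The only
 * difference is that on success they also invoke att_sent(), which runs the
 * channel's sent() op, presumably because bt_l2cap_send_cb() does not go
 * through the L2CAP channel ops on its own the way bt_l2cap_chan_send_cb()
 * does.
 */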

static struct net_buf *bt_att_chan_create_pdu(struct bt_att_chan *chan, uint8_t op, size_t len)
{
	struct bt_att_hdr *hdr;
	struct net_buf *buf;
	struct bt_att_tx_meta_data *data;
	k_timeout_t timeout;
	bool re_use = false;

	if (len + sizeof(op) > bt_att_mtu(chan)) {
		LOG_WRN("ATT MTU exceeded, max %u, wanted %zu", bt_att_mtu(chan),
			len + sizeof(op));
		return NULL;
	}

	switch (att_op_get_type(op)) {
	case ATT_RESPONSE:
		/* Use a timeout only when responding */
		timeout = BT_ATT_TIMEOUT;
		re_use = true;
		break;
	case ATT_CONFIRMATION:
		timeout = BT_ATT_TIMEOUT;
		break;
	default:
		timeout = K_FOREVER;
	}

	if (IS_ENABLED(CONFIG_BT_GATT_READ_MULTIPLE) &&
	    (op == BT_ATT_OP_READ_MULT_RSP ||
	     op == BT_ATT_OP_READ_MULT_VL_RSP)) {
		/* We can't re-use the REQ buffer (see below) for these two
		 * opcodes, as the handler will read from it _after_ allocating
		 * the RSP buffer.
		 */
		re_use = false;
	}

	if (re_use) {
		/* There can only ever be one transaction at a time on a
		 * bearer/channel. Use a dedicated channel meta-data to ensure
		 * we can always queue an (error) RSP for each REQ. The ATT
		 * module can then reschedule the RSP if it is not able to send
		 * it immediately.
		 */
		if (chan->rsp_meta.att_chan) {
			/* Returning a NULL here will trigger an ATT timeout.
			 * This is better than an assert as an assert would
			 * allow a peer to DoS us.
			 */
			LOG_ERR("already processing a REQ/RSP on chan %p", chan);

			return NULL;
		}
		data = &chan->rsp_meta;

		/* Re-use REQ buf to avoid dropping the REQ and timing out.
		 * This only works if the bearer used to RX REQs is the same as
		 * for sending the RSP. That should always be the case
		 * (per-spec).
		 */
		__ASSERT_NO_MSG(chan->rsp_buf);
		buf = net_buf_ref(chan->rsp_buf);

		net_buf_reset(buf);
		net_buf_reserve(buf, BT_L2CAP_BUF_SIZE(0));

		LOG_DBG("re-using REQ buf %p for RSP", buf);
	} else {
		LOG_DBG("alloc buf & meta from global pools");
		buf = bt_l2cap_create_pdu_timeout(NULL, 0, timeout);
		if (!buf) {
			LOG_ERR("Unable to allocate buffer for op 0x%02x", op);
			return NULL;
		}

		data = tx_meta_data_alloc(timeout);
		if (!data) {
			LOG_WRN("Unable to allocate ATT TX meta");
			net_buf_unref(buf);
			return NULL;
		}
	}

	if (IS_ENABLED(CONFIG_BT_EATT)) {
		net_buf_reserve(buf, BT_L2CAP_SDU_BUF_SIZE(0));
	}

	data->att_chan = chan;
	bt_att_tx_meta_data(buf) = data;

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = op;

	return buf;
}
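
/* Typical usage, as a minimal sketch mirroring att_mtu_req() below: allocate
 * a PDU for a response opcode, append the response-specific fields, then hand
 * the buffer to bt_att_chan_send_rsp():
 *
 *   struct bt_att_exchange_mtu_rsp *rsp;
 *   struct net_buf *pdu;
 *
 *   pdu = bt_att_chan_create_pdu(chan, BT_ATT_OP_MTU_RSP, sizeof(*rsp));
 *   if (!pdu) {
 *           return BT_ATT_ERR_UNLIKELY;
 *   }
 *
 *   rsp = net_buf_add(pdu, sizeof(*rsp));
 *   rsp->mtu = sys_cpu_to_le16(mtu_server);
 *   bt_att_chan_send_rsp(chan, pdu);
 */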

static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf)
{
	LOG_DBG("chan %p flags %lu code 0x%02x", chan, atomic_get(chan->flags),
		((struct bt_att_hdr *)buf->data)->code);

	if (IS_ENABLED(CONFIG_BT_EATT) &&
	    !att_chan_matches_chan_opt(chan, bt_att_tx_meta_data(buf)->chan_opt)) {
		return -EINVAL;
	}

	return chan_send(chan, buf);
}

static void att_send_process(struct bt_att *att)
{
	struct bt_att_chan *chan, *tmp, *prev = NULL;
	int err = 0;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (err == -ENOENT && prev &&
		    (bt_att_is_enhanced(chan) == bt_att_is_enhanced(prev))) {
			/* If there was nothing to send for the previous channel and the current
			 * channel has the same "enhancedness", there will be nothing to send for
			 * this channel either.
			 */
			continue;
		}

		err = process_queue(chan, &att->tx_queue);
		if (!err) {
			/* Success */
			return;
		}

		prev = chan;
	}
}

static void bt_att_chan_send_rsp(struct bt_att_chan *chan, struct net_buf *buf)
{
	int err;

	err = chan_send(chan, buf);
	if (err) {
		/* Responses need to be sent back using the same channel */
		net_buf_put(&chan->tx_queue, buf);
	}
}

static void send_err_rsp(struct bt_att_chan *chan, uint8_t req, uint16_t handle,
			 uint8_t err)
{
	struct bt_att_error_rsp *rsp;
	struct net_buf *buf;

	/* Ignore opcode 0x00 */
	if (!req) {
		return;
	}

	buf = bt_att_chan_create_pdu(chan, BT_ATT_OP_ERROR_RSP, sizeof(*rsp));
	if (!buf) {
		LOG_ERR("unable to allocate buf for error response");
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->request = req;
	rsp->handle = sys_cpu_to_le16(handle);
	rsp->error = err;

	bt_att_chan_send_rsp(chan, buf);
}
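
/* On the wire this produces the 5-octet ATT_ERROR_RSP (Core Vol 3, Part F,
 * 3.4.1.1). For example, rejecting a Read Request (0x0A) for handle 0x0005
 * with "Read Not Permitted" (0x02) yields:
 *
 *   0x01 0x0A 0x05 0x00 0x02
 *   |    |    |         `-- Error Code
 *   |    |    `-- Attribute Handle (little-endian)
 *   |    `-- Request Opcode In Error
 *   `-- ATT_ERROR_RSP opcode
 */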

static uint8_t att_mtu_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_exchange_mtu_req *req;
	struct bt_att_exchange_mtu_rsp *rsp;
	struct net_buf *pdu;
	uint16_t mtu_client, mtu_server;

	/* Exchange MTU sub-procedure shall only be supported on the
	 * LE Fixed Channel Unenhanced ATT bearer.
	 */
	if (bt_att_is_enhanced(chan)) {
		return BT_ATT_ERR_NOT_SUPPORTED;
	}

	req = (void *)buf->data;

	mtu_client = sys_le16_to_cpu(req->mtu);

	LOG_DBG("Client MTU %u", mtu_client);

	/* Check if MTU is valid */
	if (mtu_client < BT_ATT_DEFAULT_LE_MTU) {
		return BT_ATT_ERR_INVALID_PDU;
	}

	pdu = bt_att_create_rsp_pdu(chan, BT_ATT_OP_MTU_RSP, sizeof(*rsp));
	if (!pdu) {
		return BT_ATT_ERR_UNLIKELY;
	}

	mtu_server = BT_LOCAL_ATT_MTU_UATT;

	LOG_DBG("Server MTU %u", mtu_server);

	rsp = net_buf_add(pdu, sizeof(*rsp));
	rsp->mtu = sys_cpu_to_le16(mtu_server);

	bt_att_chan_send_rsp(chan, pdu);

	/* The ATT_EXCHANGE_MTU_REQ/RSP is just an alternative way of
	 * communicating the L2CAP MTU.
	 */
	chan->chan.rx.mtu = mtu_server;
	chan->chan.tx.mtu = mtu_client;

	LOG_DBG("Negotiated MTU %u", bt_att_mtu(chan));

#if defined(CONFIG_BT_GATT_CLIENT)
	/* Mark the MTU Exchange as complete.
	 * This will skip sending ATT Exchange MTU from our side.
	 *
	 * Core 5.3 | Vol 3, Part F 3.4.2.2:
	 * If MTU is exchanged in one direction, that is sufficient for both directions.
	 */
	atomic_set_bit(chan->att->conn->flags, BT_CONN_ATT_MTU_EXCHANGED);
#endif /* CONFIG_BT_GATT_CLIENT */

	att_chan_mtu_updated(chan);

	return 0;
}
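
/* Worked example of the exchange handled above (numbers illustrative only):
 * a client advertising Rx MTU 158 sends ATT_EXCHANGE_MTU_REQ(158); assuming
 * BT_LOCAL_ATT_MTU_UATT is 247, we answer ATT_EXCHANGE_MTU_RSP(247), and
 * both sides then use ATT_MTU = MIN(158, 247) = 158 on the unenhanced
 * bearer.
 */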

static int bt_att_chan_req_send(struct bt_att_chan *chan,
				struct bt_att_req *req)
{
	__ASSERT_NO_MSG(chan);
	__ASSERT_NO_MSG(req);
	__ASSERT_NO_MSG(req->func);
	__ASSERT_NO_MSG(!chan->req);

	LOG_DBG("req %p", req);

	return chan_req_send(chan, req);
}

static void att_req_send_process(struct bt_att *att)
{
	struct bt_att_req *req = NULL;
	struct bt_att_chan *chan, *tmp, *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		/* If there is an ongoing transaction, do not use the channel */
		if (chan->req) {
			continue;
		}

		if (!req && prev && (bt_att_is_enhanced(chan) == bt_att_is_enhanced(prev))) {
			/* If there was nothing to send for the previous channel and the current
			 * channel has the same "enhancedness", there will be nothing to send for
			 * this channel either.
			 */
			continue;
		}

		prev = chan;

		/* Pull next request from the list */
		req = get_first_req_matching_chan(&att->reqs, chan);
		if (!req) {
			continue;
		}

		if (bt_att_chan_req_send(chan, req) >= 0) {
			return;
		}

		/* Prepend back to the list as it could not be sent */
		sys_slist_prepend(&att->reqs, &req->node);
	}
}

static uint8_t att_handle_rsp(struct bt_att_chan *chan, void *pdu, uint16_t len,
			      uint8_t err)
{
	bt_att_func_t func = NULL;
	void *params;

	LOG_DBG("chan %p err 0x%02x len %u: %s", chan, err, len, bt_hex(pdu, len));

	/* Cancel timeout if ongoing */
	k_work_cancel_delayable(&chan->timeout_work);

	if (!chan->req) {
		LOG_WRN("No pending ATT request");
		goto process;
	}

	/* Check if request has been cancelled */
	if (chan->req == &cancel) {
		chan->req = NULL;
		goto process;
	}

	/* Reset func so it can be reused by the callback */
	func = chan->req->func;
	chan->req->func = NULL;
	params = chan->req->user_data;

	/* Free the allocated request so its memory can be reused */
	bt_att_req_free(chan->req);
	chan->req = NULL;

process:
	/* Process pending requests */
	att_req_send_process(chan->att);
	if (func) {
		func(chan->att->conn, err, pdu, len, params);
	}

	return 0;
}

#if defined(CONFIG_BT_GATT_CLIENT)
static uint8_t att_mtu_rsp(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_exchange_mtu_rsp *rsp;
	uint16_t mtu;

	rsp = (void *)buf->data;

	mtu = sys_le16_to_cpu(rsp->mtu);

	LOG_DBG("Server MTU %u", mtu);

	/* Check if MTU is valid */
	if (mtu < BT_ATT_DEFAULT_LE_MTU) {
		return att_handle_rsp(chan, NULL, 0, BT_ATT_ERR_INVALID_PDU);
	}

	/* The following must equal the value we sent in the req. We assume this
	 * is a rsp to `gatt_exchange_mtu_encode`.
	 */
	chan->chan.rx.mtu = BT_LOCAL_ATT_MTU_UATT;

	/* The ATT_EXCHANGE_MTU_REQ/RSP is just an alternative way of
	 * communicating the L2CAP MTU.
	 */
	chan->chan.tx.mtu = mtu;

	LOG_DBG("Negotiated MTU %u", bt_att_mtu(chan));

	att_chan_mtu_updated(chan);

	return att_handle_rsp(chan, rsp, buf->len, 0);
}
#endif /* CONFIG_BT_GATT_CLIENT */

static bool range_is_valid(uint16_t start, uint16_t end, uint16_t *err)
{
	/* Handle 0 is invalid */
	if (!start || !end) {
		if (err) {
			*err = 0U;
		}
		return false;
	}

	/* Check if range is valid */
	if (start > end) {
		if (err) {
			*err = start;
		}
		return false;
	}

	return true;
}
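
/* Examples: range_is_valid(0x0001, 0xffff, &h) is true;
 * range_is_valid(0x0000, 0xffff, &h) is false with h = 0x0000; and
 * range_is_valid(0x0010, 0x0001, &h) is false with h = 0x0010, the handle
 * later echoed in the Invalid Handle error response.
 */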

struct find_info_data {
	struct bt_att_chan *chan;
	struct net_buf *buf;
	struct bt_att_find_info_rsp *rsp;
	union {
		struct bt_att_info_16 *info16;
		struct bt_att_info_128 *info128;
	};
};

static uint8_t find_info_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct find_info_data *data = user_data;
	struct bt_att_chan *chan = data->chan;

	LOG_DBG("handle 0x%04x", handle);

	/* Initialize rsp at first entry */
	if (!data->rsp) {
		data->rsp = net_buf_add(data->buf, sizeof(*data->rsp));
		data->rsp->format = (attr->uuid->type == BT_UUID_TYPE_16) ?
				    BT_ATT_INFO_16 : BT_ATT_INFO_128;
	}

	switch (data->rsp->format) {
	case BT_ATT_INFO_16:
		if (attr->uuid->type != BT_UUID_TYPE_16) {
			return BT_GATT_ITER_STOP;
		}

		/* Fast forward to next item position */
		data->info16 = net_buf_add(data->buf, sizeof(*data->info16));
		data->info16->handle = sys_cpu_to_le16(handle);
		data->info16->uuid = sys_cpu_to_le16(BT_UUID_16(attr->uuid)->val);

		if (bt_att_mtu(chan) - data->buf->len >
		    sizeof(*data->info16)) {
			return BT_GATT_ITER_CONTINUE;
		}

		break;
	case BT_ATT_INFO_128:
		if (attr->uuid->type != BT_UUID_TYPE_128) {
			return BT_GATT_ITER_STOP;
		}

		/* Fast forward to next item position */
		data->info128 = net_buf_add(data->buf, sizeof(*data->info128));
		data->info128->handle = sys_cpu_to_le16(handle);
		memcpy(data->info128->uuid, BT_UUID_128(attr->uuid)->val,
		       sizeof(data->info128->uuid));

		if (bt_att_mtu(chan) - data->buf->len >
		    sizeof(*data->info128)) {
			return BT_GATT_ITER_CONTINUE;
		}
	}

	return BT_GATT_ITER_STOP;
}

static uint8_t att_find_info_rsp(struct bt_att_chan *chan, uint16_t start_handle,
				 uint16_t end_handle)
{
	struct find_info_data data;

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_FIND_INFO_RSP, 0);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	bt_gatt_foreach_attr(start_handle, end_handle, find_info_cb, &data);

	if (!data.rsp) {
		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Respond since handle is set */
		send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, start_handle,
			     BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_find_info_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_find_info_req *req;
	uint16_t start_handle, end_handle, err_handle;

	req = (void *)buf->data;

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x", start_handle, end_handle);

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	return att_find_info_rsp(chan, start_handle, end_handle);
}

struct find_type_data {
	struct bt_att_chan *chan;
	struct net_buf *buf;
	struct bt_att_handle_group *group;
	const void *value;
	uint8_t value_len;
	uint8_t err;
};

static uint8_t find_type_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct find_type_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	int read;
	uint8_t uuid[16];
	struct net_buf *frag;
	size_t len;

	/* Skip secondary services */
	if (!bt_uuid_cmp(attr->uuid, BT_UUID_GATT_SECONDARY)) {
		goto skip;
	}

	/* Update group end_handle if not a primary service */
	if (bt_uuid_cmp(attr->uuid, BT_UUID_GATT_PRIMARY)) {
		if (data->group &&
		    handle > sys_le16_to_cpu(data->group->end_handle)) {
			data->group->end_handle = sys_cpu_to_le16(handle);
		}
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/* Stop if there is no space left */
	if (bt_att_mtu(chan) - net_buf_frags_len(data->buf) <
	    sizeof(*data->group)) {
		return BT_GATT_ITER_STOP;
	}

	frag = net_buf_frag_last(data->buf);

	len = MIN(bt_att_mtu(chan) - net_buf_frags_len(data->buf),
		  net_buf_tailroom(frag));
	if (!len) {
		frag = net_buf_alloc(net_buf_pool_get(data->buf->pool_id),
				     K_NO_WAIT);
		/* If no buffer can be allocated immediately, stop */
		if (!frag) {
			return BT_GATT_ITER_STOP;
		}

		net_buf_frag_add(data->buf, frag);
	}

	/* Read attribute value and store in the buffer */
	read = attr->read(conn, attr, uuid, sizeof(uuid), 0);
	if (read < 0) {
		/*
		 * Since we don't know if it is the service with the requested
		 * UUID, we cannot respond with an error to this request.
		 */
		goto skip;
	}

	/* Check if data matches */
	if (read != data->value_len) {
		/* Use bt_uuid_cmp() to compare UUIDs of different form. */
		struct bt_uuid_128 ref_uuid;
		struct bt_uuid_128 recvd_uuid;

		if (!bt_uuid_create(&recvd_uuid.uuid, data->value, data->value_len)) {
			LOG_WRN("Unable to create UUID: size %u", data->value_len);
			goto skip;
		}
		if (!bt_uuid_create(&ref_uuid.uuid, uuid, read)) {
			LOG_WRN("Unable to create UUID: size %d", read);
			goto skip;
		}
		if (bt_uuid_cmp(&recvd_uuid.uuid, &ref_uuid.uuid)) {
			goto skip;
		}
	} else if (memcmp(data->value, uuid, read)) {
		goto skip;
	}

	/* If the service has been found, the error should be cleared */
	data->err = 0x00;

	/* Fast forward to next item position */
	data->group = net_buf_add(frag, sizeof(*data->group));
	data->group->start_handle = sys_cpu_to_le16(handle);
	data->group->end_handle = sys_cpu_to_le16(handle);

	/* Continue to find the end_handle */
	return BT_GATT_ITER_CONTINUE;

skip:
	data->group = NULL;
	return BT_GATT_ITER_CONTINUE;
}

static uint8_t att_find_type_rsp(struct bt_att_chan *chan, uint16_t start_handle,
				 uint16_t end_handle, const void *value,
				 uint8_t value_len)
{
	struct find_type_data data;

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_FIND_TYPE_RSP, 0);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.group = NULL;
	data.value = value;
	data.value_len = value_len;

	/* Pre-set error in case no service will be found */
	data.err = BT_ATT_ERR_ATTRIBUTE_NOT_FOUND;

	bt_gatt_foreach_attr(start_handle, end_handle, find_type_cb, &data);

	/* If the error has not been cleared, no service has been found */
	if (data.err) {
		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Respond since handle is set */
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
			     data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_find_type_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_find_type_req *req;
	uint16_t start_handle, end_handle, err_handle, type;
	uint8_t *value;

	req = net_buf_pull_mem(buf, sizeof(*req));

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);
	type = sys_le16_to_cpu(req->type);
	value = buf->data;

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %u", start_handle, end_handle, type);

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	/* The Attribute Protocol Find By Type Value Request shall be used with
	 * the Attribute Type parameter set to the UUID for "Primary Service"
	 * and the Attribute Value set to the 16-bit Bluetooth UUID or 128-bit
	 * UUID for the specific primary service.
	 */
	if (bt_uuid_cmp(BT_UUID_DECLARE_16(type), BT_UUID_GATT_PRIMARY)) {
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
			     BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
		return 0;
	}

	return att_find_type_rsp(chan, start_handle, end_handle, value,
				 buf->len);
}

static uint8_t err_to_att(int err)
{
	LOG_DBG("%d", err);

	if (err < 0 && err >= -0xff) {
		return -err;
	}

	return BT_ATT_ERR_UNLIKELY;
}
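
/* Illustration: GATT read/write callbacks report ATT errors as negated
 * values (e.g. via BT_GATT_ERR()), so a callback returning
 * BT_GATT_ERR(BT_ATT_ERR_INVALID_OFFSET), i.e. -0x07, maps back to
 * BT_ATT_ERR_INVALID_OFFSET here; anything outside [-0xff, -1] is collapsed
 * to BT_ATT_ERR_UNLIKELY.
 */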

struct read_type_data {
	struct bt_att_chan *chan;
	struct bt_uuid *uuid;
	struct net_buf *buf;
	struct bt_att_read_type_rsp *rsp;
	struct bt_att_data *item;
	uint8_t err;
};

typedef bool (*attr_read_cb)(struct net_buf *buf, ssize_t read,
			     void *user_data);

static bool attr_read_type_cb(struct net_buf *frag, ssize_t read,
			      void *user_data)
{
	struct read_type_data *data = user_data;

	if (!data->rsp->len) {
		/* Set len to be the first item found */
		data->rsp->len = read + sizeof(*data->item);
	} else if (data->rsp->len != read + sizeof(*data->item)) {
		/* All items should have the same size */
		frag->len -= sizeof(*data->item);
		data->item = NULL;
		return false;
	}

	return true;
}

static ssize_t att_chan_read(struct bt_att_chan *chan,
			     const struct bt_gatt_attr *attr,
			     struct net_buf *buf, uint16_t offset,
			     attr_read_cb cb, void *user_data)
{
	struct bt_conn *conn = chan->chan.chan.conn;
	ssize_t read;
	struct net_buf *frag;
	size_t len, total = 0;

	if (bt_att_mtu(chan) <= net_buf_frags_len(buf)) {
		return 0;
	}

	frag = net_buf_frag_last(buf);

	/* Create necessary fragments if MTU is bigger than what a buffer can
	 * hold.
	 */
	do {
		len = MIN(bt_att_mtu(chan) - net_buf_frags_len(buf),
			  net_buf_tailroom(frag));
		if (!len) {
			frag = net_buf_alloc(net_buf_pool_get(buf->pool_id),
					     K_NO_WAIT);
			/* If no buffer can be allocated immediately, return */
			if (!frag) {
				return total;
			}

			net_buf_frag_add(buf, frag);

			len = MIN(bt_att_mtu(chan) - net_buf_frags_len(buf),
				  net_buf_tailroom(frag));
		}

		read = attr->read(conn, attr, frag->data + frag->len, len,
				  offset);
		if (read < 0) {
			if (total) {
				return total;
			}

			return read;
		}

		if (cb && !cb(frag, read, user_data)) {
			break;
		}

		net_buf_add(frag, read);
		total += read;
		offset += read;
	} while (bt_att_mtu(chan) > net_buf_frags_len(buf) && read == len);

	return total;
}
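
/* Example of the fragmentation above (numbers are illustrative only): with
 * ATT_MTU = 247 and a net_buf pool whose buffers have ~69 bytes of tailroom,
 * a 200-byte attribute value is read in several chunks, each appended to a
 * freshly allocated fragment chained onto `buf`, until the value, the MTU
 * budget, or the pool is exhausted.
 */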

static uint8_t read_type_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct read_type_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	ssize_t read;

	/* Skip if doesn't match */
	if (bt_uuid_cmp(attr->uuid, data->uuid)) {
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If an attribute in the set of requested attributes would cause an
	 * Error Response, then this attribute cannot be included in a
	 * Read By Type Response and the attributes before this attribute
	 * shall be returned.
	 *
	 * If the first attribute in the set of requested attributes would
	 * cause an Error Response, then no other attributes in the requested
	 * attributes can be considered.
	 */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		if (data->rsp->len) {
			data->err = 0x00;
		}
		return BT_GATT_ITER_STOP;
	}

	/*
	 * If any attribute is found in the handle range, the error is changed
	 * from the pre-set "attribute not found" error to no error.
	 */
	data->err = 0x00;

	/* Fast forward to next item position */
	data->item = net_buf_add(net_buf_frag_last(data->buf),
				 sizeof(*data->item));
	data->item->handle = sys_cpu_to_le16(handle);

	read = att_chan_read(chan, attr, data->buf, 0, attr_read_type_cb, data);
	if (read < 0) {
		data->err = err_to_att(read);
		return BT_GATT_ITER_STOP;
	}

	if (!data->item) {
		return BT_GATT_ITER_STOP;
	}

	/* Continue only if there is still space for more items */
	return bt_att_mtu(chan) - net_buf_frags_len(data->buf) >
	       data->rsp->len ? BT_GATT_ITER_CONTINUE : BT_GATT_ITER_STOP;
}

static uint8_t att_read_type_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
				 uint16_t start_handle, uint16_t end_handle)
{
	struct read_type_data data;

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_TYPE_RSP,
					 sizeof(*data.rsp));
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.uuid = uuid;
	data.rsp = net_buf_add(data.buf, sizeof(*data.rsp));
	data.rsp->len = 0U;

	/* Pre-set error in case no attr will be found in the handle range */
	data.err = BT_ATT_ERR_ATTRIBUTE_NOT_FOUND;

	bt_gatt_foreach_attr(start_handle, end_handle, read_type_cb, &data);

	if (data.err) {
		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, start_handle,
			     data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_read_type_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_type_req *req;
	uint16_t start_handle, end_handle, err_handle;
	union {
		struct bt_uuid uuid;
		struct bt_uuid_16 u16;
		struct bt_uuid_128 u128;
	} u;
	uint8_t uuid_len = buf->len - sizeof(*req);

	/* Type can only be UUID16 or UUID128 */
	if (uuid_len != 2 && uuid_len != 16) {
		return BT_ATT_ERR_INVALID_PDU;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);
	if (!bt_uuid_create(&u.uuid, req->uuid, uuid_len)) {
		return BT_ATT_ERR_UNLIKELY;
	}

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %s", start_handle, end_handle,
		bt_uuid_str(&u.uuid));

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	return att_read_type_rsp(chan, &u.uuid, start_handle, end_handle);
}

struct read_data {
	struct bt_att_chan *chan;
	uint16_t offset;
	struct net_buf *buf;
	uint8_t err;
};

static uint8_t read_cb(const struct bt_gatt_attr *attr, uint16_t handle,
		       void *user_data)
{
	struct read_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	int ret;

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If any attribute is found in the handle range, the error is changed
	 * from the pre-set "invalid handle" error to no error.
	 */
	data->err = 0x00;

	/* Check attribute permissions */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		return BT_GATT_ITER_STOP;
	}

	/* Read attribute value and store in the buffer */
	ret = att_chan_read(chan, attr, data->buf, data->offset, NULL, NULL);
	if (ret < 0) {
		data->err = err_to_att(ret);
		return BT_GATT_ITER_STOP;
	}

	return BT_GATT_ITER_CONTINUE;
}

static uint8_t att_read_rsp(struct bt_att_chan *chan, uint8_t op, uint8_t rsp,
			    uint16_t handle, uint16_t offset)
{
	struct read_data data;

	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	if (!handle) {
		return BT_ATT_ERR_INVALID_HANDLE;
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, rsp, 0);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.offset = offset;

	/* Pre-set error in case no attr will be found at the handle */
	data.err = BT_ATT_ERR_INVALID_HANDLE;

	bt_gatt_foreach_attr(handle, handle, read_cb, &data);

	/* In case of error discard data and respond with an error */
	if (data.err) {
		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, op, handle, data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_read_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_req *req;
	uint16_t handle;

	req = (void *)buf->data;

	handle = sys_le16_to_cpu(req->handle);

	LOG_DBG("handle 0x%04x", handle);

	return att_read_rsp(chan, BT_ATT_OP_READ_REQ, BT_ATT_OP_READ_RSP,
			    handle, 0);
}

static uint8_t att_read_blob_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_blob_req *req;
	uint16_t handle, offset;

	req = (void *)buf->data;

	handle = sys_le16_to_cpu(req->handle);
	offset = sys_le16_to_cpu(req->offset);

	LOG_DBG("handle 0x%04x offset %u", handle, offset);

	return att_read_rsp(chan, BT_ATT_OP_READ_BLOB_REQ,
			    BT_ATT_OP_READ_BLOB_RSP, handle, offset);
}

#if defined(CONFIG_BT_GATT_READ_MULTIPLE)
static uint8_t att_read_mult_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct read_data data;
	uint16_t handle;

	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_MULT_RSP, 0);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;

	while (buf->len >= sizeof(uint16_t)) {
		handle = net_buf_pull_le16(buf);

		LOG_DBG("handle 0x%04x ", handle);

		/* An Error Response shall be sent by the server in response to
		 * the Read Multiple Request [....] if a read operation is not
		 * permitted on any of the Characteristic Values.
		 *
		 * If the handle is not valid, return an invalid handle error.
		 * If the handle is found, the error will be cleared by read_cb.
		 */
		data.err = BT_ATT_ERR_INVALID_HANDLE;

		bt_gatt_foreach_attr(handle, handle, read_cb, &data);

		/* Stop reading in case of error */
		if (data.err) {
			tx_meta_data_free(bt_att_tx_meta_data(data.buf));
			net_buf_unref(data.buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, BT_ATT_OP_READ_MULT_REQ, handle,
				     data.err);
			return 0;
		}
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
#endif /* CONFIG_BT_GATT_READ_MULTIPLE */

#if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
static uint8_t read_vl_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			  void *user_data)
{
	struct read_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	struct bt_att_read_mult_vl_rsp *rsp;
	int read;

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If any attribute is found in the handle range, the error is changed
	 * from the pre-set "invalid handle" error to no error.
	 */
	data->err = 0x00;

	/* Check attribute permissions */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		return BT_GATT_ITER_STOP;
	}

	/* The Length Value Tuple List may be truncated within the first two
	 * octets of a tuple due to the size limits of the current ATT_MTU.
	 */
	if (bt_att_mtu(chan) - data->buf->len < 2) {
		return BT_GATT_ITER_STOP;
	}

	rsp = net_buf_add(data->buf, sizeof(*rsp));

	read = att_chan_read(chan, attr, data->buf, data->offset, NULL, NULL);
	if (read < 0) {
		data->err = err_to_att(read);
		return BT_GATT_ITER_STOP;
	}

	rsp->len = read;

	return BT_GATT_ITER_CONTINUE;
}
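
/* Per the Core spec (Vol 3, Part F, Read Multiple Variable Handle Values
 * Response), each entry appended above is a Length Value Tuple: a 2-octet
 * little-endian Value Length followed by the (possibly truncated) Attribute
 * Value. E.g. two values of 3 and 1 octets produce:
 *
 *   03 00 <3 value octets> 01 00 <1 value octet>
 */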
1771
att_read_mult_vl_req(struct bt_att_chan * chan,struct net_buf * buf)1772 static uint8_t att_read_mult_vl_req(struct bt_att_chan *chan, struct net_buf *buf)
1773 {
1774 struct read_data data;
1775 uint16_t handle;
1776
1777 if (!bt_gatt_change_aware(chan->att->conn, true)) {
1778 if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
1779 return BT_ATT_ERR_DB_OUT_OF_SYNC;
1780 } else {
1781 return 0;
1782 }
1783 }
1784
1785 (void)memset(&data, 0, sizeof(data));
1786
1787 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_MULT_VL_RSP, 0);
1788 if (!data.buf) {
1789 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1790 }
1791
1792 data.chan = chan;
1793
1794 while (buf->len >= sizeof(uint16_t)) {
1795 handle = net_buf_pull_le16(buf);
1796
1797 LOG_DBG("handle 0x%04x ", handle);
1798
1799 /* If handle is not valid then return invalid handle error.
1800 * If handle is found error will be cleared by read_cb.
1801 */
1802 data.err = BT_ATT_ERR_INVALID_HANDLE;
1803
1804 bt_gatt_foreach_attr(handle, handle, read_vl_cb, &data);
1805
1806 /* Stop reading in case of error */
1807 if (data.err) {
1808 tx_meta_data_free(bt_att_tx_meta_data(data.buf));
1809 net_buf_unref(data.buf);
1810 /* Respond here since handle is set */
1811 send_err_rsp(chan, BT_ATT_OP_READ_MULT_VL_REQ, handle,
1812 data.err);
1813 return 0;
1814 }
1815 }
1816
1817 bt_att_chan_send_rsp(chan, data.buf);
1818
1819 return 0;
1820 }
1821 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
1822
1823 struct read_group_data {
1824 struct bt_att_chan *chan;
1825 struct bt_uuid *uuid;
1826 struct net_buf *buf;
1827 struct bt_att_read_group_rsp *rsp;
1828 struct bt_att_group_data *group;
1829 };
1830
attr_read_group_cb(struct net_buf * frag,ssize_t read,void * user_data)1831 static bool attr_read_group_cb(struct net_buf *frag, ssize_t read,
1832 void *user_data)
1833 {
1834 struct read_group_data *data = user_data;
1835
1836 if (!data->rsp->len) {
1837 /* Set len to be the first group found */
1838 data->rsp->len = read + sizeof(*data->group);
1839 } else if (data->rsp->len != read + sizeof(*data->group)) {
1840 /* All groups entries should have the same size */
1841 data->buf->len -= sizeof(*data->group);
1842 data->group = NULL;
1843 return false;
1844 }
1845
1846 return true;
1847 }
1848
read_group_cb(const struct bt_gatt_attr * attr,uint16_t handle,void * user_data)1849 static uint8_t read_group_cb(const struct bt_gatt_attr *attr, uint16_t handle,
1850 void *user_data)
1851 {
1852 struct read_group_data *data = user_data;
1853 struct bt_att_chan *chan = data->chan;
1854 int read;
1855
1856 /* Update group end_handle if attribute is not a service */
1857 if (bt_uuid_cmp(attr->uuid, BT_UUID_GATT_PRIMARY) &&
1858 bt_uuid_cmp(attr->uuid, BT_UUID_GATT_SECONDARY)) {
1859 if (data->group &&
1860 handle > sys_le16_to_cpu(data->group->end_handle)) {
1861 data->group->end_handle = sys_cpu_to_le16(handle);
1862 }
1863 return BT_GATT_ITER_CONTINUE;
1864 }
1865
1866 /* If Group Type don't match skip */
1867 if (bt_uuid_cmp(attr->uuid, data->uuid)) {
1868 data->group = NULL;
1869 return BT_GATT_ITER_CONTINUE;
1870 }
1871
1872 LOG_DBG("handle 0x%04x", handle);
1873
1874 /* Stop if there is no space left */
1875 if (data->rsp->len &&
1876 bt_att_mtu(chan) - data->buf->len < data->rsp->len) {
1877 return BT_GATT_ITER_STOP;
1878 }
1879
1880 /* Fast forward to next group position */
1881 data->group = net_buf_add(data->buf, sizeof(*data->group));
1882
1883 /* Initialize group handle range */
1884 data->group->start_handle = sys_cpu_to_le16(handle);
1885 data->group->end_handle = sys_cpu_to_le16(handle);
1886
1887 /* Read attribute value and store in the buffer */
1888 read = att_chan_read(chan, attr, data->buf, 0, attr_read_group_cb,
1889 data);
1890 if (read < 0) {
1891 /* TODO: Handle read errors */
1892 return BT_GATT_ITER_STOP;
1893 }
1894
1895 if (!data->group) {
1896 return BT_GATT_ITER_STOP;
1897 }
1898
1899 /* continue only if there are still space for more items */
1900 return BT_GATT_ITER_CONTINUE;
1901 }
1902
att_read_group_rsp(struct bt_att_chan * chan,struct bt_uuid * uuid,uint16_t start_handle,uint16_t end_handle)1903 static uint8_t att_read_group_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
1904 uint16_t start_handle, uint16_t end_handle)
1905 {
1906 struct read_group_data data;
1907
1908 (void)memset(&data, 0, sizeof(data));
1909
1910 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_GROUP_RSP,
1911 sizeof(*data.rsp));
1912 if (!data.buf) {
1913 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1914 }
1915
1916 data.chan = chan;
1917 data.uuid = uuid;
1918 data.rsp = net_buf_add(data.buf, sizeof(*data.rsp));
1919 data.rsp->len = 0U;
1920 data.group = NULL;
1921
1922 bt_gatt_foreach_attr(start_handle, end_handle, read_group_cb, &data);
1923
1924 if (!data.rsp->len) {
1925 tx_meta_data_free(bt_att_tx_meta_data(data.buf));
1926 net_buf_unref(data.buf);
1927 /* Respond here since handle is set */
1928 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
1929 BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
1930 return 0;
1931 }
1932
1933 bt_att_chan_send_rsp(chan, data.buf);
1934
1935 return 0;
1936 }
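
/* Worked example, for illustration only (handles are hypothetical): every
 * entry in a Read By Group Type response must have the same size, so the
 * first matching service fixes rsp->len and a later service whose value
 * length differs ends the iteration (see attr_read_group_cb above). With
 * two primary services using 16-bit UUIDs the response body would be:
 *
 *   rsp->len = 6                     (2 start + 2 end + 2 value)
 *   entry 0: 0x0001 0x0005 0x180F   (Battery Service)
 *   entry 1: 0x0006 0x0009 0x180A   (Device Information)
 *
 * A following service with a 128-bit UUID (value length 16) would not fit
 * the fixed entry size and is left for a follow-up request.
 */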
1937
1938 static uint8_t att_read_group_req(struct bt_att_chan *chan, struct net_buf *buf)
1939 {
1940 struct bt_att_read_group_req *req;
1941 uint16_t start_handle, end_handle, err_handle;
1942 union {
1943 struct bt_uuid uuid;
1944 struct bt_uuid_16 u16;
1945 struct bt_uuid_128 u128;
1946 } u;
1947 uint8_t uuid_len = buf->len - sizeof(*req);
1948
1949 /* Type can only be UUID16 or UUID128 */
1950 if (uuid_len != 2 && uuid_len != 16) {
1951 return BT_ATT_ERR_INVALID_PDU;
1952 }
1953
1954 req = net_buf_pull_mem(buf, sizeof(*req));
1955
1956 start_handle = sys_le16_to_cpu(req->start_handle);
1957 end_handle = sys_le16_to_cpu(req->end_handle);
1958
1959 if (!bt_uuid_create(&u.uuid, req->uuid, uuid_len)) {
1960 return BT_ATT_ERR_UNLIKELY;
1961 }
1962
1963 LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %s", start_handle, end_handle,
1964 bt_uuid_str(&u.uuid));
1965
1966 if (!range_is_valid(start_handle, end_handle, &err_handle)) {
1967 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, err_handle,
1968 BT_ATT_ERR_INVALID_HANDLE);
1969 return 0;
1970 }
1971
1972 /* Core v4.2, Vol 3, sec 2.5.3 Attribute Grouping:
1973 * Not all of the grouping attributes can be used in the ATT
1974 * Read By Group Type Request. The "Primary Service" and "Secondary
1975 * Service" grouping types may be used in the Read By Group Type
1976 * Request. The "Characteristic" grouping type shall not be used in
1977 * the ATT Read By Group Type Request.
1978 */
1979 if (bt_uuid_cmp(&u.uuid, BT_UUID_GATT_PRIMARY) &&
1980 bt_uuid_cmp(&u.uuid, BT_UUID_GATT_SECONDARY)) {
1981 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
1982 BT_ATT_ERR_UNSUPPORTED_GROUP_TYPE);
1983 return 0;
1984 }
1985
1986 return att_read_group_rsp(chan, &u.uuid, start_handle, end_handle);
1987 }
1988
1989 struct write_data {
1990 struct bt_conn *conn;
1991 struct net_buf *buf;
1992 uint8_t req;
1993 const void *value;
1994 uint16_t len;
1995 uint16_t offset;
1996 uint8_t err;
1997 };
1998
1999 static uint8_t write_cb(const struct bt_gatt_attr *attr, uint16_t handle,
2000 void *user_data)
2001 {
2002 struct write_data *data = user_data;
2003 int write;
2004 uint8_t flags = 0U;
2005
2006 LOG_DBG("handle 0x%04x offset %u", handle, data->offset);
2007
2008 /* Check attribute permissions */
2009 data->err = bt_gatt_check_perm(data->conn, attr,
2010 BT_GATT_PERM_WRITE_MASK);
2011 if (data->err) {
2012 return BT_GATT_ITER_STOP;
2013 }
2014
2015 /* Set command flag if not a request */
2016 if (!data->req) {
2017 flags |= BT_GATT_WRITE_FLAG_CMD;
2018 } else if (data->req == BT_ATT_OP_EXEC_WRITE_REQ) {
2019 flags |= BT_GATT_WRITE_FLAG_EXECUTE;
2020 }
2021
2022 /* Write attribute value */
2023 write = attr->write(data->conn, attr, data->value, data->len,
2024 data->offset, flags);
2025 if (write < 0 || write != data->len) {
2026 data->err = err_to_att(write);
2027 return BT_GATT_ITER_STOP;
2028 }
2029
2030 data->err = 0U;
2031
2032 return BT_GATT_ITER_CONTINUE;
2033 }
2034
2035 static uint8_t att_write_rsp(struct bt_att_chan *chan, uint8_t req, uint8_t rsp,
2036 uint16_t handle, uint16_t offset, const void *value,
2037 uint16_t len)
2038 {
2039 struct write_data data;
2040
2041 if (!bt_gatt_change_aware(chan->att->conn, req ? true : false)) {
2042 if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
2043 return BT_ATT_ERR_DB_OUT_OF_SYNC;
2044 } else {
2045 return 0;
2046 }
2047 }
2048
2049 if (!handle) {
2050 return BT_ATT_ERR_INVALID_HANDLE;
2051 }
2052
2053 (void)memset(&data, 0, sizeof(data));
2054
2055 /* Only allocate buf if required to respond */
2056 if (rsp) {
2057 data.buf = bt_att_chan_create_pdu(chan, rsp, 0);
2058 if (!data.buf) {
2059 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
2060 }
2061 }
2062
2063 data.conn = chan->att->conn;
2064 data.req = req;
2065 data.offset = offset;
2066 data.value = value;
2067 data.len = len;
2068 data.err = BT_ATT_ERR_INVALID_HANDLE;
2069
2070 bt_gatt_foreach_attr(handle, handle, write_cb, &data);
2071
2072 if (data.err) {
2073 /* In case of error discard data and respond with an error */
2074 if (rsp) {
2075 tx_meta_data_free(bt_att_tx_meta_data(data.buf));
2076 net_buf_unref(data.buf);
2077 /* Respond here since handle is set */
2078 send_err_rsp(chan, req, handle, data.err);
2079 }
2080 return req == BT_ATT_OP_EXEC_WRITE_REQ ? data.err : 0;
2081 }
2082
2083 if (data.buf) {
2084 bt_att_chan_send_rsp(chan, data.buf);
2085 }
2086
2087 return 0;
2088 }
2089
2090 static uint8_t att_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2091 {
2092 uint16_t handle;
2093
2094 handle = net_buf_pull_le16(buf);
2095
2096 LOG_DBG("handle 0x%04x", handle);
2097
2098 return att_write_rsp(chan, BT_ATT_OP_WRITE_REQ, BT_ATT_OP_WRITE_RSP,
2099 handle, 0, buf->data, buf->len);
2100 }
2101
2102 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
2103 struct prep_data {
2104 struct bt_conn *conn;
2105 struct net_buf *buf;
2106 const void *value;
2107 uint16_t len;
2108 uint16_t offset;
2109 uint8_t err;
2110 };
2111
2112 static uint8_t prep_write_cb(const struct bt_gatt_attr *attr, uint16_t handle,
2113 void *user_data)
2114 {
2115 struct prep_data *data = user_data;
2116 struct bt_attr_data *attr_data;
2117 int write;
2118
2119 LOG_DBG("handle 0x%04x offset %u", handle, data->offset);
2120
2121 /* Check attribute permissions */
2122 data->err = bt_gatt_check_perm(data->conn, attr,
2123 BT_GATT_PERM_WRITE_MASK);
2124 if (data->err) {
2125 return BT_GATT_ITER_STOP;
2126 }
2127
2128 /* Check if the attribute requires its handler to accept the data */
2129 if (!(attr->perm & BT_GATT_PERM_PREPARE_WRITE)) {
2130 goto append;
2131 }
2132
2133 /* Write attribute value to check if device is authorized */
2134 write = attr->write(data->conn, attr, data->value, data->len,
2135 data->offset, BT_GATT_WRITE_FLAG_PREPARE);
2136 if (write != 0) {
2137 data->err = err_to_att(write);
2138 return BT_GATT_ITER_STOP;
2139 }
2140
2141 append:
2142 /* Copy data into the outstanding queue */
2143 data->buf = net_buf_alloc(&prep_pool, K_NO_WAIT);
2144 if (!data->buf) {
2145 data->err = BT_ATT_ERR_PREPARE_QUEUE_FULL;
2146 return BT_GATT_ITER_STOP;
2147 }
2148
2149 attr_data = net_buf_user_data(data->buf);
2150 attr_data->handle = handle;
2151 attr_data->offset = data->offset;
2152
2153 net_buf_add_mem(data->buf, data->value, data->len);
2154
2155 data->err = 0U;
2156
2157 return BT_GATT_ITER_CONTINUE;
2158 }
2159
2160 static uint8_t att_prep_write_rsp(struct bt_att_chan *chan, uint16_t handle,
2161 uint16_t offset, const void *value, uint8_t len)
2162 {
2163 struct prep_data data;
2164 struct bt_att_prepare_write_rsp *rsp;
2165
2166 if (!bt_gatt_change_aware(chan->att->conn, true)) {
2167 if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
2168 return BT_ATT_ERR_DB_OUT_OF_SYNC;
2169 } else {
2170 return 0;
2171 }
2172 }
2173
2174 if (!handle) {
2175 return BT_ATT_ERR_INVALID_HANDLE;
2176 }
2177
2178 (void)memset(&data, 0, sizeof(data));
2179
2180 data.conn = chan->att->conn;
2181 data.offset = offset;
2182 data.value = value;
2183 data.len = len;
2184 data.err = BT_ATT_ERR_INVALID_HANDLE;
2185
2186 bt_gatt_foreach_attr(handle, handle, prep_write_cb, &data);
2187
2188 if (data.err) {
2189 /* Respond here since handle is set */
2190 send_err_rsp(chan, BT_ATT_OP_PREPARE_WRITE_REQ, handle,
2191 data.err);
2192 return 0;
2193 }
2194
2195 LOG_DBG("buf %p handle 0x%04x offset %u", data.buf, handle, offset);
2196
2197 /* Store buffer in the outstanding queue */
2198 net_buf_slist_put(&chan->att->prep_queue, data.buf);
2199
2200 /* Generate response */
2201 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_PREPARE_WRITE_RSP, 0);
2202 if (!data.buf) {
2203 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
2204 }
2205
2206 rsp = net_buf_add(data.buf, sizeof(*rsp));
2207 rsp->handle = sys_cpu_to_le16(handle);
2208 rsp->offset = sys_cpu_to_le16(offset);
2209 net_buf_add(data.buf, len);
2210 memcpy(rsp->value, value, len);
2211
2212 bt_att_chan_send_rsp(chan, data.buf);
2213
2214 return 0;
2215 }
2216 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2217
2218 static uint8_t att_prepare_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2219 {
2220 #if CONFIG_BT_ATT_PREPARE_COUNT == 0
2221 return BT_ATT_ERR_NOT_SUPPORTED;
2222 #else
2223 struct bt_att_prepare_write_req *req;
2224 uint16_t handle, offset;
2225
2226 req = net_buf_pull_mem(buf, sizeof(*req));
2227
2228 handle = sys_le16_to_cpu(req->handle);
2229 offset = sys_le16_to_cpu(req->offset);
2230
2231 LOG_DBG("handle 0x%04x offset %u", handle, offset);
2232
2233 return att_prep_write_rsp(chan, handle, offset, buf->data, buf->len);
2234 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2235 }
2236
2237 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
2238 static uint8_t exec_write_reassemble(uint16_t handle, uint16_t offset,
2239 sys_slist_t *list,
2240 struct net_buf_simple *buf)
2241 {
2242 struct net_buf *entry, *next;
2243 sys_snode_t *prev;
2244
2245 prev = NULL;
2246 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(list, entry, next, node) {
2247 struct bt_attr_data *tmp_data = net_buf_user_data(entry);
2248
2249 LOG_DBG("entry %p handle 0x%04x, offset %u", entry, tmp_data->handle,
2250 tmp_data->offset);
2251
2252 if (tmp_data->handle == handle) {
2253 if (tmp_data->offset == 0) {
2254 /* Multiple writes to the same handle can occur
2255 * in a prepare write queue. If the offset is 0,
2256 * that should mean that it's a new write to the
2257 * same handle, and we break to process the
2258 * first write.
2259 */
2260
2261 LOG_DBG("tmp_data->offset == 0");
2262 break;
2263 }
2264
2265 if (tmp_data->offset != buf->len + offset) {
2266 /* We require that the offset is increasing
2267 * properly to avoid badly reassembled buffers
2268 */
2269
2270 LOG_DBG("Bad offset %u (%u, %u)", tmp_data->offset, buf->len,
2271 offset);
2272
2273 return BT_ATT_ERR_INVALID_OFFSET;
2274 }
2275
2276 if (buf->len + entry->len > buf->size) {
2277 return BT_ATT_ERR_INVALID_ATTRIBUTE_LEN;
2278 }
2279
2280 net_buf_simple_add_mem(buf, entry->data, entry->len);
2281 sys_slist_remove(list, prev, &entry->node);
2282 net_buf_unref(entry);
2283 } else {
2284 prev = &entry->node;
2285 }
2286 }
2287
2288 return BT_ATT_ERR_SUCCESS;
2289 }
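
/* Worked example, for illustration only (hypothetical handle and payloads):
 * given a prepare queue filled by three Prepare Write requests
 *
 *   { handle 0x0010, offset 0, "Hel" }
 *   { handle 0x0010, offset 3, "lo " }
 *   { handle 0x0010, offset 6, "BLE" }
 *
 * the head fragment (offset 0, "Hel") is first copied into buf by the
 * caller, then exec_write_reassemble(0x0010, 0, list, buf) appends the two
 * remaining fragments in order, yielding "Hello BLE". A fragment whose
 * offset does not equal buf->len + offset would instead abort the
 * reassembly with BT_ATT_ERR_INVALID_OFFSET.
 */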
2290
2291 static uint8_t att_exec_write_rsp(struct bt_att_chan *chan, uint8_t flags)
2292 {
2293 struct net_buf *buf;
2294 uint8_t err = 0U;
2295
2296 /* The following code will iterate on all prepare writes in the
2297 * prep_queue, and reassemble those that share the same handle.
2298 * Once a handle has been reassembled, it is sent to the upper layers,
2299 * and the next handle is processed.
2300 */
2301 while (!sys_slist_is_empty(&chan->att->prep_queue)) {
2302 struct bt_attr_data *data;
2303 uint16_t handle;
2304
2305 NET_BUF_SIMPLE_DEFINE_STATIC(reassembled_data,
2306 MIN(BT_ATT_MAX_ATTRIBUTE_LEN,
2307 CONFIG_BT_ATT_PREPARE_COUNT * BT_ATT_BUF_SIZE));
2308
2309 buf = net_buf_slist_get(&chan->att->prep_queue);
2310 data = net_buf_user_data(buf);
2311 handle = data->handle;
2312
2313 LOG_DBG("buf %p handle 0x%04x offset %u", buf, handle, data->offset);
2314
2315 net_buf_simple_reset(&reassembled_data);
2316 net_buf_simple_add_mem(&reassembled_data, buf->data, buf->len);
2317
2318 err = exec_write_reassemble(handle, data->offset,
2319 &chan->att->prep_queue,
2320 &reassembled_data);
2321 if (err != BT_ATT_ERR_SUCCESS) {
2322 send_err_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ,
2323 handle, err);
2324 return 0;
2325 }
2326
2327 /* Just discard the data if an error was set */
2328 if (!err && flags == BT_ATT_FLAG_EXEC) {
2329 err = att_write_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ, 0,
2330 handle, data->offset,
2331 reassembled_data.data,
2332 reassembled_data.len);
2333 if (err) {
2334 /* Respond here since handle is set */
2335 send_err_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ,
2336 data->handle, err);
2337 }
2338 }
2339
2340 net_buf_unref(buf);
2341 }
2342
2343 if (err) {
2344 return 0;
2345 }
2346
2347 /* Generate response */
2348 buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_EXEC_WRITE_RSP, 0);
2349 if (!buf) {
2350 return BT_ATT_ERR_UNLIKELY;
2351 }
2352
2353 bt_att_chan_send_rsp(chan, buf);
2354
2355 return 0;
2356 }
2357 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2358
2360 static uint8_t att_exec_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2361 {
2362 #if CONFIG_BT_ATT_PREPARE_COUNT == 0
2363 return BT_ATT_ERR_NOT_SUPPORTED;
2364 #else
2365 struct bt_att_exec_write_req *req;
2366
2367 req = (void *)buf->data;
2368
2369 LOG_DBG("flags 0x%02x", req->flags);
2370
2371 return att_exec_write_rsp(chan, req->flags);
2372 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2373 }
2374
2375 static uint8_t att_write_cmd(struct bt_att_chan *chan, struct net_buf *buf)
2376 {
2377 uint16_t handle;
2378
2379 handle = net_buf_pull_le16(buf);
2380
2381 LOG_DBG("handle 0x%04x", handle);
2382
2383 return att_write_rsp(chan, 0, 0, handle, 0, buf->data, buf->len);
2384 }
2385
2386 #if defined(CONFIG_BT_SIGNING)
2387 static uint8_t att_signed_write_cmd(struct bt_att_chan *chan, struct net_buf *buf)
2388 {
2389 struct bt_conn *conn = chan->chan.chan.conn;
2390 struct bt_att_signed_write_cmd *req;
2391 uint16_t handle;
2392 int err;
2393
2394 /* The Signed Write Without Response sub-procedure shall only be supported
2395 * on the LE Fixed Channel Unenhanced ATT bearer.
2396 */
2397 if (bt_att_is_enhanced(chan)) {
2398 /* No response for this command */
2399 return 0;
2400 }
2401
2402 req = (void *)buf->data;
2403
2404 handle = sys_le16_to_cpu(req->handle);
2405
2406 LOG_DBG("handle 0x%04x", handle);
2407
2408 /* Verifying data requires full buffer including attribute header */
2409 net_buf_push(buf, sizeof(struct bt_att_hdr));
2410 err = bt_smp_sign_verify(conn, buf);
2411 if (err) {
2412 LOG_ERR("Error verifying data");
2413 /* No response for this command */
2414 return 0;
2415 }
2416
2417 net_buf_pull(buf, sizeof(struct bt_att_hdr));
2418 net_buf_pull(buf, sizeof(*req));
2419
2420 return att_write_rsp(chan, 0, 0, handle, 0, buf->data,
2421 buf->len - sizeof(struct bt_att_signature));
2422 }
2423 #endif /* CONFIG_BT_SIGNING */
2424
2425 #if defined(CONFIG_BT_GATT_CLIENT)
2426 #if defined(CONFIG_BT_ATT_RETRY_ON_SEC_ERR)
2427 static int att_change_security(struct bt_conn *conn, uint8_t err)
2428 {
2429 bt_security_t sec;
2430
2431 switch (err) {
2432 case BT_ATT_ERR_INSUFFICIENT_ENCRYPTION:
2433 if (conn->sec_level >= BT_SECURITY_L2) {
2434 return -EALREADY;
}
2435 sec = BT_SECURITY_L2;
2436 break;
2437 case BT_ATT_ERR_AUTHENTICATION:
2438 if (conn->sec_level < BT_SECURITY_L2) {
2439 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2440 * page 375:
2441 *
2442 * If an LTK is not available, the service request
2443 * shall be rejected with the error code 'Insufficient
2444 * Authentication'.
2445 * Note: When the link is not encrypted, the error code
2446 * "Insufficient Authentication" does not indicate that
2447 * MITM protection is required.
2448 */
2449 sec = BT_SECURITY_L2;
2450 } else if (conn->sec_level < BT_SECURITY_L3) {
2451 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2452 * page 375:
2453 *
2454 * If an authenticated pairing is required but only an
2455 * unauthenticated pairing has occurred and the link is
2456 * currently encrypted, the service request shall be
2457 * rejected with the error code 'Insufficient
2458 * Authentication'.
2459 * Note: When unauthenticated pairing has occurred and
2460 * the link is currently encrypted, the error code
2461 * 'Insufficient Authentication' indicates that MITM
2462 * protection is required.
2463 */
2464 sec = BT_SECURITY_L3;
2465 } else if (conn->sec_level < BT_SECURITY_L4) {
2466 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2467 * page 375:
2468 *
2469 * If LE Secure Connections authenticated pairing is
2470 * required but LE legacy pairing has occurred and the
2471 * link is currently encrypted, the service request
2472 * shall be rejected with the error code 'Insufficient
2473 * Authentication'.
2474 */
2475 sec = BT_SECURITY_L4;
2476 } else {
2477 return -EALREADY;
2478 }
2479 break;
2480 default:
2481 return -EINVAL;
2482 }
2483
2484 return bt_conn_set_security(conn, sec);
2485 }
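
/* Summary of the mapping above, for illustration only:
 *
 *   ATT error                    current level    requested level
 *   Insufficient Encryption      < L2             BT_SECURITY_L2
 *   Insufficient Authentication  < L2             BT_SECURITY_L2
 *   Insufficient Authentication  L2               BT_SECURITY_L3
 *   Insufficient Authentication  L3               BT_SECURITY_L4
 *
 * Any other error code, or a link already at the target level, returns a
 * negative value and the failed request is completed through
 * att_handle_rsp() as usual.
 */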
2486 #endif /* CONFIG_BT_ATT_RETRY_ON_SEC_ERR */
2487
2488 static uint8_t att_error_rsp(struct bt_att_chan *chan, struct net_buf *buf)
2489 {
2490 struct bt_att_error_rsp *rsp;
2491 uint8_t err;
2492
2493 rsp = (void *)buf->data;
2494
2495 LOG_DBG("request 0x%02x handle 0x%04x error 0x%02x", rsp->request,
2496 sys_le16_to_cpu(rsp->handle), rsp->error);
2497
2498 /* Don't retry if there is no req pending or it has been cancelled.
2499 *
2500 * BLUETOOTH SPECIFICATION Version 5.2 [Vol 3, Part F]
2501 * page 1423:
2502 *
2503 * If an error code is received in the ATT_ERROR_RSP PDU that is not
2504 * understood by the client, for example an error code that was reserved
2505 * for future use that is now being used in a future version of the
2506 * specification, then the ATT_ERROR_RSP PDU shall still be considered to
2507 * state that the given request cannot be performed for an unknown reason.
2508 */
2509 if (!chan->req || chan->req == &cancel || !rsp->error) {
2510 err = BT_ATT_ERR_UNLIKELY;
2511 goto done;
2512 }
2513
2514 err = rsp->error;
2515 #if defined(CONFIG_BT_ATT_RETRY_ON_SEC_ERR)
2516 /* Check if error can be handled by elevating security. */
2517 if (!att_change_security(chan->chan.chan.conn, err)) {
2518 /* ATT timeout work is normally cancelled in att_handle_rsp.
2519 * However, retrying is a special case, so the timeout is
2520 * cancelled here.
2521 */
2522 k_work_cancel_delayable(&chan->timeout_work);
2523
2524 chan->req->retrying = true;
2525 return 0;
2526 }
2527 #endif /* CONFIG_BT_ATT_RETRY_ON_SEC_ERR */
2528
2529 done:
2530 return att_handle_rsp(chan, NULL, 0, err);
2531 }
2532
2533 static uint8_t att_handle_find_info_rsp(struct bt_att_chan *chan,
2534 struct net_buf *buf)
2535 {
2536 LOG_DBG("");
2537
2538 return att_handle_rsp(chan, buf->data, buf->len, 0);
2539 }
2540
2541 static uint8_t att_handle_find_type_rsp(struct bt_att_chan *chan,
2542 struct net_buf *buf)
2543 {
2544 LOG_DBG("");
2545
2546 return att_handle_rsp(chan, buf->data, buf->len, 0);
2547 }
2548
2549 static uint8_t att_handle_read_type_rsp(struct bt_att_chan *chan,
2550 struct net_buf *buf)
2551 {
2552 LOG_DBG("");
2553
2554 return att_handle_rsp(chan, buf->data, buf->len, 0);
2555 }
2556
2557 static uint8_t att_handle_read_rsp(struct bt_att_chan *chan,
2558 struct net_buf *buf)
2559 {
2560 LOG_DBG("");
2561
2562 return att_handle_rsp(chan, buf->data, buf->len, 0);
2563 }
2564
2565 static uint8_t att_handle_read_blob_rsp(struct bt_att_chan *chan,
2566 struct net_buf *buf)
2567 {
2568 LOG_DBG("");
2569
2570 return att_handle_rsp(chan, buf->data, buf->len, 0);
2571 }
2572
2573 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
2574 static uint8_t att_handle_read_mult_rsp(struct bt_att_chan *chan,
2575 struct net_buf *buf)
2576 {
2577 LOG_DBG("");
2578
2579 return att_handle_rsp(chan, buf->data, buf->len, 0);
2580 }
2581
2582 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
2583
2584 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
2585 static uint8_t att_handle_read_mult_vl_rsp(struct bt_att_chan *chan,
2586 struct net_buf *buf)
2587 {
2588 LOG_DBG("");
2589
2590 return att_handle_rsp(chan, buf->data, buf->len, 0);
2591 }
2592 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
2593
2594 static uint8_t att_handle_read_group_rsp(struct bt_att_chan *chan,
2595 struct net_buf *buf)
2596 {
2597 LOG_DBG("");
2598
2599 return att_handle_rsp(chan, buf->data, buf->len, 0);
2600 }
2601
2602 static uint8_t att_handle_write_rsp(struct bt_att_chan *chan,
2603 struct net_buf *buf)
2604 {
2605 LOG_DBG("");
2606
2607 return att_handle_rsp(chan, buf->data, buf->len, 0);
2608 }
2609
2610 static uint8_t att_handle_prepare_write_rsp(struct bt_att_chan *chan,
2611 struct net_buf *buf)
2612 {
2613 LOG_DBG("");
2614
2615 return att_handle_rsp(chan, buf->data, buf->len, 0);
2616 }
2617
2618 static uint8_t att_handle_exec_write_rsp(struct bt_att_chan *chan,
2619 struct net_buf *buf)
2620 {
2621 LOG_DBG("");
2622
2623 return att_handle_rsp(chan, buf->data, buf->len, 0);
2624 }
2625
2626 static uint8_t att_notify(struct bt_att_chan *chan, struct net_buf *buf)
2627 {
2628 uint16_t handle;
2629
2630 handle = net_buf_pull_le16(buf);
2631
2632 LOG_DBG("chan %p handle 0x%04x", chan, handle);
2633
2634 bt_gatt_notification(chan->att->conn, handle, buf->data, buf->len);
2635
2636 return 0;
2637 }
2638
2639 static uint8_t att_indicate(struct bt_att_chan *chan, struct net_buf *buf)
2640 {
2641 uint16_t handle;
2642
2643 handle = net_buf_pull_le16(buf);
2644
2645 LOG_DBG("chan %p handle 0x%04x", chan, handle);
2646
2647 bt_gatt_notification(chan->att->conn, handle, buf->data, buf->len);
2648
2649 buf = bt_att_chan_create_pdu(chan, BT_ATT_OP_CONFIRM, 0);
2650 if (!buf) {
2651 return 0;
2652 }
2653
2654 bt_att_chan_send_rsp(chan, buf);
2655
2656 return 0;
2657 }
2658
2659 static uint8_t att_notify_mult(struct bt_att_chan *chan, struct net_buf *buf)
2660 {
2661 LOG_DBG("chan %p", chan);
2662
2663 bt_gatt_mult_notification(chan->att->conn, buf->data, buf->len);
2664
2665 return 0;
2666 }
2667 #endif /* CONFIG_BT_GATT_CLIENT */
2668
2669 static uint8_t att_confirm(struct bt_att_chan *chan, struct net_buf *buf)
2670 {
2671 LOG_DBG("");
2672
2673 return att_handle_rsp(chan, buf->data, buf->len, 0);
2674 }
2675
2676 static const struct att_handler {
2677 uint8_t op;
2678 uint8_t expect_len;
2679 att_type_t type;
2680 uint8_t (*func)(struct bt_att_chan *chan, struct net_buf *buf);
2681 } handlers[] = {
2682 { BT_ATT_OP_MTU_REQ,
2683 sizeof(struct bt_att_exchange_mtu_req),
2684 ATT_REQUEST,
2685 att_mtu_req },
2686 { BT_ATT_OP_FIND_INFO_REQ,
2687 sizeof(struct bt_att_find_info_req),
2688 ATT_REQUEST,
2689 att_find_info_req },
2690 { BT_ATT_OP_FIND_TYPE_REQ,
2691 sizeof(struct bt_att_find_type_req),
2692 ATT_REQUEST,
2693 att_find_type_req },
2694 { BT_ATT_OP_READ_TYPE_REQ,
2695 sizeof(struct bt_att_read_type_req),
2696 ATT_REQUEST,
2697 att_read_type_req },
2698 { BT_ATT_OP_READ_REQ,
2699 sizeof(struct bt_att_read_req),
2700 ATT_REQUEST,
2701 att_read_req },
2702 { BT_ATT_OP_READ_BLOB_REQ,
2703 sizeof(struct bt_att_read_blob_req),
2704 ATT_REQUEST,
2705 att_read_blob_req },
2706 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
2707 { BT_ATT_OP_READ_MULT_REQ,
2708 BT_ATT_READ_MULT_MIN_LEN_REQ,
2709 ATT_REQUEST,
2710 att_read_mult_req },
2711 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
2712 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
2713 { BT_ATT_OP_READ_MULT_VL_REQ,
2714 BT_ATT_READ_MULT_MIN_LEN_REQ,
2715 ATT_REQUEST,
2716 att_read_mult_vl_req },
2717 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
2718 { BT_ATT_OP_READ_GROUP_REQ,
2719 sizeof(struct bt_att_read_group_req),
2720 ATT_REQUEST,
2721 att_read_group_req },
2722 { BT_ATT_OP_WRITE_REQ,
2723 sizeof(struct bt_att_write_req),
2724 ATT_REQUEST,
2725 att_write_req },
2726 { BT_ATT_OP_PREPARE_WRITE_REQ,
2727 sizeof(struct bt_att_prepare_write_req),
2728 ATT_REQUEST,
2729 att_prepare_write_req },
2730 { BT_ATT_OP_EXEC_WRITE_REQ,
2731 sizeof(struct bt_att_exec_write_req),
2732 ATT_REQUEST,
2733 att_exec_write_req },
2734 { BT_ATT_OP_CONFIRM,
2735 0,
2736 ATT_CONFIRMATION,
2737 att_confirm },
2738 { BT_ATT_OP_WRITE_CMD,
2739 sizeof(struct bt_att_write_cmd),
2740 ATT_COMMAND,
2741 att_write_cmd },
2742 #if defined(CONFIG_BT_SIGNING)
2743 { BT_ATT_OP_SIGNED_WRITE_CMD,
2744 (sizeof(struct bt_att_write_cmd) +
2745 sizeof(struct bt_att_signature)),
2746 ATT_COMMAND,
2747 att_signed_write_cmd },
2748 #endif /* CONFIG_BT_SIGNING */
2749 #if defined(CONFIG_BT_GATT_CLIENT)
2750 { BT_ATT_OP_ERROR_RSP,
2751 sizeof(struct bt_att_error_rsp),
2752 ATT_RESPONSE,
2753 att_error_rsp },
2754 { BT_ATT_OP_MTU_RSP,
2755 sizeof(struct bt_att_exchange_mtu_rsp),
2756 ATT_RESPONSE,
2757 att_mtu_rsp },
2758 { BT_ATT_OP_FIND_INFO_RSP,
2759 sizeof(struct bt_att_find_info_rsp),
2760 ATT_RESPONSE,
2761 att_handle_find_info_rsp },
2762 { BT_ATT_OP_FIND_TYPE_RSP,
2763 sizeof(struct bt_att_handle_group),
2764 ATT_RESPONSE,
2765 att_handle_find_type_rsp },
2766 { BT_ATT_OP_READ_TYPE_RSP,
2767 sizeof(struct bt_att_read_type_rsp),
2768 ATT_RESPONSE,
2769 att_handle_read_type_rsp },
2770 { BT_ATT_OP_READ_RSP,
2771 0,
2772 ATT_RESPONSE,
2773 att_handle_read_rsp },
2774 { BT_ATT_OP_READ_BLOB_RSP,
2775 0,
2776 ATT_RESPONSE,
2777 att_handle_read_blob_rsp },
2778 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
2779 { BT_ATT_OP_READ_MULT_RSP,
2780 0,
2781 ATT_RESPONSE,
2782 att_handle_read_mult_rsp },
2783 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
2784 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
2785 { BT_ATT_OP_READ_MULT_VL_RSP,
2786 sizeof(struct bt_att_read_mult_vl_rsp),
2787 ATT_RESPONSE,
2788 att_handle_read_mult_vl_rsp },
2789 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
2790 { BT_ATT_OP_READ_GROUP_RSP,
2791 sizeof(struct bt_att_read_group_rsp),
2792 ATT_RESPONSE,
2793 att_handle_read_group_rsp },
2794 { BT_ATT_OP_WRITE_RSP,
2795 0,
2796 ATT_RESPONSE,
2797 att_handle_write_rsp },
2798 { BT_ATT_OP_PREPARE_WRITE_RSP,
2799 sizeof(struct bt_att_prepare_write_rsp),
2800 ATT_RESPONSE,
2801 att_handle_prepare_write_rsp },
2802 { BT_ATT_OP_EXEC_WRITE_RSP,
2803 0,
2804 ATT_RESPONSE,
2805 att_handle_exec_write_rsp },
2806 { BT_ATT_OP_NOTIFY,
2807 sizeof(struct bt_att_notify),
2808 ATT_NOTIFICATION,
2809 att_notify },
2810 { BT_ATT_OP_INDICATE,
2811 sizeof(struct bt_att_indicate),
2812 ATT_INDICATION,
2813 att_indicate },
2814 { BT_ATT_OP_NOTIFY_MULT,
2815 sizeof(struct bt_att_notify_mult),
2816 ATT_NOTIFICATION,
2817 att_notify_mult },
2818 #endif /* CONFIG_BT_GATT_CLIENT */
2819 };
2820
2821 static att_type_t att_op_get_type(uint8_t op)
2822 {
2823 switch (op) {
2824 case BT_ATT_OP_MTU_REQ:
2825 case BT_ATT_OP_FIND_INFO_REQ:
2826 case BT_ATT_OP_FIND_TYPE_REQ:
2827 case BT_ATT_OP_READ_TYPE_REQ:
2828 case BT_ATT_OP_READ_REQ:
2829 case BT_ATT_OP_READ_BLOB_REQ:
2830 case BT_ATT_OP_READ_MULT_REQ:
2831 case BT_ATT_OP_READ_MULT_VL_REQ:
2832 case BT_ATT_OP_READ_GROUP_REQ:
2833 case BT_ATT_OP_WRITE_REQ:
2834 case BT_ATT_OP_PREPARE_WRITE_REQ:
2835 case BT_ATT_OP_EXEC_WRITE_REQ:
2836 return ATT_REQUEST;
2837 case BT_ATT_OP_CONFIRM:
2838 return ATT_CONFIRMATION;
2839 case BT_ATT_OP_WRITE_CMD:
2840 case BT_ATT_OP_SIGNED_WRITE_CMD:
2841 return ATT_COMMAND;
2842 case BT_ATT_OP_ERROR_RSP:
2843 case BT_ATT_OP_MTU_RSP:
2844 case BT_ATT_OP_FIND_INFO_RSP:
2845 case BT_ATT_OP_FIND_TYPE_RSP:
2846 case BT_ATT_OP_READ_TYPE_RSP:
2847 case BT_ATT_OP_READ_RSP:
2848 case BT_ATT_OP_READ_BLOB_RSP:
2849 case BT_ATT_OP_READ_MULT_RSP:
2850 case BT_ATT_OP_READ_MULT_VL_RSP:
2851 case BT_ATT_OP_READ_GROUP_RSP:
2852 case BT_ATT_OP_WRITE_RSP:
2853 case BT_ATT_OP_PREPARE_WRITE_RSP:
2854 case BT_ATT_OP_EXEC_WRITE_RSP:
2855 return ATT_RESPONSE;
2856 case BT_ATT_OP_NOTIFY:
2857 case BT_ATT_OP_NOTIFY_MULT:
2858 return ATT_NOTIFICATION;
2859 case BT_ATT_OP_INDICATE:
2860 return ATT_INDICATION;
2861 }
2862
2863 if (op & ATT_CMD_MASK) {
2864 return ATT_COMMAND;
2865 }
2866
2867 return ATT_UNKNOWN;
2868 }
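
/* Illustrative note: bit 6 of an ATT opcode is the Command Flag, which is
 * what ATT_CMD_MASK (0x40) tests. For opcodes missing from the switch
 * above, e.g. the hypothetical values:
 *
 *   0x7E & 0x40 = 0x40  ->  ATT_COMMAND (silently dropped)
 *   0x3E & 0x40 = 0x00  ->  ATT_UNKNOWN (an error response is sent)
 */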
2869
2870 static struct bt_conn *get_conn(struct bt_att_chan *att_chan)
2871 {
2872 return att_chan->chan.chan.conn;
2873 }
2874
2875 static int bt_att_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
2876 {
2877 struct bt_att_chan *att_chan = ATT_CHAN(chan);
2878 struct bt_conn *conn = get_conn(att_chan);
2879 struct bt_att_hdr *hdr;
2880 const struct att_handler *handler;
2881 uint8_t err;
2882 size_t i;
2883
2884 if (buf->len < sizeof(*hdr)) {
2885 LOG_ERR("Too small ATT PDU received");
2886 return 0;
2887 }
2888
2889 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2890 LOG_DBG("Received ATT chan %p code 0x%02x len %zu", att_chan, hdr->code,
2891 net_buf_frags_len(buf));
2892
2893 if (conn->state != BT_CONN_CONNECTED) {
2894 LOG_DBG("not connected: conn %p state %u", conn, conn->state);
2895 return 0;
2896 }
2897
2898 if (!att_chan->att) {
2899 LOG_DBG("Ignore recv on detached ATT chan");
2900 return 0;
2901 }
2902
2903 for (i = 0, handler = NULL; i < ARRAY_SIZE(handlers); i++) {
2904 if (hdr->code == handlers[i].op) {
2905 handler = &handlers[i];
2906 break;
2907 }
2908 }
2909
2910 /* Thread-local variable, shouldn't be used by anything else */
2911 __ASSERT_NO_MSG(!att_chan->rsp_buf);
2912
2913 /* Mark buffer free for re-use by the opcode handler.
2914 *
2915 * This allows ATT to always be able to send a RSP (or err RSP)
2916 * to the peer, regardless of the TX buffer usage by other stack
2917 * users (e.g. GATT notifications, L2CAP using global pool, SMP,
2918 * etc..), avoiding an ATT timeout due to resource usage.
2919 *
2920 * The ref is taken by `bt_att_chan_create_pdu`.
2921 */
2922 att_chan->rsp_buf = net_buf_ref(buf);
2923
2924 if (!handler) {
2925 LOG_WRN("Unhandled ATT code 0x%02x", hdr->code);
2926 if (att_op_get_type(hdr->code) != ATT_COMMAND &&
2927 att_op_get_type(hdr->code) != ATT_INDICATION) {
2928 send_err_rsp(att_chan, hdr->code, 0,
2929 BT_ATT_ERR_NOT_SUPPORTED);
2930 }
2931 goto exit;
2932 }
2933
2934 if (IS_ENABLED(CONFIG_BT_ATT_ENFORCE_FLOW)) {
2935 if (handler->type == ATT_REQUEST &&
2936 atomic_test_and_set_bit(att_chan->flags, ATT_PENDING_RSP)) {
2937 LOG_WRN("Ignoring unexpected request");
2938 goto exit;
2939 } else if (handler->type == ATT_INDICATION &&
2940 atomic_test_and_set_bit(att_chan->flags,
2941 ATT_PENDING_CFM)) {
2942 LOG_WRN("Ignoring unexpected indication");
2943 goto exit;
2944 }
2945 }
2946
2947 if (buf->len < handler->expect_len) {
2948 LOG_ERR("Invalid len %u for code 0x%02x", buf->len, hdr->code);
2949 err = BT_ATT_ERR_INVALID_PDU;
2950 } else {
2951 err = handler->func(att_chan, buf);
2952 }
2953
2954 if (handler->type == ATT_REQUEST && err) {
2955 LOG_DBG("ATT error 0x%02x", err);
2956 send_err_rsp(att_chan, hdr->code, 0, err);
2957 }
2958
2959 exit:
2960 net_buf_unref(att_chan->rsp_buf);
2961 att_chan->rsp_buf = NULL;
2962
2963 return 0;
2964 }
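
/* Illustrative note (hypothetical handles): with CONFIG_BT_ATT_ENFORCE_FLOW
 * enabled the bearer accepts a single outstanding transaction, e.g.:
 *
 *   peer -> Read Request (0x0003)   ATT_PENDING_RSP set, request handled
 *   peer -> Read Request (0x0004)   ignored, flag already set
 *   us   -> Read Response           flag cleared once the rsp is sent
 */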
2965
2966 static struct bt_att *att_get(struct bt_conn *conn)
2967 {
2968 struct bt_l2cap_chan *chan;
2969 struct bt_att_chan *att_chan;
2970
2971 if (conn->state != BT_CONN_CONNECTED) {
2972 LOG_WRN("Not connected");
2973 return NULL;
2974 }
2975
2976 chan = bt_l2cap_le_lookup_rx_cid(conn, BT_L2CAP_CID_ATT);
2977 if (!chan) {
2978 LOG_ERR("Unable to find ATT channel");
2979 return NULL;
2980 }
2981
2982 att_chan = ATT_CHAN(chan);
2983 if (!atomic_test_bit(att_chan->flags, ATT_CONNECTED)) {
2984 LOG_ERR("ATT channel not connected");
2985 return NULL;
2986 }
2987
2988 return att_chan->att;
2989 }
2990
2991 struct net_buf *bt_att_create_pdu(struct bt_conn *conn, uint8_t op, size_t len)
2992 {
2993 struct bt_att *att;
2994 struct bt_att_chan *chan, *tmp;
2995
2996 att = att_get(conn);
2997 if (!att) {
2998 return NULL;
2999 }
3000
3001 /* This allocator should _not_ be used for RSPs. */
3002 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
3003 if (len + sizeof(op) > bt_att_mtu(chan)) {
3004 continue;
3005 }
3006
3007 return bt_att_chan_create_pdu(chan, op, len);
3008 }
3009
3010 LOG_WRN("No ATT channel for MTU %zu", len + sizeof(op));
3011
3012 return NULL;
3013 }
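
/* Usage sketch, for illustration only: a GATT-level caller requests a PDU
 * sized for its payload and gets whichever bearer can carry it, e.g. for a
 * notification of value_len bytes (value_len is hypothetical):
 *
 *   buf = bt_att_create_pdu(conn, BT_ATT_OP_NOTIFY,
 *                           sizeof(struct bt_att_notify) + value_len);
 *
 * A NULL return means no connected bearer has a large enough MTU.
 * Responses must instead go through bt_att_create_rsp_pdu() below, which
 * is tied to the channel the request arrived on.
 */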
3014
3015 struct net_buf *bt_att_create_rsp_pdu(struct bt_att_chan *chan, uint8_t op, size_t len)
3016 {
3017 if (len + sizeof(op) > bt_att_mtu(chan)) {
3018 LOG_WRN("ATT channel %p MTU too small for RSP (%u < %u)",
3019 chan, bt_att_mtu(chan), len + sizeof(op));
3020 return NULL;
3021 }
3022
3023 return bt_att_chan_create_pdu(chan, op, len);
3024 }
3025
3026 static void att_reset(struct bt_att *att)
3027 {
3028 struct net_buf *buf;
3029
3030 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
3031 /* Discard queued buffers */
3032 while ((buf = net_buf_slist_get(&att->prep_queue))) {
3033 tx_meta_data_free(bt_att_tx_meta_data(buf));
3034 net_buf_unref(buf);
3035 }
3036 #endif /* CONFIG_BT_ATT_PREPARE_COUNT > 0 */
3037
3038 #if defined(CONFIG_BT_EATT)
3039 struct k_work_sync sync;
3040
3041 (void)k_work_cancel_delayable_sync(&att->eatt.connection_work, &sync);
3042 #endif /* CONFIG_BT_EATT */
3043
3044 while ((buf = net_buf_get(&att->tx_queue, K_NO_WAIT))) {
3045 tx_meta_data_free(bt_att_tx_meta_data(buf));
3046 net_buf_unref(buf);
3047 }
3048
3049 /* Notify pending requests */
3050 while (!sys_slist_is_empty(&att->reqs)) {
3051 struct bt_att_req *req;
3052 sys_snode_t *node;
3053
3054 node = sys_slist_get_not_empty(&att->reqs);
3055 req = CONTAINER_OF(node, struct bt_att_req, node);
3056 if (req->func) {
3057 req->func(att->conn, BT_ATT_ERR_UNLIKELY, NULL, 0,
3058 req->user_data);
3059 }
3060
3061 bt_att_req_free(req);
3062 }
3063
3064 /* FIXME: `att->conn` is not reference counted. Consider using `bt_conn_ref`
3065 * and `bt_conn_unref` to follow convention.
3066 */
3067 att->conn = NULL;
3068 k_mem_slab_free(&att_slab, (void *)att);
3069 }
3070
3071 static void att_chan_detach(struct bt_att_chan *chan)
3072 {
3073 struct net_buf *buf;
3074
3075 LOG_DBG("chan %p", chan);
3076
3077 sys_slist_find_and_remove(&chan->att->chans, &chan->node);
3078
3079 /* Release pending buffers */
3080 while ((buf = net_buf_get(&chan->tx_queue, K_NO_WAIT))) {
3081 tx_meta_data_free(bt_att_tx_meta_data(buf));
3082 net_buf_unref(buf);
3083 }
3084
3085 if (chan->req) {
3086 /* Notify outstanding request */
3087 att_handle_rsp(chan, NULL, 0, BT_ATT_ERR_UNLIKELY);
3088 }
3089
3090 chan->att = NULL;
3091 atomic_clear_bit(chan->flags, ATT_CONNECTED);
3092 }
3093
3094 static void att_timeout(struct k_work *work)
3095 {
3096 char addr[BT_ADDR_LE_STR_LEN];
3097 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
3098 struct bt_att_chan *chan = CONTAINER_OF(dwork, struct bt_att_chan,
3099 timeout_work);
3100
3101 bt_addr_le_to_str(bt_conn_get_dst(chan->att->conn), addr, sizeof(addr));
3102 LOG_ERR("ATT Timeout for device %s", addr);
3103
3104 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part F] page 480:
3105 *
3106 * A transaction not completed within 30 seconds shall time out. Such a
3107 * transaction shall be considered to have failed and the local higher
3108 * layers shall be informed of this failure. No more attribute protocol
3109 * requests, commands, indications or notifications shall be sent to the
3110 * target device on this ATT Bearer.
3111 */
3112 bt_att_disconnected(&chan->chan.chan);
3113 }
3114
3115 static struct bt_att_chan *att_get_fixed_chan(struct bt_conn *conn)
3116 {
3117 struct bt_l2cap_chan *chan;
3118
3119 chan = bt_l2cap_le_lookup_tx_cid(conn, BT_L2CAP_CID_ATT);
3120 __ASSERT(chan, "No ATT channel found");
3121
3122 return ATT_CHAN(chan);
3123 }
3124
3125 static void att_chan_attach(struct bt_att *att, struct bt_att_chan *chan)
3126 {
3127 LOG_DBG("att %p chan %p flags %lu", att, chan, atomic_get(chan->flags));
3128
3129 if (sys_slist_is_empty(&att->chans)) {
3130 /* Init general queues when attaching the first channel */
3131 k_fifo_init(&att->tx_queue);
3132 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
3133 sys_slist_init(&att->prep_queue);
3134 #endif
3135 }
3136
3137 sys_slist_prepend(&att->chans, &chan->node);
3138 }
3139
3140 static void bt_att_connected(struct bt_l2cap_chan *chan)
3141 {
3142 struct bt_att_chan *att_chan = ATT_CHAN(chan);
3143 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3144
3145 LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->tx.cid);
3146
3147 atomic_set_bit(att_chan->flags, ATT_CONNECTED);
3148
3149 att_chan_mtu_updated(att_chan);
3150
3151 k_work_init_delayable(&att_chan->timeout_work, att_timeout);
3152
3153 bt_gatt_connected(le_chan->chan.conn);
3154 }
3155
3156 static void bt_att_disconnected(struct bt_l2cap_chan *chan)
3157 {
3158 struct bt_att_chan *att_chan = ATT_CHAN(chan);
3159 struct bt_att *att = att_chan->att;
3160 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3161
3162 LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->tx.cid);
3163
3164 if (!att_chan->att) {
3165 LOG_DBG("Ignore disconnect on detached ATT chan");
3166 return;
3167 }
3168
3169 att_chan_detach(att_chan);
3170
3171 /* Don't reset if there are still channels to be used */
3172 if (!sys_slist_is_empty(&att->chans)) {
3173 return;
3174 }
3175
3176 att_reset(att);
3177
3178 bt_gatt_disconnected(le_chan->chan.conn);
3179 }
3180
3181 #if defined(CONFIG_BT_SMP)
3182 static uint8_t att_req_retry(struct bt_att_chan *att_chan)
3183 {
3184 struct bt_att_req *req = att_chan->req;
3185 struct net_buf *buf;
3186
3187 /* Resend buffer */
3188 if (!req->encode) {
3189 /* This request does not support resending */
3190 return BT_ATT_ERR_AUTHENTICATION;
3191 }
3192
3194 buf = bt_att_chan_create_pdu(att_chan, req->att_op, req->len);
3195 if (!buf) {
3196 return BT_ATT_ERR_UNLIKELY;
3197 }
3198
3199 if (req->encode(buf, req->len, req->user_data)) {
3200 tx_meta_data_free(bt_att_tx_meta_data(buf));
3201 net_buf_unref(buf);
3202 return BT_ATT_ERR_UNLIKELY;
3203 }
3204
3205 if (chan_send(att_chan, buf)) {
3206 tx_meta_data_free(bt_att_tx_meta_data(buf));
3207 net_buf_unref(buf);
3208 return BT_ATT_ERR_UNLIKELY;
3209 }
3210
3211 return BT_ATT_ERR_SUCCESS;
3212 }
3213
3214 static void bt_att_encrypt_change(struct bt_l2cap_chan *chan,
3215 uint8_t hci_status)
3216 {
3217 struct bt_att_chan *att_chan = ATT_CHAN(chan);
3218 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3219 struct bt_conn *conn = le_chan->chan.conn;
3220 uint8_t err;
3221
3222 LOG_DBG("chan %p conn %p handle %u sec_level 0x%02x status 0x%02x", le_chan, conn,
3223 conn->handle, conn->sec_level, hci_status);
3224
3225 if (!att_chan->att) {
3226 LOG_DBG("Ignore encrypt change on detached ATT chan");
3227 return;
3228 }
3229
3230 /*
3231 * If status (HCI status of security procedure) is non-zero, notify
3232 * outstanding request about security failure.
3233 */
3234 if (hci_status) {
3235 if (att_chan->req && att_chan->req->retrying) {
3236 att_handle_rsp(att_chan, NULL, 0,
3237 BT_ATT_ERR_AUTHENTICATION);
3238 }
3239
3240 return;
3241 }
3242
3243 bt_gatt_encrypt_change(conn);
3244
3245 if (conn->sec_level == BT_SECURITY_L1) {
3246 return;
3247 }
3248
3249 if (!(att_chan->req && att_chan->req->retrying)) {
3250 return;
3251 }
3252
3253 LOG_DBG("Retrying");
3254
3255 err = att_req_retry(att_chan);
3256 if (err) {
3257 LOG_DBG("Retry failed (%d)", err);
3258 att_handle_rsp(att_chan, NULL, 0, err);
3259 }
3260 }
3261 #endif /* CONFIG_BT_SMP */
3262
3263 static void bt_att_status(struct bt_l2cap_chan *ch, atomic_t *status)
3264 {
3265 struct bt_att_chan *chan = ATT_CHAN(ch);
3266 sys_snode_t *node;
3267
3268 LOG_DBG("chan %p status %p", ch, status);
3269
3270 if (!atomic_test_bit(status, BT_L2CAP_STATUS_OUT)) {
3271 return;
3272 }
3273
3274 if (!chan->att) {
3275 LOG_DBG("Ignore status on detached ATT chan");
3276 return;
3277 }
3278
3279 /* If there is a request pending don't attempt to send */
3280 if (chan->req) {
3281 return;
3282 }
3283
3284 /* Pull next request from the list */
3285 node = sys_slist_get(&chan->att->reqs);
3286 if (!node) {
3287 return;
3288 }
3289
3290 if (bt_att_chan_req_send(chan, ATT_REQ(node)) >= 0) {
3291 return;
3292 }
3293
3294 /* Prepend back to the list as it could not be sent */
3295 sys_slist_prepend(&chan->att->reqs, node);
3296 }
3297
3298 static void bt_att_released(struct bt_l2cap_chan *ch)
3299 {
3300 struct bt_att_chan *chan = ATT_CHAN(ch);
3301
3302 LOG_DBG("chan %p", chan);
3303
3304 k_mem_slab_free(&chan_slab, (void *)chan);
3305 }
3306
3307 #if defined(CONFIG_BT_EATT)
3308 static void bt_att_reconfigured(struct bt_l2cap_chan *l2cap_chan)
3309 {
3310 struct bt_att_chan *att_chan = ATT_CHAN(l2cap_chan);
3311
3312 LOG_DBG("chan %p", att_chan);
3313
3314 att_chan_mtu_updated(att_chan);
3315 }
3316 #endif /* CONFIG_BT_EATT */
3317
3318 static struct bt_att_chan *att_chan_new(struct bt_att *att, atomic_val_t flags)
3319 {
3320 int quota = 0;
3321 static struct bt_l2cap_chan_ops ops = {
3322 .connected = bt_att_connected,
3323 .disconnected = bt_att_disconnected,
3324 .recv = bt_att_recv,
3325 .sent = bt_att_sent,
3326 .status = bt_att_status,
3327 #if defined(CONFIG_BT_SMP)
3328 .encrypt_change = bt_att_encrypt_change,
3329 #endif /* CONFIG_BT_SMP */
3330 .released = bt_att_released,
3331 #if defined(CONFIG_BT_EATT)
3332 .reconfigured = bt_att_reconfigured,
3333 #endif /* CONFIG_BT_EATT */
3334 };
3335 struct bt_att_chan *chan;
3336
3337 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3338 if (chan->att == att) {
3339 quota++;
3340 }
3341
3342 if (quota == ATT_CHAN_MAX) {
3343 LOG_DBG("Maximum number of channels reached: %d", quota);
3344 return NULL;
3345 }
3346 }
3347
3348 if (k_mem_slab_alloc(&chan_slab, (void **)&chan, K_NO_WAIT)) {
3349 LOG_WRN("No available ATT channel for conn %p", att->conn);
3350 return NULL;
3351 }
3352
3353 (void)memset(chan, 0, sizeof(*chan));
3354 chan->chan.chan.ops = &ops;
3355 k_fifo_init(&chan->tx_queue);
3356 atomic_set(chan->flags, flags);
3357 chan->att = att;
3358 att_chan_attach(att, chan);
3359
3360 if (bt_att_is_enhanced(chan)) {
3361 /* EATT: The MTU will be sent in the ECRED conn req/rsp PDU. The
3362 * TX MTU is received at the L2CAP level.
3363 */
3364 chan->chan.rx.mtu = BT_LOCAL_ATT_MTU_EATT;
3365 } else {
3366 /* UATT: L2CAP Basic mode cannot communicate the L2CAP MTU on
3367 * its own, so ATT has to manage the MTU. The initial MTU is
3368 * defined by the spec.
3369 */
3370 chan->chan.tx.mtu = BT_ATT_DEFAULT_LE_MTU;
3371 chan->chan.rx.mtu = BT_ATT_DEFAULT_LE_MTU;
3372 }
3373
3374 return chan;
3375 }
3376
3377 #if defined(CONFIG_BT_EATT)
3378 size_t bt_eatt_count(struct bt_conn *conn)
3379 {
3380 struct bt_att *att;
3381 struct bt_att_chan *chan;
3382 size_t eatt_count = 0;
3383
3384 if (!conn) {
3385 return 0;
3386 }
3387
3388 att = att_get(conn);
3389 if (!att) {
3390 return 0;
3391 }
3392
3393 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3394 if (bt_att_is_enhanced(chan) &&
3395 atomic_test_bit(chan->flags, ATT_CONNECTED)) {
3396 eatt_count++;
3397 }
3398 }
3399
3400 return eatt_count;
3401 }
3402
3403 static void att_enhanced_connection_work_handler(struct k_work *work)
3404 {
3405 const struct k_work_delayable *dwork = k_work_delayable_from_work(work);
3406 const struct bt_att *att = CONTAINER_OF(dwork, struct bt_att, eatt.connection_work);
3407 const int err = bt_eatt_connect(att->conn, att->eatt.chans_to_connect);
3408
3409 if (err == -ENOMEM) {
3410 LOG_DBG("Failed to connect %d EATT channels, central has probably "
3411 "already established some.",
3412 att->eatt.chans_to_connect);
3413 } else if (err < 0) {
3414 LOG_WRN("Failed to connect %d EATT channels (err: %d)", att->eatt.chans_to_connect,
3415 err);
3416 }
3418 }
3419 #endif /* CONFIG_BT_EATT */
3420
3421 static int bt_att_accept(struct bt_conn *conn, struct bt_l2cap_chan **ch)
3422 {
3423 struct bt_att *att;
3424 struct bt_att_chan *chan;
3425
3426 LOG_DBG("conn %p handle %u", conn, conn->handle);
3427
3428 if (k_mem_slab_alloc(&att_slab, (void **)&att, K_NO_WAIT)) {
3429 LOG_ERR("No available ATT context for conn %p", conn);
3430 return -ENOMEM;
3431 }
3432
3433 att_handle_rsp_thread = k_current_get();
3434
3435 (void)memset(att, 0, sizeof(*att));
3436 att->conn = conn;
3437 sys_slist_init(&att->reqs);
3438 sys_slist_init(&att->chans);
3439
3440 #if defined(CONFIG_BT_EATT)
3441 k_work_init_delayable(&att->eatt.connection_work,
3442 att_enhanced_connection_work_handler);
3443 #endif /* CONFIG_BT_EATT */
3444
3445 chan = att_chan_new(att, 0);
3446 if (!chan) {
3447 return -ENOMEM;
3448 }
3449
3450 *ch = &chan->chan.chan;
3451
3452 return 0;
3453 }
3454
3455 /* The L2CAP channel section is sorted lexicographically. Make sure that the ATT fixed channel is
3456 * placed last, so that the SMP channel is fully initialized before bt_att_connected
3457 * tries to send a security request.
3458 */
3459 BT_L2CAP_CHANNEL_DEFINE(z_att_fixed_chan, BT_L2CAP_CID_ATT, bt_att_accept, NULL);
3460
3461 #if defined(CONFIG_BT_EATT)
3462 static k_timeout_t credit_based_connection_delay(struct bt_conn *conn)
3463 {
3464 /*
3465 * 5.3 Vol 3, Part G, Section 5.4 L2CAP COLLISION MITIGATION
3466 * ... In this situation, the Central may retry
3467 * immediately but the Peripheral shall wait a minimum of 100 ms before retrying;
3468 * on LE connections, the Peripheral shall wait at least 2 *
3469 * (connPeripheralLatency + 1) * connInterval if that is longer.
3470 */
3471
3472 if (IS_ENABLED(CONFIG_BT_CENTRAL) && conn->role == BT_CONN_ROLE_CENTRAL) {
3473 return K_NO_WAIT;
3474 } else if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
3475 uint8_t random;
3476 int err;
3477
3478 err = bt_rand(&random, sizeof(random));
3479 if (err) {
3480 random = 0;
3481 }
3482
3483 const uint8_t rand_delay = random & 0x7; /* Small random delay for IOP */
3484 /* The maximum value of (latency + 1) * 2 multiplied by the
3485 * maximum connection interval is 4000000000, which fits in
3486 * 32 bits, so this won't result in an overflow.
3487 */
3489 const uint32_t calculated_delay_us =
3490 2 * (conn->le.latency + 1) * BT_CONN_INTERVAL_TO_US(conn->le.interval);
3491 const uint32_t calculated_delay_ms = calculated_delay_us / USEC_PER_MSEC;
3492
3493 return K_MSEC(MAX(100, calculated_delay_ms + rand_delay));
3494 }
3495
3496 /* Must be either central or peripheral */
3497 __ASSERT_NO_MSG(false);
3498 CODE_UNREACHABLE;
3499 }
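
/* Worked example, for illustration only (hypothetical connection
 * parameters): a peripheral with connPeripheralLatency = 4 and a 50 ms
 * connection interval (conn->le.interval = 40 units of 1.25 ms) gets
 *
 *   calculated_delay_us = 2 * (4 + 1) * 50000 = 500000
 *   calculated_delay_ms = 500
 *   delay               = MAX(100, 500 + rand_delay)  ->  500-507 ms
 *
 * With latency 0 and a 7.5 ms interval the computed 15 ms loses to the
 * 100 ms minimum required by the spec.
 */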
3500
3501 static int att_schedule_eatt_connect(struct bt_conn *conn, uint8_t chans_to_connect)
3502 {
3503 struct bt_att *att = att_get(conn);
3504
3505 if (!att) {
3506 return -ENOTCONN;
3507 }
3508
3509 att->eatt.chans_to_connect = chans_to_connect;
3510
3511 return k_work_reschedule(&att->eatt.connection_work,
3512 credit_based_connection_delay(conn));
3513 }
3514
3515 static void handle_potential_collision(struct bt_att *att)
3516 {
3517 __ASSERT_NO_MSG(att);
3518
3519 int err;
3520 size_t to_connect = att->eatt.prev_conn_req_missing_chans;
3521
3522 if (att->eatt.prev_conn_rsp_result == BT_L2CAP_LE_ERR_NO_RESOURCES &&
3523 att->eatt.prev_conn_req_result == BT_L2CAP_LE_ERR_NO_RESOURCES) {
3524 LOG_DBG("Credit based connection request collision detected");
3525
3526 /* Reset to not keep retrying on repeated failures */
3527 att->eatt.prev_conn_rsp_result = 0;
3528 att->eatt.prev_conn_req_result = 0;
3529 att->eatt.prev_conn_req_missing_chans = 0;
3530
3531 if (to_connect == 0) {
3532 return;
3533 }
3534
3535 err = att_schedule_eatt_connect(att->conn, to_connect);
3536 if (err < 0) {
3537 LOG_ERR("Failed to schedule EATT connection retry (err: %d)", err);
3538 }
3539 }
3540 }
3541
3542 static void ecred_connect_req_cb(struct bt_conn *conn, uint16_t result, uint16_t psm)
3543 {
3544 struct bt_att *att = att_get(conn);
3545
3546 if (!att) {
3547 return;
3548 }
3549
3550 if (psm != BT_EATT_PSM) {
3551 /* Collision mitigation is only a requirement on the EATT PSM */
3552 return;
3553 }
3554
3555 att->eatt.prev_conn_rsp_result = result;
3556
3557 handle_potential_collision(att);
3558 }
3559
3560 static void ecred_connect_rsp_cb(struct bt_conn *conn, uint16_t result,
3561 uint8_t attempted_to_connect, uint8_t succeeded_to_connect,
3562 uint16_t psm)
3563 {
3564 struct bt_att *att = att_get(conn);
3565
3566 if (!att) {
3567 return;
3568 }
3569
3570 if (psm != BT_EATT_PSM) {
3571 /* Collision mitigation is only a requirement on the EATT PSM */
3572 return;
3573 }
3574
3575 att->eatt.prev_conn_req_result = result;
3576 att->eatt.prev_conn_req_missing_chans =
3577 attempted_to_connect - succeeded_to_connect;
3578
3579 handle_potential_collision(att);
3580 }
3581
3582 int bt_eatt_connect(struct bt_conn *conn, size_t num_channels)
3583 {
3584 struct bt_att_chan *att_chan;
3585 struct bt_att *att;
3586 struct bt_l2cap_chan *chan[CONFIG_BT_EATT_MAX + 1] = {};
3587 size_t offset = 0;
3588 size_t i = 0;
3589 int err;
3590
3591 if (!conn) {
3592 return -EINVAL;
3593 }
3594
3595 if (num_channels > CONFIG_BT_EATT_MAX || num_channels == 0) {
3596 return -EINVAL;
3597 }
3598
3599 /* Check the encryption level for EATT */
3600 if (bt_conn_get_security(conn) < BT_SECURITY_L2) {
3601 /* Vol 3, Part G, Section 5.3.2 Channel Requirements states:
3602 * The channel shall be encrypted.
3603 */
3604 return -EPERM;
3605 }
3606
3607 att_chan = att_get_fixed_chan(conn);
3608 att = att_chan->att;
3609
3610 while (num_channels--) {
3611 att_chan = att_chan_new(att, BIT(ATT_ENHANCED));
3612 if (!att_chan) {
3613 break;
3614 }
3615
3616 chan[i] = &att_chan->chan.chan;
3617 i++;
3618 }
3619
3620 if (!i) {
3621 return -ENOMEM;
3622 }
3623
3624 while (offset < i) {
3625 /* bt_l2cap_ecred_chan_connect() uses the first L2CAP_ECRED_CHAN_MAX_PER_REQ
3626 * elements of the array or until a null-terminator is reached.
3627 */
3628 err = bt_l2cap_ecred_chan_connect(conn, &chan[offset], BT_EATT_PSM);
3629 if (err < 0) {
3630 return err;
3631 }
3632
3633 offset += L2CAP_ECRED_CHAN_MAX_PER_REQ;
3634 }
3635
3636 return 0;
3637 }
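
/* Usage sketch, for illustration only: an application may bring up all
 * configured bearers once the link is encrypted:
 *
 *   err = bt_eatt_connect(conn, CONFIG_BT_EATT_MAX);
 *
 * -EPERM means the link is not yet encrypted and security must be raised
 * first. Since at most L2CAP_ECRED_CHAN_MAX_PER_REQ channels fit in one
 * credit-based connection request, larger counts are split over several
 * requests by the loop above.
 */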
3638
3639 #if defined(CONFIG_BT_EATT_AUTO_CONNECT)
3640 static void eatt_auto_connect(struct bt_conn *conn, bt_security_t level,
3641 enum bt_security_err err)
3642 {
3643 int eatt_err;
3644
3645 if (err || level < BT_SECURITY_L2 || !bt_att_fixed_chan_only(conn)) {
3646 return;
3647 }
3648
3649 eatt_err = att_schedule_eatt_connect(conn, CONFIG_BT_EATT_MAX);
3650 if (eatt_err < 0) {
3651 LOG_WRN("Automatic creation of EATT bearers failed on "
3652 "connection %s with error %d",
3653 bt_addr_le_str(bt_conn_get_dst(conn)), eatt_err);
3654 }
3655 }
3656
3657 BT_CONN_CB_DEFINE(conn_callbacks) = {
3658 .security_changed = eatt_auto_connect,
3659 };
3660
3661 #endif /* CONFIG_BT_EATT_AUTO_CONNECT */
3662
3663 int bt_eatt_disconnect(struct bt_conn *conn)
3664 {
3665 struct bt_att_chan *chan;
3666 struct bt_att *att;
3667 int err = -ENOTCONN;
3668
3669 if (!conn) {
3670 return -EINVAL;
3671 }
3672
3673 chan = att_get_fixed_chan(conn);
3674 att = chan->att;
3675
3676 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3677 if (bt_att_is_enhanced(chan)) {
3678 err = bt_l2cap_chan_disconnect(&chan->chan.chan);
3679 }
3680 }
3681
3682 return err;
3683 }
3684
3685 #if defined(CONFIG_BT_TESTING)
3686 int bt_eatt_disconnect_one(struct bt_conn *conn)
3687 {
3688 struct bt_att_chan *chan;
3689 struct bt_att *att;
3690 int err = -ENOTCONN;
3691
3692 if (!conn) {
3693 return -EINVAL;
3694 }
3695
chan = att_get_fixed_chan(conn);
att = chan->att;

3696 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3697 if (bt_att_is_enhanced(chan)) {
3698 err = bt_l2cap_chan_disconnect(&chan->chan.chan);
3699 return err;
3700 }
3701 }
3702
3703 return err;
3704 }
3705
3706 int bt_eatt_reconfigure(struct bt_conn *conn, uint16_t mtu)
3707 {
3708 struct bt_att_chan *att_chan = att_get_fixed_chan(conn);
3709 struct bt_att *att = att_chan->att;
3710 struct bt_l2cap_chan *chans[CONFIG_BT_EATT_MAX + 1] = {};
3711 size_t offset = 0;
3712 size_t i = 0;
3713 int err;
3714
3715 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, att_chan, node) {
3716 if (bt_att_is_enhanced(att_chan)) {
3717 chans[i] = &att_chan->chan.chan;
3718 i++;
3719 }
3720 }
3721
3722 while (offset < i) {
3723 /* bt_l2cap_ecred_chan_reconfigure() uses the first L2CAP_ECRED_CHAN_MAX_PER_REQ
3724 * elements of the array or until a null-terminator is reached.
3725 */
3726 err = bt_l2cap_ecred_chan_reconfigure(&chans[offset], mtu);
3727 if (err < 0) {
3728 return err;
3729 }
3730
3731 offset += L2CAP_ECRED_CHAN_MAX_PER_REQ;
3732 }
3733
3734 return 0;
3735 }
3736 #endif /* CONFIG_BT_TESTING */
3737 #endif /* CONFIG_BT_EATT */
3738
3739 static int bt_eatt_accept(struct bt_conn *conn, struct bt_l2cap_server *server,
3740 struct bt_l2cap_chan **chan)
3741 {
3742 struct bt_att_chan *att_chan = att_get_fixed_chan(conn);
3743 struct bt_att *att = att_chan->att;
3744
3745 LOG_DBG("conn %p handle %u", conn, conn->handle);
3746
3747 att_chan = att_chan_new(att, BIT(ATT_ENHANCED));
3748 if (att_chan) {
3749 *chan = &att_chan->chan.chan;
3750 return 0;
3751 }
3752
3753 return -ENOMEM;
3754 }
3755
3756 static void bt_eatt_init(void)
3757 {
3758 int err;
3759 static struct bt_l2cap_server eatt_l2cap = {
3760 .psm = BT_EATT_PSM,
3761 .sec_level = BT_SECURITY_L2,
3762 .accept = bt_eatt_accept,
3763 };
3764 struct bt_l2cap_server *registered_server;
3765
3766 LOG_DBG("");
3767
3768 /* Check if eatt_l2cap server has already been registered. */
3769 registered_server = bt_l2cap_server_lookup_psm(eatt_l2cap.psm);
3770 if (registered_server != &eatt_l2cap) {
3771 err = bt_l2cap_server_register(&eatt_l2cap);
3772 if (err < 0) {
3773 LOG_ERR("EATT Server registration failed %d", err);
3774 }
3775 }
3776
3777 #if defined(CONFIG_BT_EATT)
3778 static const struct bt_l2cap_ecred_cb cb = {
3779 .ecred_conn_rsp = ecred_connect_rsp_cb,
3780 .ecred_conn_req = ecred_connect_req_cb,
3781 };
3782
3783 bt_l2cap_register_ecred_cb(&cb);
3784 #endif /* CONFIG_BT_EATT */
3785 }
3786
bt_att_init(void)3787 void bt_att_init(void)
3788 {
3789 k_fifo_init(&free_att_tx_meta_data);
3790 for (size_t i = 0; i < ARRAY_SIZE(tx_meta_data); i++) {
3791 k_fifo_put(&free_att_tx_meta_data, &tx_meta_data[i]);
3792 }
3793
3794 bt_gatt_init();
3795
3796 if (IS_ENABLED(CONFIG_BT_EATT)) {
3797 bt_eatt_init();
3798 }
3799 }
3800
bt_att_get_mtu(struct bt_conn * conn)3801 uint16_t bt_att_get_mtu(struct bt_conn *conn)
3802 {
3803 struct bt_att_chan *chan, *tmp;
3804 struct bt_att *att;
3805 uint16_t mtu = 0;
3806
3807 att = att_get(conn);
3808 if (!att) {
3809 return 0;
3810 }
3811
3812 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
3813 if (bt_att_mtu(chan) > mtu) {
3814 mtu = bt_att_mtu(chan);
3815 }
3816 }
3817
3818 return mtu;
3819 }
3820
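/* Usage sketch (illustrative only): upper layers normally reach this
 * through the public bt_gatt_get_mtu() wrapper and use the result to
 * size payloads, e.g. a Write Request can carry at most ATT_MTU - 3
 * octets of value (1 octet opcode + 2 octets handle of overhead).
 *
 *	uint16_t mtu = bt_att_get_mtu(conn);
 *	uint16_t max_write = (mtu > 3) ? (mtu - 3) : 0;
 */
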
static void att_chan_mtu_updated(struct bt_att_chan *updated_chan)
{
	struct bt_att *att = updated_chan->att;
	struct bt_att_chan *chan, *tmp;
	uint16_t max_tx = 0, max_rx = 0;

	/* Get the maximum MTUs of the other channels */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (chan == updated_chan) {
			continue;
		}
		max_tx = MAX(max_tx, chan->chan.tx.mtu);
		max_rx = MAX(max_rx, chan->chan.rx.mtu);
	}

	/* Report only if the updated channel raised either maximum */
	if ((updated_chan->chan.tx.mtu > max_tx) ||
	    (updated_chan->chan.rx.mtu > max_rx)) {
		max_tx = MAX(max_tx, updated_chan->chan.tx.mtu);
		max_rx = MAX(max_rx, updated_chan->chan.rx.mtu);
		bt_gatt_att_max_mtu_changed(att->conn, max_tx, max_rx);
	}
}

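/* Worked example: with the other bearers at tx/rx MTU 128, updating one
 * bearer to 247 exceeds both per-direction maxima, so
 * bt_gatt_att_max_mtu_changed() is called with 247/247. An update that
 * stays at or below the other bearers' maxima is not reported, since the
 * connection-wide maximum is unchanged.
 */
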
struct bt_att_req *bt_att_req_alloc(k_timeout_t timeout)
{
	struct bt_att_req *req = NULL;

	if (k_current_get() == att_handle_rsp_thread) {
		/* No req will be fulfilled while blocking on the bt_recv
		 * thread. Blocking would cause deadlock.
		 */
		timeout = K_NO_WAIT;
	}

	/* Reserve space for request */
	if (k_mem_slab_alloc(&req_slab, (void **)&req, timeout)) {
		LOG_DBG("No space for req");
		return NULL;
	}

	LOG_DBG("req %p", req);

	memset(req, 0, sizeof(*req));

	return req;
}

void bt_att_req_free(struct bt_att_req *req)
{
	LOG_DBG("req %p", req);

	if (req->buf) {
		tx_meta_data_free(bt_att_tx_meta_data(req->buf));
		net_buf_unref(req->buf);
		req->buf = NULL;
	}

	k_mem_slab_free(&req_slab, (void *)req);
}

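/* Ownership note (sketch): bt_att_req_free() releases both the slab slot
 * and any request PDU still attached via req->buf. A request handed to
 * bt_att_req_send() is owned by the stack from then on; callers should
 * only free it themselves when the send failed or after cancelling it
 * with bt_att_req_cancel().
 */
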
int bt_att_send(struct bt_conn *conn, struct net_buf *buf)
{
	struct bt_att *att;

	__ASSERT_NO_MSG(conn);
	__ASSERT_NO_MSG(buf);

	att = att_get(conn);
	if (!att) {
		tx_meta_data_free(bt_att_tx_meta_data(buf));
		net_buf_unref(buf);
		return -ENOTCONN;
	}

	net_buf_put(&att->tx_queue, buf);
	att_send_process(att);

	return 0;
}

int bt_att_req_send(struct bt_conn *conn, struct bt_att_req *req)
{
	struct bt_att *att;

	LOG_DBG("conn %p req %p", conn, req);

	__ASSERT_NO_MSG(conn);
	__ASSERT_NO_MSG(req);

	att = att_get(conn);
	if (!att) {
		return -ENOTCONN;
	}

	sys_slist_append(&att->reqs, &req->node);
	att_req_send_process(att);

	return 0;
}

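/* Usage sketch (illustrative only; my_rsp_handler and pdu are
 * hypothetical, field names as in att_internal.h): the typical request
 * lifecycle as driven by the GATT layer.
 *
 *	struct bt_att_req *req = bt_att_req_alloc(K_FOREVER);
 *
 *	if (!req) {
 *		return -ENOMEM;
 *	}
 *
 *	req->func = my_rsp_handler;
 *	req->buf = pdu;
 *
 *	int err = bt_att_req_send(conn, req);
 *	if (err) {
 *		bt_att_req_free(req);
 *	}
 */
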
static bool bt_att_chan_req_cancel(struct bt_att_chan *chan,
				   struct bt_att_req *req)
{
	if (chan->req != req) {
		return false;
	}

	chan->req = &cancel;

	bt_att_req_free(req);

	return true;
}

void bt_att_req_cancel(struct bt_conn *conn, struct bt_att_req *req)
{
	struct bt_att *att;
	struct bt_att_chan *chan, *tmp;

	LOG_DBG("req %p", req);

	if (!conn || !req) {
		return;
	}

	att = att_get(conn);
	if (!att) {
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		/* Check if request is outstanding */
		if (bt_att_chan_req_cancel(chan, req)) {
			return;
		}
	}

	/* Remove request from the list */
	sys_slist_find_and_remove(&att->reqs, &req->node);

	bt_att_req_free(req);
}

struct bt_att_req *bt_att_find_req_by_user_data(struct bt_conn *conn, const void *user_data)
{
	struct bt_att *att;
	struct bt_att_chan *chan;
	struct bt_att_req *req;

	att = att_get(conn);
	if (!att) {
		return NULL;
	}

	/* A channel with no outstanding request has chan->req == NULL, so
	 * guard the dereference.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
		if (chan->req && chan->req->user_data == user_data) {
			return chan->req;
		}
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&att->reqs, req, node) {
		if (req->user_data == user_data) {
			return req;
		}
	}

	return NULL;
}

bool bt_att_fixed_chan_only(struct bt_conn *conn)
{
#if defined(CONFIG_BT_EATT)
	return bt_eatt_count(conn) == 0;
#else
	return true;
#endif /* CONFIG_BT_EATT */
}

void bt_att_clear_out_of_sync_sent(struct bt_conn *conn)
{
	struct bt_att *att = att_get(conn);
	struct bt_att_chan *chan;

	if (!att) {
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
		atomic_clear_bit(chan->flags, ATT_OUT_OF_SYNC_SENT);
	}
}

bool bt_att_out_of_sync_sent_on_fixed(struct bt_conn *conn)
{
	struct bt_l2cap_chan *l2cap_chan;
	struct bt_att_chan *att_chan;

	l2cap_chan = bt_l2cap_le_lookup_rx_cid(conn, BT_L2CAP_CID_ATT);
	if (!l2cap_chan) {
		return false;
	}

	att_chan = ATT_CHAN(l2cap_chan);
	return atomic_test_bit(att_chan->flags, ATT_OUT_OF_SYNC_SENT);
}

void bt_att_set_tx_meta_data(struct net_buf *buf, bt_gatt_complete_func_t func, void *user_data,
			     enum bt_att_chan_opt chan_opt)
{
	struct bt_att_tx_meta_data *data = bt_att_tx_meta_data(buf);

	data->func = func;
	data->user_data = user_data;
	data->attr_count = 1;
	data->chan_opt = chan_opt;
}

void bt_att_increment_tx_meta_data_attr_count(struct net_buf *buf, uint16_t attr_count)
{
	struct bt_att_tx_meta_data *data = bt_att_tx_meta_data(buf);

	data->attr_count += attr_count;
}

bool bt_att_tx_meta_data_match(const struct net_buf *buf, bt_gatt_complete_func_t func,
			       const void *user_data, enum bt_att_chan_opt chan_opt)
{
	return ((bt_att_tx_meta_data(buf)->func == func) &&
		(bt_att_tx_meta_data(buf)->user_data == user_data) &&
		(bt_att_tx_meta_data(buf)->chan_opt == chan_opt));
}

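/* Usage sketch (illustrative only; complete_cb and cb_user_data are
 * placeholders): the GATT layer stamps each outgoing PDU with completion
 * metadata, then uses the match helper to decide whether a follow-up
 * notification can piggyback on the same buffer.
 *
 *	bt_att_set_tx_meta_data(buf, complete_cb, cb_user_data,
 *				BT_ATT_CHAN_OPT_NONE);
 *	...
 *	if (bt_att_tx_meta_data_match(buf, complete_cb, cb_user_data,
 *				      BT_ATT_CHAN_OPT_NONE)) {
 *		bt_att_increment_tx_meta_data_attr_count(buf, 1);
 *	}
 */
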
bool bt_att_chan_opt_valid(struct bt_conn *conn, enum bt_att_chan_opt chan_opt)
{
	if ((chan_opt & (BT_ATT_CHAN_OPT_ENHANCED_ONLY | BT_ATT_CHAN_OPT_UNENHANCED_ONLY)) ==
	    (BT_ATT_CHAN_OPT_ENHANCED_ONLY | BT_ATT_CHAN_OPT_UNENHANCED_ONLY)) {
		/* Enhanced and Unenhanced are mutually exclusive */
		return false;
	}

	/* Choosing EATT requires EATT channels connected and encryption enabled */
	if (chan_opt & BT_ATT_CHAN_OPT_ENHANCED_ONLY) {
		return (bt_conn_get_security(conn) > BT_SECURITY_L1) &&
		       !bt_att_fixed_chan_only(conn);
	}

	return true;
}

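/* Behaviour summary (sketch) for bt_att_chan_opt_valid():
 *
 *	chan_opt                         result
 *	-------------------------------  --------------------------------
 *	ENHANCED_ONLY | UNENHANCED_ONLY  false (mutually exclusive)
 *	ENHANCED_ONLY                    true only when the link is
 *	                                 encrypted and at least one EATT
 *	                                 bearer is up
 *	UNENHANCED_ONLY or NONE          true
 */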