/* att.c - Attribute protocol handling */

/*
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>

#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/uuid.h>
#include <zephyr/bluetooth/att.h>
#include <zephyr/bluetooth/gatt.h>
#include <zephyr/drivers/bluetooth/hci_driver.h>

#include "common/bt_str.h"

#include "hci_core.h"
#include "conn_internal.h"
#include "l2cap_internal.h"
#include "smp.h"
#include "att_internal.h"
#include "gatt_internal.h"

#define LOG_LEVEL CONFIG_BT_ATT_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_att);

#define ATT_CHAN(_ch) CONTAINER_OF(_ch, struct bt_att_chan, chan.chan)
#define ATT_REQ(_node) CONTAINER_OF(_node, struct bt_att_req, node)

#define ATT_CMD_MASK 0x40

#if defined(CONFIG_BT_EATT)
#define ATT_CHAN_MAX (CONFIG_BT_EATT_MAX + 1)
#else
#define ATT_CHAN_MAX 1
#endif /* CONFIG_BT_EATT */
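
/* With EATT enabled, each connection can carry one unenhanced
 * (fixed-channel) ATT bearer plus up to CONFIG_BT_EATT_MAX enhanced
 * bearers, hence the "+ 1" above.
 */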

typedef enum __packed {
	ATT_COMMAND,
	ATT_REQUEST,
	ATT_RESPONSE,
	ATT_NOTIFICATION,
	ATT_CONFIRMATION,
	ATT_INDICATION,
	ATT_UNKNOWN,
} att_type_t;

static att_type_t att_op_get_type(uint8_t op);

#if CONFIG_BT_ATT_PREPARE_COUNT > 0
struct bt_attr_data {
	uint16_t handle;
	uint16_t offset;
};

/* Pool for incoming ATT packets */
NET_BUF_POOL_DEFINE(prep_pool, CONFIG_BT_ATT_PREPARE_COUNT, BT_ATT_BUF_SIZE,
		    sizeof(struct bt_attr_data), NULL);
#endif /* CONFIG_BT_ATT_PREPARE_COUNT */

K_MEM_SLAB_DEFINE(req_slab, sizeof(struct bt_att_req),
		  CONFIG_BT_ATT_TX_COUNT, __alignof__(struct bt_att_req));

enum {
	ATT_CONNECTED,
	ATT_ENHANCED,
	ATT_PENDING_SENT,
	ATT_OUT_OF_SYNC_SENT,

	/* Total number of flags - must be at the end of the enum */
	ATT_NUM_FLAGS,
};

struct bt_att_tx_meta_data;
typedef void (*bt_att_tx_cb_t)(struct bt_conn *conn,
			       struct bt_att_tx_meta_data *user_data);

struct bt_att_tx_meta_data {
	int err;
	uint8_t opcode;
	uint16_t attr_count;
	struct bt_att_chan *att_chan;
	bt_gatt_complete_func_t func;
	void *user_data;
	enum bt_att_chan_opt chan_opt;
};

struct bt_att_tx_meta {
	struct bt_att_tx_meta_data *data;
};

/* ATT channel specific data */
struct bt_att_chan {
	/* Connection this channel is associated with */
	struct bt_att *att;
	struct bt_l2cap_le_chan chan;
	ATOMIC_DEFINE(flags, ATT_NUM_FLAGS);
	struct bt_att_req *req;
	struct k_fifo tx_queue;
	struct k_work_delayable timeout_work;
	sys_snode_t node;
};

static bool bt_att_is_enhanced(struct bt_att_chan *chan)
{
	/* Optimization. */
	if (!IS_ENABLED(CONFIG_BT_EATT)) {
		return false;
	}

	return atomic_test_bit(chan->flags, ATT_ENHANCED);
}

static uint16_t bt_att_mtu(struct bt_att_chan *chan)
{
	/* Core v5.3 Vol 3 Part F 3.4.2:
	 *
	 * The server and client shall set ATT_MTU to the minimum of the
	 * Client Rx MTU and the Server Rx MTU.
	 */
	return MIN(chan->chan.rx.mtu, chan->chan.tx.mtu);
}
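
/* Worked example of the rule above: if our Rx MTU is 247 and the peer's
 * Rx MTU is 64, both sides use ATT_MTU = MIN(247, 64) = 64 in both
 * directions.
 */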

/* Descriptor of application-specific authorization callbacks that are used
 * with the CONFIG_BT_GATT_AUTHORIZATION_CUSTOM Kconfig enabled.
 */
static const struct bt_gatt_authorization_cb *authorization_cb;

/* ATT connection specific data */
struct bt_att {
	struct bt_conn *conn;
	/* Shared request queue */
	sys_slist_t reqs;
	struct k_fifo tx_queue;
#if CONFIG_BT_ATT_PREPARE_COUNT > 0
	sys_slist_t prep_queue;
#endif
	/* Contains bt_att_chan instance(s) */
	sys_slist_t chans;
#if defined(CONFIG_BT_EATT)
	struct {
		struct k_work_delayable connection_work;
		uint8_t chans_to_connect;

		uint16_t prev_conn_rsp_result;
		uint16_t prev_conn_req_result;
		uint8_t prev_conn_req_missing_chans;
	} eatt;
#endif /* CONFIG_BT_EATT */
};

K_MEM_SLAB_DEFINE(att_slab, sizeof(struct bt_att),
		  CONFIG_BT_MAX_CONN, __alignof__(struct bt_att));
K_MEM_SLAB_DEFINE(chan_slab, sizeof(struct bt_att_chan),
		  CONFIG_BT_MAX_CONN * ATT_CHAN_MAX,
		  __alignof__(struct bt_att_chan));
static struct bt_att_req cancel;

/** The thread ATT response handlers likely run on.
 *
 * Blocking this thread while waiting for an ATT request to resolve can cause a
 * deadlock.
 *
 * This can happen if the application queues ATT requests in the context of a
 * callback from the Bluetooth stack. This is because queuing an ATT request
 * will block until a request-resource is available, and the callbacks run on
 * the same thread as the ATT response handler that frees request-resources.
 *
 * The intended use of this value is to detect the above situation.
 */
static k_tid_t att_handle_rsp_thread;
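
/* A sketch of the deadlock described above (hypothetical application code,
 * not part of this file): calling a request-based API such as bt_gatt_read()
 * from a callback that runs on this thread blocks waiting for a free request
 * slot, but slots are only freed by response handlers running on this very
 * thread, so the wait can never be satisfied.
 */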

static struct bt_att_tx_meta_data tx_meta_data_storage[CONFIG_BT_ATT_TX_COUNT];

struct bt_att_tx_meta_data *bt_att_get_tx_meta_data(const struct net_buf *buf);
static void att_on_sent_cb(struct bt_att_tx_meta_data *meta);

#if defined(CONFIG_BT_ATT_ERR_TO_STR)
const char *bt_att_err_to_str(uint8_t att_err)
{
	/* Two mapping tables are used to avoid a big gap of NULL entries. */
#define ATT_ERR(err) [err] = #err
#define ATT_ERR_SECOND(err) [err - BT_ATT_ERR_WRITE_REQ_REJECTED] = #err

	const char * const first_mapping_table[] = {
		ATT_ERR(BT_ATT_ERR_SUCCESS),
		ATT_ERR(BT_ATT_ERR_INVALID_HANDLE),
		ATT_ERR(BT_ATT_ERR_READ_NOT_PERMITTED),
		ATT_ERR(BT_ATT_ERR_WRITE_NOT_PERMITTED),
		ATT_ERR(BT_ATT_ERR_INVALID_PDU),
		ATT_ERR(BT_ATT_ERR_AUTHENTICATION),
		ATT_ERR(BT_ATT_ERR_NOT_SUPPORTED),
		ATT_ERR(BT_ATT_ERR_INVALID_OFFSET),
		ATT_ERR(BT_ATT_ERR_AUTHORIZATION),
		ATT_ERR(BT_ATT_ERR_PREPARE_QUEUE_FULL),
		ATT_ERR(BT_ATT_ERR_ATTRIBUTE_NOT_FOUND),
		ATT_ERR(BT_ATT_ERR_ATTRIBUTE_NOT_LONG),
		ATT_ERR(BT_ATT_ERR_ENCRYPTION_KEY_SIZE),
		ATT_ERR(BT_ATT_ERR_INVALID_ATTRIBUTE_LEN),
		ATT_ERR(BT_ATT_ERR_UNLIKELY),
		ATT_ERR(BT_ATT_ERR_INSUFFICIENT_ENCRYPTION),
		ATT_ERR(BT_ATT_ERR_UNSUPPORTED_GROUP_TYPE),
		ATT_ERR(BT_ATT_ERR_INSUFFICIENT_RESOURCES),
		ATT_ERR(BT_ATT_ERR_DB_OUT_OF_SYNC),
		ATT_ERR(BT_ATT_ERR_VALUE_NOT_ALLOWED),
	};

	const char * const second_mapping_table[] = {
		ATT_ERR_SECOND(BT_ATT_ERR_WRITE_REQ_REJECTED),
		ATT_ERR_SECOND(BT_ATT_ERR_CCC_IMPROPER_CONF),
		ATT_ERR_SECOND(BT_ATT_ERR_PROCEDURE_IN_PROGRESS),
		ATT_ERR_SECOND(BT_ATT_ERR_OUT_OF_RANGE),
	};


	if (att_err < ARRAY_SIZE(first_mapping_table) && first_mapping_table[att_err]) {
		return first_mapping_table[att_err];
	} else if (att_err >= BT_ATT_ERR_WRITE_REQ_REJECTED) {
		const uint8_t second_index = att_err - BT_ATT_ERR_WRITE_REQ_REJECTED;

		if (second_index < ARRAY_SIZE(second_mapping_table) &&
		    second_mapping_table[second_index]) {
			return second_mapping_table[second_index];
		}
	}

	return "(unknown)";

#undef ATT_ERR
#undef ATT_ERR_SECOND
}
#endif /* CONFIG_BT_ATT_ERR_TO_STR */
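
/* Illustrative use (assuming CONFIG_BT_ATT_ERR_TO_STR is enabled):
 *
 *	LOG_ERR("ATT error 0x%02x: %s", err, bt_att_err_to_str(err));
 */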

static void att_tx_destroy(struct net_buf *buf)
{
	struct bt_att_tx_meta_data *p_meta = bt_att_get_tx_meta_data(buf);
	struct bt_att_tx_meta_data meta;

	LOG_DBG("%p", buf);

	/* Destroy the buffer first, as the callback may attempt to allocate a
	 * new one for another operation.
	 */
	meta = *p_meta;

	/* Clear the meta storage. This might help catch illegal
	 * "use-after-free"s. An initial memset is not necessary, as the
	 * metadata storage array is `static`.
	 */
	memset(p_meta, 0x00, sizeof(*p_meta));

	/* After this point, p_meta doesn't belong to us.
	 * The user data will be memset to 0 on allocation.
	 */
	net_buf_destroy(buf);

	/* ATT opcode 0 is invalid. If we get here, that means the buffer got
	 * destroyed before it was ready to be sent. Hopefully nobody sets the
	 * opcode and then destroys the buffer without sending it. :'(
	 */
	if (meta.opcode != 0) {
		att_on_sent_cb(&meta);
	}
}

NET_BUF_POOL_DEFINE(att_pool, CONFIG_BT_ATT_TX_COUNT,
		    BT_L2CAP_SDU_BUF_SIZE(BT_ATT_BUF_SIZE),
		    CONFIG_BT_CONN_TX_USER_DATA_SIZE, att_tx_destroy);

struct bt_att_tx_meta_data *bt_att_get_tx_meta_data(const struct net_buf *buf)
{
	__ASSERT_NO_MSG(net_buf_pool_get(buf->pool_id) == &att_pool);

	/* Metadata lifetime is implicitly tied to the buffer lifetime.
	 * Treat it as part of the buffer itself.
	 */
	return &tx_meta_data_storage[net_buf_id((struct net_buf *)buf)];
}
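
/* The 1:1 mapping works because `att_pool` and `tx_meta_data_storage` have
 * the same number of entries (CONFIG_BT_ATT_TX_COUNT), and net_buf_id()
 * returns the buffer's index within its pool.
 */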

static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf);

static void att_chan_mtu_updated(struct bt_att_chan *updated_chan);
static void bt_att_disconnected(struct bt_l2cap_chan *chan);

struct net_buf *bt_att_create_rsp_pdu(struct bt_att_chan *chan, uint8_t op);

static void bt_att_sent(struct bt_l2cap_chan *ch);

static void att_sent(void *user_data)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *att_chan = data->att_chan;
	struct bt_conn *conn = att_chan->att->conn;
	struct bt_l2cap_chan *chan = &att_chan->chan.chan;

	__ASSERT_NO_MSG(!bt_att_is_enhanced(att_chan));

	LOG_DBG("conn %p chan %p", conn, chan);

	/* For EATT, `bt_att_sent` is assigned to the `.sent` L2 callback.
	 * L2CAP will then call it once the SDU has finished sending.
	 *
	 * For UATT, this won't happen, as static LE l2cap channels don't have
	 * SDUs. Call it manually instead.
	 */
	bt_att_sent(chan);
}

/* In case of success the ownership of the buffer is transferred to the stack
 * which takes care of releasing it when it completes transmitting to the
 * controller.
 *
 * In case bt_l2cap_send_cb fails the buffer state and ownership are retained
 * so the buffer can be safely pushed back to the queue to be processed later.
 */
static int chan_send(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_hdr *hdr;
	struct net_buf_simple_state state;
	int err;
	struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);
	struct bt_att_chan *prev_chan = data->att_chan;

	hdr = (void *)buf->data;

	LOG_DBG("code 0x%02x", hdr->code);

	if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
		LOG_ERR("ATT channel not connected");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_BT_EATT) && hdr->code == BT_ATT_OP_MTU_REQ &&
	    chan->chan.tx.cid != BT_L2CAP_CID_ATT) {
		/* The Exchange MTU sub-procedure shall only be supported on
		 * the LE Fixed Channel Unenhanced ATT bearer
		 */
		return -ENOTSUP;
	}

	__ASSERT_NO_MSG(buf->len >= sizeof(struct bt_att_hdr));
	data->opcode = buf->data[0];
	data->err = 0;

	if (IS_ENABLED(CONFIG_BT_EATT) && bt_att_is_enhanced(chan)) {
		/* Check if a `sent` callback is already pending; if so, it
		 * cannot be modified, so the operation will need to be queued.
		 */
		if (atomic_test_bit(chan->flags, ATT_PENDING_SENT)) {
			return -EAGAIN;
		}

		if (hdr->code == BT_ATT_OP_SIGNED_WRITE_CMD) {
			return -ENOTSUP;
		}

		/* Check if the channel is ready to send in case of a request */
		if (att_op_get_type(hdr->code) == ATT_REQUEST &&
		    !atomic_test_bit(chan->chan.chan.status,
				     BT_L2CAP_STATUS_OUT)) {
			return -EAGAIN;
		}

		atomic_set_bit(chan->flags, ATT_PENDING_SENT);
		data->att_chan = chan;

		/* bt_l2cap_chan_send actually returns the number of bytes
		 * that could be sent immediately.
		 */
		err = bt_l2cap_chan_send(&chan->chan.chan, buf);
		if (err < 0) {
			data->att_chan = prev_chan;
			atomic_clear_bit(chan->flags, ATT_PENDING_SENT);
			data->err = err;

			return err;
		} else {
			/* On success, the almighty scheduler might already have
			 * run the destroy cb on the buffer. In that case, buf
			 * and its metadata are dangling pointers.
			 */
			buf = NULL;
			data = NULL;
		}

		return 0;
	}

	if (hdr->code == BT_ATT_OP_SIGNED_WRITE_CMD) {
		err = bt_smp_sign(chan->att->conn, buf);
		if (err) {
			LOG_ERR("Error signing data");
			net_buf_unref(buf);
			return err;
		}
	}

	net_buf_simple_save(&buf->b, &state);

	data->att_chan = chan;

	err = bt_l2cap_send_pdu(&chan->chan, buf, NULL, NULL);
	if (err) {
		if (err == -ENOBUFS) {
			LOG_ERR("Ran out of TX buffers or contexts.");
		}
		/* If an error has occurred, restore the buffer state */
		net_buf_simple_restore(&buf->b, &state);
		data->att_chan = prev_chan;
		data->err = err;
	}

	return err;
}

static bool att_chan_matches_chan_opt(struct bt_att_chan *chan, enum bt_att_chan_opt chan_opt)
{
	__ASSERT_NO_MSG(chan_opt <= BT_ATT_CHAN_OPT_ENHANCED_ONLY);

	if (chan_opt == BT_ATT_CHAN_OPT_NONE) {
		return true;
	}

	if (bt_att_is_enhanced(chan)) {
		return (chan_opt & BT_ATT_CHAN_OPT_ENHANCED_ONLY);
	} else {
		return (chan_opt & BT_ATT_CHAN_OPT_UNENHANCED_ONLY);
	}
}
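
/* For example, a PDU queued with chan_opt = BT_ATT_CHAN_OPT_ENHANCED_ONLY
 * is skipped by every unenhanced bearer and only picked up by an EATT
 * channel, while BT_ATT_CHAN_OPT_NONE matches any bearer.
 */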

static struct net_buf *get_first_buf_matching_chan(struct k_fifo *fifo, struct bt_att_chan *chan)
{
	if (IS_ENABLED(CONFIG_BT_EATT)) {
		struct k_fifo skipped;
		struct net_buf *buf;
		struct net_buf *ret = NULL;
		struct bt_att_tx_meta_data *meta;

		k_fifo_init(&skipped);

		while ((buf = net_buf_get(fifo, K_NO_WAIT))) {
			meta = bt_att_get_tx_meta_data(buf);
			if (!ret &&
			    att_chan_matches_chan_opt(chan, meta->chan_opt)) {
				ret = buf;
			} else {
				net_buf_put(&skipped, buf);
			}
		}

		__ASSERT_NO_MSG(k_fifo_is_empty(fifo));

		while ((buf = net_buf_get(&skipped, K_NO_WAIT))) {
			net_buf_put(fifo, buf);
		}

		__ASSERT_NO_MSG(k_fifo_is_empty(&skipped));

		return ret;
	} else {
		return net_buf_get(fifo, K_NO_WAIT);
	}
}

static struct bt_att_req *get_first_req_matching_chan(sys_slist_t *reqs, struct bt_att_chan *chan)
{
	if (IS_ENABLED(CONFIG_BT_EATT)) {
		sys_snode_t *curr, *prev = NULL;
		struct bt_att_tx_meta_data *meta = NULL;

		SYS_SLIST_FOR_EACH_NODE(reqs, curr) {
			meta = bt_att_get_tx_meta_data(ATT_REQ(curr)->buf);
			if (att_chan_matches_chan_opt(chan, meta->chan_opt)) {
				break;
			}

			prev = curr;
		}

		if (curr) {
			sys_slist_remove(reqs, prev, curr);

			return ATT_REQ(curr);
		}

		return NULL;
	}

	sys_snode_t *node = sys_slist_get(reqs);

	if (node) {
		return ATT_REQ(node);
	} else {
		return NULL;
	}
}

static int process_queue(struct bt_att_chan *chan, struct k_fifo *queue)
{
	struct net_buf *buf;
	int err;

	buf = get_first_buf_matching_chan(queue, chan);
	if (buf) {
		err = bt_att_chan_send(chan, buf);
		if (err) {
			/* Push it back if it could not be sent */
			k_queue_prepend(&queue->_queue, buf);
			return err;
		}

		return 0;
	}

	return -ENOENT;
}

/* Send requests without taking tx_sem */
static int chan_req_send(struct bt_att_chan *chan, struct bt_att_req *req)
{
	struct net_buf *buf;
	int err;

	if (bt_att_mtu(chan) < net_buf_frags_len(req->buf)) {
		return -EMSGSIZE;
	}

	LOG_DBG("chan %p req %p len %zu", chan, req, net_buf_frags_len(req->buf));

	chan->req = req;

	/* Release since bt_l2cap_send_cb takes ownership of the buffer */
	buf = req->buf;
	req->buf = NULL;

	/* This lock makes sure the value of `bt_att_mtu(chan)` does not
	 * change.
	 */
	k_sched_lock();
	err = bt_att_chan_send(chan, buf);
	if (err) {
		/* We still have the ownership of the buffer */
		req->buf = buf;
		chan->req = NULL;
	} else {
		bt_gatt_req_set_mtu(req, bt_att_mtu(chan));
	}
	k_sched_unlock();

	return err;
}
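
/* The scheduler lock above is a cheap way to keep the MTU stable: it
 * prevents other threads from preempting us and renegotiating the MTU
 * between the send and the bt_gatt_req_set_mtu() bookkeeping.
 */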

static void bt_att_sent(struct bt_l2cap_chan *ch)
{
	struct bt_att_chan *chan = ATT_CHAN(ch);
	struct bt_att *att = chan->att;
	int err;

	LOG_DBG("chan %p", chan);

	atomic_clear_bit(chan->flags, ATT_PENDING_SENT);

	if (!att) {
		LOG_DBG("Ignore sent on detached ATT chan");
		return;
	}

	/* Process pending requests first. Since they require a response, they
	 * can only be processed one at a time. If the other queues were
	 * processed first, they might always contain a buffer, starving the
	 * request queue.
	 */
	if (!chan->req && !sys_slist_is_empty(&att->reqs)) {
		sys_snode_t *node = sys_slist_get(&att->reqs);

		if (chan_req_send(chan, ATT_REQ(node)) >= 0) {
			return;
		}

		/* Prepend back to the list as it could not be sent */
		sys_slist_prepend(&att->reqs, node);
	}

	/* Process channel queue */
	err = process_queue(chan, &chan->tx_queue);
	if (!err) {
		return;
	}

	/* Process global queue */
	(void)process_queue(chan, &att->tx_queue);
}

static void chan_rebegin_att_timeout(struct bt_att_tx_meta_data *user_data)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *chan = data->att_chan;

	LOG_DBG("chan %p chan->req %p", chan, chan->req);

	if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
		LOG_ERR("ATT channel not connected");
		return;
	}

	/* Start timeout work. Only if we are sure that the request is really
	 * in-flight.
	 */
	if (chan->req) {
		k_work_reschedule(&chan->timeout_work, BT_ATT_TIMEOUT);
	}
}

static void chan_req_notif_sent(struct bt_att_tx_meta_data *user_data)
{
	struct bt_att_tx_meta_data *data = user_data;
	struct bt_att_chan *chan = data->att_chan;
	struct bt_conn *conn = chan->att->conn;
	bt_gatt_complete_func_t func = data->func;
	uint16_t attr_count = data->attr_count;
	void *ud = data->user_data;

	LOG_DBG("chan %p CID 0x%04X", chan, chan->chan.tx.cid);

	if (!atomic_test_bit(chan->flags, ATT_CONNECTED)) {
		LOG_ERR("ATT channel not connected");
		return;
	}

	if (func) {
		for (uint16_t i = 0; i < attr_count; i++) {
			func(conn, ud);
		}
	}
}

static void att_on_sent_cb(struct bt_att_tx_meta_data *meta)
{
	const att_type_t op_type = att_op_get_type(meta->opcode);

	LOG_DBG("opcode 0x%x", meta->opcode);

	if (!meta->att_chan ||
	    !meta->att_chan->att ||
	    !meta->att_chan->att->conn) {
		LOG_DBG("Bearer not connected, dropping ATT cb");
		return;
	}

	if (meta->err) {
		LOG_ERR("Got err %d, not calling ATT cb", meta->err);
		return;
	}

	if (!bt_att_is_enhanced(meta->att_chan)) {
		/* For EATT, L2CAP will call it after the SDU is fully sent. */
		LOG_DBG("UATT bearer, calling att_sent");
		att_sent(meta);
	}

	switch (op_type) {
	case ATT_RESPONSE:
		return;
	case ATT_CONFIRMATION:
		return;
	case ATT_REQUEST:
	case ATT_INDICATION:
		chan_rebegin_att_timeout(meta);
		return;
	case ATT_COMMAND:
	case ATT_NOTIFICATION:
		chan_req_notif_sent(meta);
		return;
	default:
		__ASSERT(false, "Unknown op type 0x%02X", op_type);
		return;
	}
}

static struct net_buf *bt_att_chan_create_pdu(struct bt_att_chan *chan, uint8_t op, size_t len)
{
	struct bt_att_hdr *hdr;
	struct net_buf *buf;
	struct bt_att_tx_meta_data *data;
	k_timeout_t timeout;

	if (len + sizeof(op) > bt_att_mtu(chan)) {
		LOG_WRN("ATT MTU exceeded, max %u, wanted %zu", bt_att_mtu(chan),
			len + sizeof(op));
		return NULL;
	}

	switch (att_op_get_type(op)) {
	case ATT_RESPONSE:
	case ATT_CONFIRMATION:
		/* Use a timeout only when responding/confirming */
		timeout = BT_ATT_TIMEOUT;
		break;
	default:
		timeout = K_FOREVER;
	}

	/* This will reserve headroom for lower layers */
	buf = bt_l2cap_create_pdu_timeout(&att_pool, 0, timeout);
	if (!buf) {
		LOG_ERR("Unable to allocate buffer for op 0x%02x", op);
		return NULL;
	}

	/* If we got a buf from `att_pool`, then the metadata slot at its index
	 * is officially ours to use.
	 */
	data = bt_att_get_tx_meta_data(buf);

	if (IS_ENABLED(CONFIG_BT_EATT)) {
		net_buf_reserve(buf, BT_L2CAP_SDU_BUF_SIZE(0));
	}

	data->att_chan = chan;

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = op;

	return buf;
}

static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf)
{
	LOG_DBG("chan %p flags %lu code 0x%02x", chan, atomic_get(chan->flags),
		((struct bt_att_hdr *)buf->data)->code);

	if (IS_ENABLED(CONFIG_BT_EATT) &&
	    !att_chan_matches_chan_opt(chan, bt_att_get_tx_meta_data(buf)->chan_opt)) {
		return -EINVAL;
	}

	return chan_send(chan, buf);
}

static void att_send_process(struct bt_att *att)
{
	struct bt_att_chan *chan, *tmp, *prev = NULL;
	int err = 0;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (err == -ENOENT && prev &&
		    (bt_att_is_enhanced(chan) == bt_att_is_enhanced(prev))) {
			/* If there was nothing to send for the previous channel and the current
			 * channel has the same "enhancedness", there will be nothing to send for
			 * this channel either.
			 */
			continue;
		}

		err = process_queue(chan, &att->tx_queue);
		if (!err) {
			/* Success */
			return;
		}

		prev = chan;
	}
}

static void bt_att_chan_send_rsp(struct bt_att_chan *chan, struct net_buf *buf)
{
	int err;

	err = chan_send(chan, buf);
	if (err) {
		/* Responses need to be sent back using the same channel */
		net_buf_put(&chan->tx_queue, buf);
	}
}

static void send_err_rsp(struct bt_att_chan *chan, uint8_t req, uint16_t handle,
			 uint8_t err)
{
	struct bt_att_error_rsp *rsp;
	struct net_buf *buf;

	/* Ignore opcode 0x00 */
	if (!req) {
		return;
	}

	buf = bt_att_chan_create_pdu(chan, BT_ATT_OP_ERROR_RSP, sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->request = req;
	rsp->handle = sys_cpu_to_le16(handle);
	rsp->error = err;

	bt_att_chan_send_rsp(chan, buf);
}
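
/* The resulting ATT_ERROR_RSP PDU is 5 octets: the opcode (0x01), the
 * request opcode that failed, the attribute handle in error
 * (little-endian), and the error code.
 */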

static uint8_t att_mtu_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_exchange_mtu_req *req;
	struct bt_att_exchange_mtu_rsp *rsp;
	struct net_buf *pdu;
	uint16_t mtu_client, mtu_server;

	/* Exchange MTU sub-procedure shall only be supported on the
	 * LE Fixed Channel Unenhanced ATT bearer.
	 */
	if (bt_att_is_enhanced(chan)) {
		return BT_ATT_ERR_NOT_SUPPORTED;
	}

	req = (void *)buf->data;

	mtu_client = sys_le16_to_cpu(req->mtu);

	LOG_DBG("Client MTU %u", mtu_client);

	/* Check if MTU is valid */
	if (mtu_client < BT_ATT_DEFAULT_LE_MTU) {
		return BT_ATT_ERR_INVALID_PDU;
	}

	pdu = bt_att_create_rsp_pdu(chan, BT_ATT_OP_MTU_RSP);
	if (!pdu) {
		return BT_ATT_ERR_UNLIKELY;
	}

	mtu_server = BT_LOCAL_ATT_MTU_UATT;

	LOG_DBG("Server MTU %u", mtu_server);

	rsp = net_buf_add(pdu, sizeof(*rsp));
	rsp->mtu = sys_cpu_to_le16(mtu_server);

	bt_att_chan_send_rsp(chan, pdu);

	/* The ATT_EXCHANGE_MTU_REQ/RSP is just an alternative way of
	 * communicating the L2CAP MTU.
	 */
	chan->chan.rx.mtu = mtu_server;
	chan->chan.tx.mtu = mtu_client;

	LOG_DBG("Negotiated MTU %u", bt_att_mtu(chan));

#if defined(CONFIG_BT_GATT_CLIENT)
	/* Mark the MTU Exchange as complete.
	 * This will skip sending ATT Exchange MTU from our side.
	 *
	 * Core 5.3 | Vol 3, Part F 3.4.2.2:
	 * If MTU is exchanged in one direction, that is sufficient for both directions.
	 */
	atomic_set_bit(chan->att->conn->flags, BT_CONN_ATT_MTU_EXCHANGED);
#endif /* CONFIG_BT_GATT_CLIENT */

	att_chan_mtu_updated(chan);

	return 0;
}
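
/* Worked example: if a client advertises an Rx MTU of 158 and
 * BT_LOCAL_ATT_MTU_UATT is 247, the code above sets rx.mtu = 247 and
 * tx.mtu = 158, so the negotiated bt_att_mtu() is MIN(247, 158) = 158.
 */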

static int bt_att_chan_req_send(struct bt_att_chan *chan,
				struct bt_att_req *req)
{
	__ASSERT_NO_MSG(chan);
	__ASSERT_NO_MSG(req);
	__ASSERT_NO_MSG(req->func);
	__ASSERT_NO_MSG(!chan->req);

	LOG_DBG("req %p", req);

	return chan_req_send(chan, req);
}

static void att_req_send_process(struct bt_att *att)
{
	struct bt_att_req *req = NULL;
	struct bt_att_chan *chan, *tmp, *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		/* If there is an ongoing transaction, do not use the channel */
		if (chan->req) {
			continue;
		}

		if (!req && prev && (bt_att_is_enhanced(chan) == bt_att_is_enhanced(prev))) {
			/* If there was nothing to send for the previous channel and the current
			 * channel has the same "enhancedness", there will be nothing to send for
			 * this channel either.
			 */
			continue;
		}

		prev = chan;

		/* Pull next request from the list */
		req = get_first_req_matching_chan(&att->reqs, chan);
		if (!req) {
			continue;
		}

		if (bt_att_chan_req_send(chan, req) >= 0) {
			return;
		}

		/* Prepend back to the list as it could not be sent */
		sys_slist_prepend(&att->reqs, &req->node);
	}
}

static uint8_t att_handle_rsp(struct bt_att_chan *chan, void *pdu, uint16_t len,
			      int err)
{
	bt_att_func_t func = NULL;
	void *params;

	LOG_DBG("chan %p err %d len %u: %s", chan, err, len, bt_hex(pdu, len));

	/* Cancel timeout if ongoing */
	k_work_cancel_delayable(&chan->timeout_work);

	if (!chan->req) {
		LOG_WRN("No pending ATT request");
		goto process;
	}

	/* Check if request has been cancelled */
	if (chan->req == &cancel) {
		chan->req = NULL;
		goto process;
	}

	/* Reset func so it can be reused by the callback */
	func = chan->req->func;
	chan->req->func = NULL;
	params = chan->req->user_data;

	/* Free the allocated request so its memory can be reused */
	bt_att_req_free(chan->req);
	chan->req = NULL;

process:
	/* Process pending requests */
	att_req_send_process(chan->att);
	if (func) {
		func(chan->att->conn, err, pdu, len, params);
	}

	return 0;
}

#if defined(CONFIG_BT_GATT_CLIENT)
static uint8_t att_mtu_rsp(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_exchange_mtu_rsp *rsp;
	uint16_t mtu;

	rsp = (void *)buf->data;

	mtu = sys_le16_to_cpu(rsp->mtu);

	LOG_DBG("Server MTU %u", mtu);

	/* Check if MTU is valid */
	if (mtu < BT_ATT_DEFAULT_LE_MTU) {
		return att_handle_rsp(chan, NULL, 0, BT_ATT_ERR_INVALID_PDU);
	}

	/* The following must equal the value we sent in the req. We assume this
	 * is a rsp to `gatt_exchange_mtu_encode`.
	 */
	chan->chan.rx.mtu = BT_LOCAL_ATT_MTU_UATT;
	/* The ATT_EXCHANGE_MTU_REQ/RSP is just an alternative way of
	 * communicating the L2CAP MTU.
	 */

	chan->chan.tx.mtu = mtu;

	LOG_DBG("Negotiated MTU %u", bt_att_mtu(chan));

	att_chan_mtu_updated(chan);

	return att_handle_rsp(chan, rsp, buf->len, 0);
}
#endif /* CONFIG_BT_GATT_CLIENT */

static bool range_is_valid(uint16_t start, uint16_t end, uint16_t *err)
{
	/* Handle 0 is invalid */
	if (!start || !end) {
		if (err) {
			*err = 0U;
		}
		return false;
	}

	/* Check if range is valid */
	if (start > end) {
		if (err) {
			*err = start;
		}
		return false;
	}

	return true;
}
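
/* Examples: the range 0x0001..0xFFFF is valid; any range with a zero
 * endpoint fails with *err = 0 (handle 0 is invalid); 0x0010..0x0002 fails
 * with *err = 0x0010, since the start handle is the offending one.
 */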

struct find_info_data {
	struct bt_att_chan *chan;
	struct net_buf *buf;
	struct bt_att_find_info_rsp *rsp;
	union {
		struct bt_att_info_16 *info16;
		struct bt_att_info_128 *info128;
	};
};

static uint8_t find_info_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct find_info_data *data = user_data;
	struct bt_att_chan *chan = data->chan;

	LOG_DBG("handle 0x%04x", handle);

	/* Initialize rsp at first entry */
	if (!data->rsp) {
		data->rsp = net_buf_add(data->buf, sizeof(*data->rsp));
		data->rsp->format = (attr->uuid->type == BT_UUID_TYPE_16) ?
				    BT_ATT_INFO_16 : BT_ATT_INFO_128;
	}

	switch (data->rsp->format) {
	case BT_ATT_INFO_16:
		if (attr->uuid->type != BT_UUID_TYPE_16) {
			return BT_GATT_ITER_STOP;
		}

		/* Fast forward to next item position */
		data->info16 = net_buf_add(data->buf, sizeof(*data->info16));
		data->info16->handle = sys_cpu_to_le16(handle);
		data->info16->uuid = sys_cpu_to_le16(BT_UUID_16(attr->uuid)->val);

		if (bt_att_mtu(chan) - data->buf->len >
		    sizeof(*data->info16)) {
			return BT_GATT_ITER_CONTINUE;
		}

		break;
	case BT_ATT_INFO_128:
		if (attr->uuid->type != BT_UUID_TYPE_128) {
			return BT_GATT_ITER_STOP;
		}

		/* Fast forward to next item position */
		data->info128 = net_buf_add(data->buf, sizeof(*data->info128));
		data->info128->handle = sys_cpu_to_le16(handle);
		memcpy(data->info128->uuid, BT_UUID_128(attr->uuid)->val,
		       sizeof(data->info128->uuid));

		if (bt_att_mtu(chan) - data->buf->len >
		    sizeof(*data->info128)) {
			return BT_GATT_ITER_CONTINUE;
		}
	}

	return BT_GATT_ITER_STOP;
}

static uint8_t att_find_info_rsp(struct bt_att_chan *chan, uint16_t start_handle,
				 uint16_t end_handle)
{
	struct find_info_data data;

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_FIND_INFO_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	bt_gatt_foreach_attr(start_handle, end_handle, find_info_cb, &data);

	if (!data.rsp) {
		net_buf_unref(data.buf);
		/* Respond since handle is set */
		send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, start_handle,
			     BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_find_info_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_find_info_req *req;
	uint16_t start_handle, end_handle, err_handle;

	req = (void *)buf->data;

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x", start_handle, end_handle);

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	return att_find_info_rsp(chan, start_handle, end_handle);
}

struct find_type_data {
	struct bt_att_chan *chan;
	struct net_buf *buf;
	struct bt_att_handle_group *group;
	const void *value;
	uint8_t value_len;
	uint8_t err;
};

static uint8_t find_type_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct find_type_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	int read;
	uint8_t uuid[16];
	struct net_buf *frag;
	size_t len;

	/* Skip secondary services */
	if (!bt_uuid_cmp(attr->uuid, BT_UUID_GATT_SECONDARY)) {
		goto skip;
	}

	/* Update group end_handle if not a primary service */
	if (bt_uuid_cmp(attr->uuid, BT_UUID_GATT_PRIMARY)) {
		if (data->group &&
		    handle > sys_le16_to_cpu(data->group->end_handle)) {
			data->group->end_handle = sys_cpu_to_le16(handle);
		}
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/* Stop if there is no space left */
	if (bt_att_mtu(chan) - net_buf_frags_len(data->buf) <
	    sizeof(*data->group)) {
		return BT_GATT_ITER_STOP;
	}

	frag = net_buf_frag_last(data->buf);

	len = MIN(bt_att_mtu(chan) - net_buf_frags_len(data->buf),
		  net_buf_tailroom(frag));
	if (!len) {
		frag = net_buf_alloc(net_buf_pool_get(data->buf->pool_id),
				     K_NO_WAIT);
		/* If no buffer can be allocated immediately, stop */
		if (!frag) {
			return BT_GATT_ITER_STOP;
		}

		net_buf_frag_add(data->buf, frag);
	}

	/* Read attribute value and store in the buffer */
	read = attr->read(conn, attr, uuid, sizeof(uuid), 0);
	if (read < 0) {
		/*
		 * Since we don't know if it is the service with the requested
		 * UUID, we cannot respond with an error to this request.
		 */
		goto skip;
	}

	/* Check if data matches */
	if (read != data->value_len) {
		/* Use bt_uuid_cmp() to compare UUIDs of different form. */
		struct bt_uuid_128 ref_uuid;
		struct bt_uuid_128 recvd_uuid;

		if (!bt_uuid_create(&recvd_uuid.uuid, data->value, data->value_len)) {
			LOG_WRN("Unable to create UUID: size %u", data->value_len);
			goto skip;
		}
		if (!bt_uuid_create(&ref_uuid.uuid, uuid, read)) {
			LOG_WRN("Unable to create UUID: size %d", read);
			goto skip;
		}
		if (bt_uuid_cmp(&recvd_uuid.uuid, &ref_uuid.uuid)) {
			goto skip;
		}
	} else if (memcmp(data->value, uuid, read)) {
		goto skip;
	}

	/* If the service has been found, the error should be cleared */
	data->err = 0x00;

	/* Fast forward to next item position */
	data->group = net_buf_add(frag, sizeof(*data->group));
	data->group->start_handle = sys_cpu_to_le16(handle);
	data->group->end_handle = sys_cpu_to_le16(handle);

	/* Continue to find the end_handle */
	return BT_GATT_ITER_CONTINUE;

skip:
	data->group = NULL;
	return BT_GATT_ITER_CONTINUE;
}

static uint8_t att_find_type_rsp(struct bt_att_chan *chan, uint16_t start_handle,
				 uint16_t end_handle, const void *value,
				 uint8_t value_len)
{
	struct find_type_data data;

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_FIND_TYPE_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.group = NULL;
	data.value = value;
	data.value_len = value_len;

	/* Pre-set error in case no service will be found */
	data.err = BT_ATT_ERR_ATTRIBUTE_NOT_FOUND;

	bt_gatt_foreach_attr(start_handle, end_handle, find_type_cb, &data);

	/* If the error has not been cleared, no service has been found */
	if (data.err) {
		net_buf_unref(data.buf);
		/* Respond since handle is set */
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
			     data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_find_type_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_find_type_req *req;
	uint16_t start_handle, end_handle, err_handle, type;
	uint8_t *value;

	req = net_buf_pull_mem(buf, sizeof(*req));

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);
	type = sys_le16_to_cpu(req->type);
	value = buf->data;

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %u", start_handle, end_handle, type);

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	/* The Attribute Protocol Find By Type Value Request shall be used with
	 * the Attribute Type parameter set to the UUID for "Primary Service"
	 * and the Attribute Value set to the 16-bit Bluetooth UUID or 128-bit
	 * UUID for the specific primary service.
	 */
	if (bt_uuid_cmp(BT_UUID_DECLARE_16(type), BT_UUID_GATT_PRIMARY)) {
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
			     BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
		return 0;
	}

	return att_find_type_rsp(chan, start_handle, end_handle, value,
				 buf->len);
}

static uint8_t err_to_att(int err)
{
	LOG_DBG("%d", err);

	if (err < 0 && err >= -0xff) {
		return -err;
	}

	return BT_ATT_ERR_UNLIKELY;
}
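
/* For example, an attribute read callback that returns
 * BT_GATT_ERR(BT_ATT_ERR_INVALID_OFFSET) (a negated ATT error code) is
 * mapped straight back to BT_ATT_ERR_INVALID_OFFSET here; anything outside
 * the 8-bit range falls back to BT_ATT_ERR_UNLIKELY.
 */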

struct read_type_data {
	struct bt_att_chan *chan;
	struct bt_uuid *uuid;
	struct net_buf *buf;
	struct bt_att_read_type_rsp *rsp;
	struct bt_att_data *item;
	uint8_t err;
};

typedef bool (*attr_read_cb)(struct net_buf *buf, ssize_t read,
			     void *user_data);

static bool attr_read_authorize(struct bt_conn *conn,
				const struct bt_gatt_attr *attr)
{
	if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
		return true;
	}

	if (!authorization_cb || !authorization_cb->read_authorize) {
		return true;
	}

	return authorization_cb->read_authorize(conn, attr);
}

static bool attr_read_type_cb(struct net_buf *frag, ssize_t read,
			      void *user_data)
{
	struct read_type_data *data = user_data;

	if (!data->rsp->len) {
		/* Set len to be the first item found */
		data->rsp->len = read + sizeof(*data->item);
	} else if (data->rsp->len != read + sizeof(*data->item)) {
		/* All items should have the same size */
		frag->len -= sizeof(*data->item);
		data->item = NULL;
		return false;
	}

	return true;
}

static ssize_t att_chan_read(struct bt_att_chan *chan,
			     const struct bt_gatt_attr *attr,
			     struct net_buf *buf, uint16_t offset,
			     attr_read_cb cb, void *user_data)
{
	struct bt_conn *conn = chan->chan.chan.conn;
	ssize_t read;
	struct net_buf *frag;
	size_t len, total = 0;

	if (bt_att_mtu(chan) <= net_buf_frags_len(buf)) {
		return 0;
	}

	frag = net_buf_frag_last(buf);

	/* Create necessary fragments if MTU is bigger than what a buffer can
	 * hold.
	 */
	do {
		len = MIN(bt_att_mtu(chan) - net_buf_frags_len(buf),
			  net_buf_tailroom(frag));
		if (!len) {
			frag = net_buf_alloc(net_buf_pool_get(buf->pool_id),
					     K_NO_WAIT);
			/* If no buffer can be allocated immediately, return */
			if (!frag) {
				return total;
			}

			net_buf_frag_add(buf, frag);

			len = MIN(bt_att_mtu(chan) - net_buf_frags_len(buf),
				  net_buf_tailroom(frag));
		}

		read = attr->read(conn, attr, frag->data + frag->len, len,
				  offset);
		if (read < 0) {
			if (total) {
				return total;
			}

			return read;
		}

		if (cb && !cb(frag, read, user_data)) {
			break;
		}

		net_buf_add(frag, read);
		total += read;
		offset += read;
	} while (bt_att_mtu(chan) > net_buf_frags_len(buf) && read == len);

	return total;
}

static uint8_t read_type_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			    void *user_data)
{
	struct read_type_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	ssize_t read;

	/* Skip if the UUID doesn't match */
	if (bt_uuid_cmp(attr->uuid, data->uuid)) {
		return BT_GATT_ITER_CONTINUE;
	}

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If an attribute in the set of requested attributes would cause an
	 * Error Response then this attribute cannot be included in a
	 * Read By Type Response and the attributes before this attribute
	 * shall be returned.
	 *
	 * If the first attribute in the set of requested attributes would
	 * cause an Error Response then no other attributes in the requested
	 * attributes can be considered.
	 */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		if (data->rsp->len) {
			data->err = 0x00;
		}
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_read_authorize(conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/*
	 * If any attribute is found in the handle range, the error should be
	 * changed from the pre-set "attribute not found" error to no error.
	 */
	data->err = 0x00;

	/* Fast forward to next item position */
	data->item = net_buf_add(net_buf_frag_last(data->buf),
				 sizeof(*data->item));
	data->item->handle = sys_cpu_to_le16(handle);

	read = att_chan_read(chan, attr, data->buf, 0, attr_read_type_cb, data);
	if (read < 0) {
		data->err = err_to_att(read);
		return BT_GATT_ITER_STOP;
	}

	if (!data->item) {
		return BT_GATT_ITER_STOP;
	}

	/* Continue only if there is still space for more items */
	return bt_att_mtu(chan) - net_buf_frags_len(data->buf) >
	       data->rsp->len ? BT_GATT_ITER_CONTINUE : BT_GATT_ITER_STOP;
}

static uint8_t att_read_type_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
				 uint16_t start_handle, uint16_t end_handle)
{
	struct read_type_data data;

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_TYPE_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.uuid = uuid;
	data.rsp = net_buf_add(data.buf, sizeof(*data.rsp));
	data.rsp->len = 0U;

	/* Pre-set error in case no attribute is found in the handle range */
	data.err = BT_ATT_ERR_ATTRIBUTE_NOT_FOUND;

	bt_gatt_foreach_attr(start_handle, end_handle, read_type_cb, &data);

	if (data.err) {
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, start_handle,
			     data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_read_type_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_type_req *req;
	uint16_t start_handle, end_handle, err_handle;
	union {
		struct bt_uuid uuid;
		struct bt_uuid_16 u16;
		struct bt_uuid_128 u128;
	} u;
	uint8_t uuid_len = buf->len - sizeof(*req);

	/* Type can only be UUID16 or UUID128 */
	if (uuid_len != 2 && uuid_len != 16) {
		return BT_ATT_ERR_INVALID_PDU;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));

	start_handle = sys_le16_to_cpu(req->start_handle);
	end_handle = sys_le16_to_cpu(req->end_handle);
	if (!bt_uuid_create(&u.uuid, req->uuid, uuid_len)) {
		return BT_ATT_ERR_UNLIKELY;
	}

	LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %s", start_handle, end_handle,
		bt_uuid_str(&u.uuid));

	if (!range_is_valid(start_handle, end_handle, &err_handle)) {
		send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, err_handle,
			     BT_ATT_ERR_INVALID_HANDLE);
		return 0;
	}

	return att_read_type_rsp(chan, &u.uuid, start_handle, end_handle);
}

struct read_data {
	struct bt_att_chan *chan;
	uint16_t offset;
	struct net_buf *buf;
	uint8_t err;
};

static uint8_t read_cb(const struct bt_gatt_attr *attr, uint16_t handle,
		       void *user_data)
{
	struct read_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	int ret;

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If any attribute is found in the handle range, the error should be
	 * changed from the pre-set "invalid handle" error to no error.
	 */
	data->err = 0x00;

	/* Check attribute permissions */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_read_authorize(conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/* Read attribute value and store in the buffer */
	ret = att_chan_read(chan, attr, data->buf, data->offset, NULL, NULL);
	if (ret < 0) {
		data->err = err_to_att(ret);
		return BT_GATT_ITER_STOP;
	}

	return BT_GATT_ITER_CONTINUE;
}

static uint8_t att_read_rsp(struct bt_att_chan *chan, uint8_t op, uint8_t rsp,
			    uint16_t handle, uint16_t offset)
{
	struct read_data data;

	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	if (!handle) {
		return BT_ATT_ERR_INVALID_HANDLE;
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, rsp);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;
	data.offset = offset;

	/* Pre-set error in case no attribute is found in the handle range */
	data.err = BT_ATT_ERR_INVALID_HANDLE;

	bt_gatt_foreach_attr(handle, handle, read_cb, &data);

	/* In case of error discard data and respond with an error */
	if (data.err) {
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, op, handle, data.err);
		return 0;
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}

static uint8_t att_read_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_req *req;
	uint16_t handle;

	req = (void *)buf->data;

	handle = sys_le16_to_cpu(req->handle);

	LOG_DBG("handle 0x%04x", handle);

	return att_read_rsp(chan, BT_ATT_OP_READ_REQ, BT_ATT_OP_READ_RSP,
			    handle, 0);
}

static uint8_t att_read_blob_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct bt_att_read_blob_req *req;
	uint16_t handle, offset;

	req = (void *)buf->data;

	handle = sys_le16_to_cpu(req->handle);
	offset = sys_le16_to_cpu(req->offset);

	LOG_DBG("handle 0x%04x offset %u", handle, offset);

	return att_read_rsp(chan, BT_ATT_OP_READ_BLOB_REQ,
			    BT_ATT_OP_READ_BLOB_RSP, handle, offset);
}

#if defined(CONFIG_BT_GATT_READ_MULTIPLE)
static uint8_t att_read_mult_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct read_data data;
	uint16_t handle;

	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_MULT_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;

	while (buf->len >= sizeof(uint16_t)) {
		handle = net_buf_pull_le16(buf);

		LOG_DBG("handle 0x%04x ", handle);

		/* An Error Response shall be sent by the server in response to
		 * the Read Multiple Request [....] if a read operation is not
		 * permitted on any of the Characteristic Values.
		 *
		 * If the handle is not valid, return an invalid handle error.
		 * If the handle is found, the error will be cleared by read_cb.
		 */
		data.err = BT_ATT_ERR_INVALID_HANDLE;

		bt_gatt_foreach_attr(handle, handle, read_cb, &data);

		/* Stop reading in case of error */
		if (data.err) {
			net_buf_unref(data.buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, BT_ATT_OP_READ_MULT_REQ, handle,
				     data.err);
			return 0;
		}
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
#endif /* CONFIG_BT_GATT_READ_MULTIPLE */

#if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
static uint8_t read_vl_cb(const struct bt_gatt_attr *attr, uint16_t handle,
			  void *user_data)
{
	struct read_data *data = user_data;
	struct bt_att_chan *chan = data->chan;
	struct bt_conn *conn = chan->chan.chan.conn;
	struct bt_att_read_mult_vl_rsp *rsp;
	int read;

	LOG_DBG("handle 0x%04x", handle);

	/*
	 * If any attribute is found in the handle range, the error should be
	 * changed from the pre-set "invalid handle" error to no error.
	 */
	data->err = 0x00;

	/* Check attribute permissions */
	data->err = bt_gatt_check_perm(conn, attr, BT_GATT_PERM_READ_MASK);
	if (data->err) {
		return BT_GATT_ITER_STOP;
	}

	/* Check the attribute authorization logic */
	if (!attr_read_authorize(conn, attr)) {
		data->err = BT_ATT_ERR_AUTHORIZATION;
		return BT_GATT_ITER_STOP;
	}

	/* The Length Value Tuple List may be truncated within the first two
	 * octets of a tuple due to the size limits of the current ATT_MTU.
	 */
	if (bt_att_mtu(chan) - data->buf->len < 2) {
		return BT_GATT_ITER_STOP;
	}

	rsp = net_buf_add(data->buf, sizeof(*rsp));

	read = att_chan_read(chan, attr, data->buf, data->offset, NULL, NULL);
	if (read < 0) {
		data->err = err_to_att(read);
		return BT_GATT_ITER_STOP;
	}

	rsp->len = read;

	return BT_GATT_ITER_CONTINUE;
}

static uint8_t att_read_mult_vl_req(struct bt_att_chan *chan, struct net_buf *buf)
{
	struct read_data data;
	uint16_t handle;

	if (!bt_gatt_change_aware(chan->att->conn, true)) {
		if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
			return BT_ATT_ERR_DB_OUT_OF_SYNC;
		} else {
			return 0;
		}
	}

	(void)memset(&data, 0, sizeof(data));

	data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_MULT_VL_RSP);
	if (!data.buf) {
		return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
	}

	data.chan = chan;

	while (buf->len >= sizeof(uint16_t)) {
		handle = net_buf_pull_le16(buf);

		LOG_DBG("handle 0x%04x ", handle);

		/* If the handle is not valid, return an invalid handle error.
		 * If the handle is found, the error will be cleared by
		 * read_vl_cb.
		 */
		data.err = BT_ATT_ERR_INVALID_HANDLE;

		bt_gatt_foreach_attr(handle, handle, read_vl_cb, &data);

		/* Stop reading in case of error */
		if (data.err) {
			net_buf_unref(data.buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, BT_ATT_OP_READ_MULT_VL_REQ, handle,
				     data.err);
			return 0;
		}
	}

	bt_att_chan_send_rsp(chan, data.buf);

	return 0;
}
#endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
1830
1831 struct read_group_data {
1832 struct bt_att_chan *chan;
1833 struct bt_uuid *uuid;
1834 struct net_buf *buf;
1835 struct bt_att_read_group_rsp *rsp;
1836 struct bt_att_group_data *group;
1837 };
1838
attr_read_group_cb(struct net_buf * frag,ssize_t read,void * user_data)1839 static bool attr_read_group_cb(struct net_buf *frag, ssize_t read,
1840 void *user_data)
1841 {
1842 struct read_group_data *data = user_data;
1843
1844 if (!data->rsp->len) {
1845 /* Set len to be the first group found */
1846 data->rsp->len = read + sizeof(*data->group);
1847 } else if (data->rsp->len != read + sizeof(*data->group)) {
1848 /* All groups entries should have the same size */
1849 data->buf->len -= sizeof(*data->group);
1850 data->group = NULL;
1851 return false;
1852 }
1853
1854 return true;
1855 }
1856
read_group_cb(const struct bt_gatt_attr * attr,uint16_t handle,void * user_data)1857 static uint8_t read_group_cb(const struct bt_gatt_attr *attr, uint16_t handle,
1858 void *user_data)
1859 {
1860 struct read_group_data *data = user_data;
1861 struct bt_att_chan *chan = data->chan;
1862 int read;
1863
1864 /* Update group end_handle if attribute is not a service */
1865 if (bt_uuid_cmp(attr->uuid, BT_UUID_GATT_PRIMARY) &&
1866 bt_uuid_cmp(attr->uuid, BT_UUID_GATT_SECONDARY)) {
1867 if (data->group &&
1868 handle > sys_le16_to_cpu(data->group->end_handle)) {
1869 data->group->end_handle = sys_cpu_to_le16(handle);
1870 }
1871 return BT_GATT_ITER_CONTINUE;
1872 }
1873
1874 /* If Group Type don't match skip */
1875 if (bt_uuid_cmp(attr->uuid, data->uuid)) {
1876 data->group = NULL;
1877 return BT_GATT_ITER_CONTINUE;
1878 }
1879
1880 LOG_DBG("handle 0x%04x", handle);
1881
1882 /* Stop if there is no space left */
1883 if (data->rsp->len &&
1884 bt_att_mtu(chan) - data->buf->len < data->rsp->len) {
1885 return BT_GATT_ITER_STOP;
1886 }
1887
1888 /* Fast forward to next group position */
1889 data->group = net_buf_add(data->buf, sizeof(*data->group));
1890
1891 /* Initialize group handle range */
1892 data->group->start_handle = sys_cpu_to_le16(handle);
1893 data->group->end_handle = sys_cpu_to_le16(handle);
1894
1895 /* Read attribute value and store in the buffer */
1896 read = att_chan_read(chan, attr, data->buf, 0, attr_read_group_cb,
1897 data);
1898 if (read < 0) {
1899 /* TODO: Handle read errors */
1900 return BT_GATT_ITER_STOP;
1901 }
1902
1903 if (!data->group) {
1904 return BT_GATT_ITER_STOP;
1905 }
1906
1907 /* continue only if there are still space for more items */
1908 return BT_GATT_ITER_CONTINUE;
1909 }
1910
1911 static uint8_t att_read_group_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
1912 uint16_t start_handle, uint16_t end_handle)
1913 {
1914 struct read_group_data data;
1915
1916 (void)memset(&data, 0, sizeof(data));
1917
1918 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_READ_GROUP_RSP);
1919 if (!data.buf) {
1920 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
1921 }
1922
1923 data.chan = chan;
1924 data.uuid = uuid;
1925 data.rsp = net_buf_add(data.buf, sizeof(*data.rsp));
1926 data.rsp->len = 0U;
1927 data.group = NULL;
1928
1929 bt_gatt_foreach_attr(start_handle, end_handle, read_group_cb, &data);
1930
1931 if (!data.rsp->len) {
1932 net_buf_unref(data.buf);
1933 /* Respond here since handle is set */
1934 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
1935 BT_ATT_ERR_ATTRIBUTE_NOT_FOUND);
1936 return 0;
1937 }
1938
1939 bt_att_chan_send_rsp(chan, data.buf);
1940
1941 return 0;
1942 }
1943
1944 static uint8_t att_read_group_req(struct bt_att_chan *chan, struct net_buf *buf)
1945 {
1946 struct bt_att_read_group_req *req;
1947 uint16_t start_handle, end_handle, err_handle;
1948 union {
1949 struct bt_uuid uuid;
1950 struct bt_uuid_16 u16;
1951 struct bt_uuid_128 u128;
1952 } u;
1953 uint8_t uuid_len = buf->len - sizeof(*req);
1954
1955 /* Type can only be UUID16 or UUID128 */
1956 if (uuid_len != 2 && uuid_len != 16) {
1957 return BT_ATT_ERR_INVALID_PDU;
1958 }
1959
1960 req = net_buf_pull_mem(buf, sizeof(*req));
1961
1962 start_handle = sys_le16_to_cpu(req->start_handle);
1963 end_handle = sys_le16_to_cpu(req->end_handle);
1964
1965 if (!bt_uuid_create(&u.uuid, req->uuid, uuid_len)) {
1966 return BT_ATT_ERR_UNLIKELY;
1967 }
1968
1969 LOG_DBG("start_handle 0x%04x end_handle 0x%04x type %s", start_handle, end_handle,
1970 bt_uuid_str(&u.uuid));
1971
1972 if (!range_is_valid(start_handle, end_handle, &err_handle)) {
1973 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, err_handle,
1974 BT_ATT_ERR_INVALID_HANDLE);
1975 return 0;
1976 }
1977
1978 /* Core v4.2, Vol 3, sec 2.5.3 Attribute Grouping:
1979 * Not all of the grouping attributes can be used in the ATT
1980 * Read By Group Type Request. The "Primary Service" and "Secondary
1981 * Service" grouping types may be used in the Read By Group Type
1982 * Request. The "Characteristic" grouping type shall not be used in
1983 * the ATT Read By Group Type Request.
1984 */
1985 if (bt_uuid_cmp(&u.uuid, BT_UUID_GATT_PRIMARY) &&
1986 bt_uuid_cmp(&u.uuid, BT_UUID_GATT_SECONDARY)) {
1987 send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
1988 BT_ATT_ERR_UNSUPPORTED_GROUP_TYPE);
1989 return 0;
1990 }
1991
1992 return att_read_group_rsp(chan, &u.uuid, start_handle, end_handle);
1993 }
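
/* Sketch of the PDUs served above (opcodes per the ATT protocol, shown here
 * only for orientation):
 *
 *   ATT_READ_BY_GROUP_TYPE_REQ (0x10):
 *     | 0x10 | start handle (2) | end handle (2) | group UUID (2 or 16) |
 *   ATT_READ_BY_GROUP_TYPE_RSP (0x11):
 *     | 0x11 | len (1) | { start (2) | end (2) | value } repeated |
 *
 * Every entry in one response shares the same len, which is why
 * attr_read_group_cb() drops an entry whose size differs from the first.
 */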
1994
1995 struct write_data {
1996 struct bt_conn *conn;
1997 struct net_buf *buf;
1998 uint8_t req;
1999 const void *value;
2000 uint16_t len;
2001 uint16_t offset;
2002 uint8_t err;
2003 };
2004
2005 static bool attr_write_authorize(struct bt_conn *conn,
2006 const struct bt_gatt_attr *attr)
2007 {
2008 if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
2009 return true;
2010 }
2011
2012 if (!authorization_cb || !authorization_cb->write_authorize) {
2013 return true;
2014 }
2015
2016 return authorization_cb->write_authorize(conn, attr);
2017 }
2018
2019 static uint8_t write_cb(const struct bt_gatt_attr *attr, uint16_t handle,
2020 void *user_data)
2021 {
2022 struct write_data *data = user_data;
2023 int write;
2024 uint8_t flags = 0U;
2025
2026 LOG_DBG("handle 0x%04x offset %u", handle, data->offset);
2027
2028 /* Check attribute permissions */
2029 data->err = bt_gatt_check_perm(data->conn, attr,
2030 BT_GATT_PERM_WRITE_MASK);
2031 if (data->err) {
2032 return BT_GATT_ITER_STOP;
2033 }
2034
2035 /* Check the attribute authorization logic */
2036 if (!attr_write_authorize(data->conn, attr)) {
2037 data->err = BT_ATT_ERR_AUTHORIZATION;
2038 return BT_GATT_ITER_STOP;
2039 }
2040
2041 /* Set command flag if not a request */
2042 if (!data->req) {
2043 flags |= BT_GATT_WRITE_FLAG_CMD;
2044 } else if (data->req == BT_ATT_OP_EXEC_WRITE_REQ) {
2045 flags |= BT_GATT_WRITE_FLAG_EXECUTE;
2046 }
2047
2048 /* Write attribute value */
2049 write = attr->write(data->conn, attr, data->value, data->len,
2050 data->offset, flags);
2051 if (write < 0 || write != data->len) {
2052 data->err = err_to_att(write);
2053 return BT_GATT_ITER_STOP;
2054 }
2055
2056 data->err = 0U;
2057
2058 return BT_GATT_ITER_CONTINUE;
2059 }
2060
2061 static uint8_t att_write_rsp(struct bt_att_chan *chan, uint8_t req, uint8_t rsp,
2062 uint16_t handle, uint16_t offset, const void *value,
2063 uint16_t len)
2064 {
2065 struct write_data data;
2066
2067 if (!bt_gatt_change_aware(chan->att->conn, req ? true : false)) {
2068 if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
2069 return BT_ATT_ERR_DB_OUT_OF_SYNC;
2070 } else {
2071 return 0;
2072 }
2073 }
2074
2075 if (!handle) {
2076 return BT_ATT_ERR_INVALID_HANDLE;
2077 }
2078
2079 (void)memset(&data, 0, sizeof(data));
2080
2081 /* Only allocate buf if required to respond */
2082 if (rsp) {
2083 data.buf = bt_att_chan_create_pdu(chan, rsp, 0);
2084 if (!data.buf) {
2085 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
2086 }
2087 }
2088
2089 data.conn = chan->att->conn;
2090 data.req = req;
2091 data.offset = offset;
2092 data.value = value;
2093 data.len = len;
2094 data.err = BT_ATT_ERR_INVALID_HANDLE;
2095
2096 bt_gatt_foreach_attr(handle, handle, write_cb, &data);
2097
2098 if (data.err) {
2099 /* In case of error discard data and respond with an error */
2100 if (rsp) {
2101 net_buf_unref(data.buf);
2102 /* Respond here since handle is set */
2103 send_err_rsp(chan, req, handle, data.err);
2104 }
2105 return req == BT_ATT_OP_EXEC_WRITE_REQ ? data.err : 0;
2106 }
2107
2108 if (data.buf) {
2109 bt_att_chan_send_rsp(chan, data.buf);
2110 }
2111
2112 return 0;
2113 }
2114
2115 static uint8_t att_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2116 {
2117 uint16_t handle;
2118
2119 handle = net_buf_pull_le16(buf);
2120
2121 LOG_DBG("handle 0x%04x", handle);
2122
2123 return att_write_rsp(chan, BT_ATT_OP_WRITE_REQ, BT_ATT_OP_WRITE_RSP,
2124 handle, 0, buf->data, buf->len);
2125 }
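
/* Sketch of the Write Request exchange served above:
 *
 *   ATT_WRITE_REQ (0x12): | 0x12 | handle (2) | value (0 to ATT_MTU - 3) |
 *   ATT_WRITE_RSP (0x13): | 0x13 |
 *
 * The response carries no parameters. att_write_rsp() only allocates a
 * buffer when a response opcode is passed, so the Write Command path below
 * reuses it with req == 0 and rsp == 0 and never answers.
 */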
2126
2127 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
2128 struct prep_data {
2129 struct bt_conn *conn;
2130 struct net_buf *buf;
2131 const void *value;
2132 uint16_t len;
2133 uint16_t offset;
2134 uint8_t err;
2135 };
2136
2137 static uint8_t prep_write_cb(const struct bt_gatt_attr *attr, uint16_t handle,
2138 void *user_data)
2139 {
2140 struct prep_data *data = user_data;
2141 struct bt_attr_data *attr_data;
2142 int write;
2143
2144 LOG_DBG("handle 0x%04x offset %u", handle, data->offset);
2145
2146 /* Check attribute permissions */
2147 data->err = bt_gatt_check_perm(data->conn, attr,
2148 BT_GATT_PERM_WRITE_MASK);
2149 if (data->err) {
2150 return BT_GATT_ITER_STOP;
2151 }
2152
2153 /* Check the attribute authorization logic */
2154 if (!attr_write_authorize(data->conn, attr)) {
2155 data->err = BT_ATT_ERR_AUTHORIZATION;
2156 return BT_GATT_ITER_STOP;
2157 }
2158
2159 /* Check if the attribute requires the handler to accept the data */
2160 if (!(attr->perm & BT_GATT_PERM_PREPARE_WRITE)) {
2161 goto append;
2162 }
2163
2164 /* Write attribute value to check if device is authorized */
2165 write = attr->write(data->conn, attr, data->value, data->len,
2166 data->offset, BT_GATT_WRITE_FLAG_PREPARE);
2167 if (write != 0) {
2168 data->err = err_to_att(write);
2169 return BT_GATT_ITER_STOP;
2170 }
2171
2172 append:
2173 /* Copy data into the outstanding queue */
2174 data->buf = net_buf_alloc(&prep_pool, K_NO_WAIT);
2175 if (!data->buf) {
2176 data->err = BT_ATT_ERR_PREPARE_QUEUE_FULL;
2177 return BT_GATT_ITER_STOP;
2178 }
2179
2180 attr_data = net_buf_user_data(data->buf);
2181 attr_data->handle = handle;
2182 attr_data->offset = data->offset;
2183
2184 net_buf_add_mem(data->buf, data->value, data->len);
2185
2186 data->err = 0U;
2187
2188 return BT_GATT_ITER_CONTINUE;
2189 }
2190
2191 static uint8_t att_prep_write_rsp(struct bt_att_chan *chan, uint16_t handle,
2192 uint16_t offset, const void *value, uint8_t len)
2193 {
2194 struct prep_data data;
2195 struct bt_att_prepare_write_rsp *rsp;
2196
2197 if (!bt_gatt_change_aware(chan->att->conn, true)) {
2198 if (!atomic_test_and_set_bit(chan->flags, ATT_OUT_OF_SYNC_SENT)) {
2199 return BT_ATT_ERR_DB_OUT_OF_SYNC;
2200 } else {
2201 return 0;
2202 }
2203 }
2204
2205 if (!handle) {
2206 return BT_ATT_ERR_INVALID_HANDLE;
2207 }
2208
2209 (void)memset(&data, 0, sizeof(data));
2210
2211 data.conn = chan->att->conn;
2212 data.offset = offset;
2213 data.value = value;
2214 data.len = len;
2215 data.err = BT_ATT_ERR_INVALID_HANDLE;
2216
2217 bt_gatt_foreach_attr(handle, handle, prep_write_cb, &data);
2218
2219 if (data.err) {
2220 /* Respond here since handle is set */
2221 send_err_rsp(chan, BT_ATT_OP_PREPARE_WRITE_REQ, handle,
2222 data.err);
2223 return 0;
2224 }
2225
2226 LOG_DBG("buf %p handle 0x%04x offset %u", data.buf, handle, offset);
2227
2228 /* Store buffer in the outstanding queue */
2229 net_buf_slist_put(&chan->att->prep_queue, data.buf);
2230
2231 /* Generate response */
2232 data.buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_PREPARE_WRITE_RSP);
2233 if (!data.buf) {
2234 return BT_ATT_ERR_INSUFFICIENT_RESOURCES;
2235 }
2236
2237 rsp = net_buf_add(data.buf, sizeof(*rsp));
2238 rsp->handle = sys_cpu_to_le16(handle);
2239 rsp->offset = sys_cpu_to_le16(offset);
2240 net_buf_add(data.buf, len);
2241 memcpy(rsp->value, value, len);
2242
2243 bt_att_chan_send_rsp(chan, data.buf);
2244
2245 return 0;
2246 }
2247 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2248
2249 static uint8_t att_prepare_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2250 {
2251 #if CONFIG_BT_ATT_PREPARE_COUNT == 0
2252 return BT_ATT_ERR_NOT_SUPPORTED;
2253 #else
2254 struct bt_att_prepare_write_req *req;
2255 uint16_t handle, offset;
2256
2257 req = net_buf_pull_mem(buf, sizeof(*req));
2258
2259 handle = sys_le16_to_cpu(req->handle);
2260 offset = sys_le16_to_cpu(req->offset);
2261
2262 LOG_DBG("handle 0x%04x offset %u", handle, offset);
2263
2264 return att_prep_write_rsp(chan, handle, offset, buf->data, buf->len);
2265 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2266 }
2267
2268 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
2269 static uint8_t exec_write_reassemble(uint16_t handle, uint16_t offset,
2270 sys_slist_t *list,
2271 struct net_buf_simple *buf)
2272 {
2273 struct net_buf *entry, *next;
2274 sys_snode_t *prev;
2275
2276 prev = NULL;
2277 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(list, entry, next, node) {
2278 struct bt_attr_data *tmp_data = net_buf_user_data(entry);
2279
2280 LOG_DBG("entry %p handle 0x%04x, offset %u", entry, tmp_data->handle,
2281 tmp_data->offset);
2282
2283 if (tmp_data->handle == handle) {
2284 if (tmp_data->offset == 0) {
2285 /* Multiple writes to the same handle can occur
2286 * in a prepare write queue. If the offset is 0,
2287 * that should mean that it's a new write to the
2288 * same handle, and we break to process the
2289 * first write.
2290 */
2291
2292 LOG_DBG("tmp_data->offset == 0");
2293 break;
2294 }
2295
2296 if (tmp_data->offset != buf->len + offset) {
2297 /* We require that the offset is increasing
2298 * properly to avoid badly reassembled buffers
2299 */
2300
2301 LOG_DBG("Bad offset %u (%u, %u)", tmp_data->offset, buf->len,
2302 offset);
2303
2304 return BT_ATT_ERR_INVALID_OFFSET;
2305 }
2306
2307 if (buf->len + entry->len > buf->size) {
2308 return BT_ATT_ERR_INVALID_ATTRIBUTE_LEN;
2309 }
2310
2311 net_buf_simple_add_mem(buf, entry->data, entry->len);
2312 sys_slist_remove(list, prev, &entry->node);
2313 net_buf_unref(entry);
2314 } else {
2315 prev = &entry->node;
2316 }
2317 }
2318
2319 return BT_ATT_ERR_SUCCESS;
2320 }
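
/* Worked example of the reassembly above (handle and sizes invented for
 * illustration): with three queued prepare writes for handle 0x0010 holding
 * 18-byte fragments at offsets 0, 18 and 36, the caller pops the offset-0
 * buffer and this function appends the offset-18 and offset-36 entries,
 * yielding one 54-byte value. A later entry for the same handle that starts
 * again at offset 0 is left on the list (the loop breaks) and is treated as
 * a separate write on the next iteration of att_exec_write_rsp().
 */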
2321
2322 static uint8_t att_exec_write_rsp(struct bt_att_chan *chan, uint8_t flags)
2323 {
2324 struct net_buf *buf;
2325 uint8_t err = 0U;
2326
2327 /* The following code will iterate on all prepare writes in the
2328 * prep_queue, and reassemble those that share the same handle.
2329 * Once a handle has been reassembled, it is sent to the upper layers,
2330 * and the next handle is processed
2331 */
2332 while (!sys_slist_is_empty(&chan->att->prep_queue)) {
2333 struct bt_attr_data *data;
2334 uint16_t handle;
2335
2336 NET_BUF_SIMPLE_DEFINE_STATIC(reassembled_data,
2337 MIN(BT_ATT_MAX_ATTRIBUTE_LEN,
2338 CONFIG_BT_ATT_PREPARE_COUNT * BT_ATT_BUF_SIZE));
2339
2340 buf = net_buf_slist_get(&chan->att->prep_queue);
2341 data = net_buf_user_data(buf);
2342 handle = data->handle;
2343
2344 LOG_DBG("buf %p handle 0x%04x offset %u", buf, handle, data->offset);
2345
2346 net_buf_simple_reset(&reassembled_data);
2347 net_buf_simple_add_mem(&reassembled_data, buf->data, buf->len);
2348
2349 err = exec_write_reassemble(handle, data->offset,
2350 &chan->att->prep_queue,
2351 &reassembled_data);
2352 if (err != BT_ATT_ERR_SUCCESS) {
2353 send_err_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ,
2354 handle, err);
2355 return 0;
2356 }
2357
2358 /* Just discard the data if an error was set */
2359 if (!err && flags == BT_ATT_FLAG_EXEC) {
2360 err = att_write_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ, 0,
2361 handle, data->offset,
2362 reassembled_data.data,
2363 reassembled_data.len);
2364 if (err) {
2365 /* Respond here since handle is set */
2366 send_err_rsp(chan, BT_ATT_OP_EXEC_WRITE_REQ,
2367 data->handle, err);
2368 }
2369 }
2370
2371 net_buf_unref(buf);
2372 }
2373
2374 if (err) {
2375 return 0;
2376 }
2377
2378 /* Generate response */
2379 buf = bt_att_create_rsp_pdu(chan, BT_ATT_OP_EXEC_WRITE_RSP);
2380 if (!buf) {
2381 return BT_ATT_ERR_UNLIKELY;
2382 }
2383
2384 bt_att_chan_send_rsp(chan, buf);
2385
2386 return 0;
2387 }
2388 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2389
2391 static uint8_t att_exec_write_req(struct bt_att_chan *chan, struct net_buf *buf)
2392 {
2393 #if CONFIG_BT_ATT_PREPARE_COUNT == 0
2394 return BT_ATT_ERR_NOT_SUPPORTED;
2395 #else
2396 struct bt_att_exec_write_req *req;
2397
2398 req = (void *)buf->data;
2399
2400 LOG_DBG("flags 0x%02x", req->flags);
2401
2402 return att_exec_write_rsp(chan, req->flags);
2403 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
2404 }
2405
2406 static uint8_t att_write_cmd(struct bt_att_chan *chan, struct net_buf *buf)
2407 {
2408 uint16_t handle;
2409
2410 handle = net_buf_pull_le16(buf);
2411
2412 LOG_DBG("handle 0x%04x", handle);
2413
2414 return att_write_rsp(chan, 0, 0, handle, 0, buf->data, buf->len);
2415 }
2416
2417 #if defined(CONFIG_BT_SIGNING)
2418 static uint8_t att_signed_write_cmd(struct bt_att_chan *chan, struct net_buf *buf)
2419 {
2420 struct bt_conn *conn = chan->chan.chan.conn;
2421 struct bt_att_signed_write_cmd *req;
2422 uint16_t handle;
2423 int err;
2424
2425 /* The Signed Write Without Response sub-procedure shall only be supported
2426 * on the LE Fixed Channel Unenhanced ATT bearer.
2427 */
2428 if (bt_att_is_enhanced(chan)) {
2429 /* No response for this command */
2430 return 0;
2431 }
2432
2433 req = (void *)buf->data;
2434
2435 handle = sys_le16_to_cpu(req->handle);
2436
2437 LOG_DBG("handle 0x%04x", handle);
2438
2439 /* Verifying data requires full buffer including attribute header */
2440 net_buf_push(buf, sizeof(struct bt_att_hdr));
2441 err = bt_smp_sign_verify(conn, buf);
2442 if (err) {
2443 LOG_ERR("Error verifying data");
2444 /* No response for this command */
2445 return 0;
2446 }
2447
2448 net_buf_pull(buf, sizeof(struct bt_att_hdr));
2449 net_buf_pull(buf, sizeof(*req));
2450
2451 return att_write_rsp(chan, 0, 0, handle, 0, buf->data,
2452 buf->len - sizeof(struct bt_att_signature));
2453 }
2454 #endif /* CONFIG_BT_SIGNING */
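
/* Sketch of the Signed Write Without Response PDU verified above:
 *
 *   ATT_SIGNED_WRITE_CMD (0xD2):
 *     | 0xD2 | handle (2) | value (N) | signature (12) |
 *
 * The 12-octet signature (4-octet sign counter plus 8-octet CMAC) covers the
 * whole PDU including the opcode, which is why the ATT header is pushed back
 * before bt_smp_sign_verify() and the signature length is stripped before
 * the value is passed up to GATT.
 */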
2455
2456 #if defined(CONFIG_BT_GATT_CLIENT)
2457 #if defined(CONFIG_BT_ATT_RETRY_ON_SEC_ERR)
2458 static int att_change_security(struct bt_conn *conn, uint8_t err)
2459 {
2460 bt_security_t sec;
2461
2462 switch (err) {
2463 case BT_ATT_ERR_INSUFFICIENT_ENCRYPTION:
2464 if (conn->sec_level >= BT_SECURITY_L2) {
2465 return -EALREADY;
}
2466 sec = BT_SECURITY_L2;
2467 break;
2468 case BT_ATT_ERR_AUTHENTICATION:
2469 if (conn->sec_level < BT_SECURITY_L2) {
2470 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2471 * page 375:
2472 *
2473 * If an LTK is not available, the service request
2474 * shall be rejected with the error code 'Insufficient
2475 * Authentication'.
2476 * Note: When the link is not encrypted, the error code
2477 * "Insufficient Authentication" does not indicate that
2478 * MITM protection is required.
2479 */
2480 sec = BT_SECURITY_L2;
2481 } else if (conn->sec_level < BT_SECURITY_L3) {
2482 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2483 * page 375:
2484 *
2485 * If an authenticated pairing is required but only an
2486 * unauthenticated pairing has occurred and the link is
2487 * currently encrypted, the service request shall be
2488 * rejected with the error code 'Insufficient
2489 * Authentication'.
2490 * Note: When unauthenticated pairing has occurred and
2491 * the link is currently encrypted, the error code
2492 * 'Insufficient Authentication' indicates that MITM
2493 * protection is required.
2494 */
2495 sec = BT_SECURITY_L3;
2496 } else if (conn->sec_level < BT_SECURITY_L4) {
2497 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part C]
2498 * page 375:
2499 *
2500 * If LE Secure Connections authenticated pairing is
2501 * required but LE legacy pairing has occurred and the
2502 * link is currently encrypted, the service request
2503 * shall be rejected with the error code 'Insufficient
2504 * Authentication'.
2505 */
2506 sec = BT_SECURITY_L4;
2507 } else {
2508 return -EALREADY;
2509 }
2510 break;
2511 default:
2512 return -EINVAL;
2513 }
2514
2515 return bt_conn_set_security(conn, sec);
2516 }
2517 #endif /* CONFIG_BT_ATT_RETRY_ON_SEC_ERR */
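
/* Summary of the escalation policy implemented by att_change_security():
 *
 *   ATT error received            current level   requested level
 *   ---------------------------   -------------   ---------------
 *   Insufficient Encryption       below L2        L2
 *   Insufficient Authentication   below L2        L2 (encrypt first)
 *   Insufficient Authentication   L2              L3 (MITM pairing)
 *   Insufficient Authentication   L3              L4 (LE SC pairing)
 *
 * Anything else returns -EALREADY or -EINVAL, and the original ATT error is
 * then propagated to the requester in att_error_rsp() below.
 */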
2518
2519 static uint8_t att_error_rsp(struct bt_att_chan *chan, struct net_buf *buf)
2520 {
2521 struct bt_att_error_rsp *rsp;
2522 uint8_t err;
2523
2524 rsp = (void *)buf->data;
2525
2526 LOG_DBG("request 0x%02x handle 0x%04x error 0x%02x", rsp->request,
2527 sys_le16_to_cpu(rsp->handle), rsp->error);
2528
2529 /* Don't retry if there is no req pending or it has been cancelled.
2530 *
2531 * BLUETOOTH SPECIFICATION Version 5.2 [Vol 3, Part F]
2532 * page 1423:
2533 *
2534 * If an error code is received in the ATT_ERROR_RSP PDU that is not
2535 * understood by the client, for example an error code that was reserved
2536 * for future use that is now being used in a future version of the
2537 * specification, then the ATT_ERROR_RSP PDU shall still be considered to
2538 * state that the given request cannot be performed for an unknown reason.
2539 */
2540 if (!chan->req || chan->req == &cancel || !rsp->error) {
2541 err = BT_ATT_ERR_UNLIKELY;
2542 goto done;
2543 }
2544
2545 err = rsp->error;
2546
2547 #if defined(CONFIG_BT_ATT_RETRY_ON_SEC_ERR)
2548 int ret;
2549
2550 /* Check if error can be handled by elevating security. */
2551 ret = att_change_security(chan->chan.chan.conn, err);
2552 if (ret == 0 || ret == -EBUSY) {
2553 /* ATT timeout work is normally cancelled in att_handle_rsp.
2554 * However, retrying is a special case, so the timeout shall
2555 * be cancelled here.
2556 */
2557 k_work_cancel_delayable(&chan->timeout_work);
2558
2559 chan->req->retrying = true;
2560 return 0;
2561 }
2562 #endif /* CONFIG_BT_ATT_RETRY_ON_SEC_ERR */
2563
2564 done:
2565 return att_handle_rsp(chan, NULL, 0, err);
2566 }
2567
2568 static uint8_t att_handle_find_info_rsp(struct bt_att_chan *chan,
2569 struct net_buf *buf)
2570 {
2571 LOG_DBG("");
2572
2573 return att_handle_rsp(chan, buf->data, buf->len, 0);
2574 }
2575
2576 static uint8_t att_handle_find_type_rsp(struct bt_att_chan *chan,
2577 struct net_buf *buf)
2578 {
2579 LOG_DBG("");
2580
2581 return att_handle_rsp(chan, buf->data, buf->len, 0);
2582 }
2583
2584 static uint8_t att_handle_read_type_rsp(struct bt_att_chan *chan,
2585 struct net_buf *buf)
2586 {
2587 LOG_DBG("");
2588
2589 return att_handle_rsp(chan, buf->data, buf->len, 0);
2590 }
2591
2592 static uint8_t att_handle_read_rsp(struct bt_att_chan *chan,
2593 struct net_buf *buf)
2594 {
2595 LOG_DBG("");
2596
2597 return att_handle_rsp(chan, buf->data, buf->len, 0);
2598 }
2599
2600 static uint8_t att_handle_read_blob_rsp(struct bt_att_chan *chan,
2601 struct net_buf *buf)
2602 {
2603 LOG_DBG("");
2604
2605 return att_handle_rsp(chan, buf->data, buf->len, 0);
2606 }
2607
2608 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
2609 static uint8_t att_handle_read_mult_rsp(struct bt_att_chan *chan,
2610 struct net_buf *buf)
2611 {
2612 LOG_DBG("");
2613
2614 return att_handle_rsp(chan, buf->data, buf->len, 0);
2615 }
2616
2617 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
2618
2619 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
2620 static uint8_t att_handle_read_mult_vl_rsp(struct bt_att_chan *chan,
2621 struct net_buf *buf)
2622 {
2623 LOG_DBG("");
2624
2625 return att_handle_rsp(chan, buf->data, buf->len, 0);
2626 }
2627 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
2628
2629 static uint8_t att_handle_read_group_rsp(struct bt_att_chan *chan,
2630 struct net_buf *buf)
2631 {
2632 LOG_DBG("");
2633
2634 return att_handle_rsp(chan, buf->data, buf->len, 0);
2635 }
2636
2637 static uint8_t att_handle_write_rsp(struct bt_att_chan *chan,
2638 struct net_buf *buf)
2639 {
2640 LOG_DBG("");
2641
2642 return att_handle_rsp(chan, buf->data, buf->len, 0);
2643 }
2644
2645 static uint8_t att_handle_prepare_write_rsp(struct bt_att_chan *chan,
2646 struct net_buf *buf)
2647 {
2648 LOG_DBG("");
2649
2650 return att_handle_rsp(chan, buf->data, buf->len, 0);
2651 }
2652
2653 static uint8_t att_handle_exec_write_rsp(struct bt_att_chan *chan,
2654 struct net_buf *buf)
2655 {
2656 LOG_DBG("");
2657
2658 return att_handle_rsp(chan, buf->data, buf->len, 0);
2659 }
2660
2661 static uint8_t att_notify(struct bt_att_chan *chan, struct net_buf *buf)
2662 {
2663 uint16_t handle;
2664
2665 handle = net_buf_pull_le16(buf);
2666
2667 LOG_DBG("chan %p handle 0x%04x", chan, handle);
2668
2669 bt_gatt_notification(chan->att->conn, handle, buf->data, buf->len);
2670
2671 return 0;
2672 }
2673
2674 static uint8_t att_indicate(struct bt_att_chan *chan, struct net_buf *buf)
2675 {
2676 uint16_t handle;
2677
2678 handle = net_buf_pull_le16(buf);
2679
2680 LOG_DBG("chan %p handle 0x%04x", chan, handle);
2681
2682 bt_gatt_notification(chan->att->conn, handle, buf->data, buf->len);
2683
2684 buf = bt_att_chan_create_pdu(chan, BT_ATT_OP_CONFIRM, 0);
2685 if (!buf) {
2686 return 0;
2687 }
2688
2689 bt_att_chan_send_rsp(chan, buf);
2690
2691 return 0;
2692 }
2693
2694 static uint8_t att_notify_mult(struct bt_att_chan *chan, struct net_buf *buf)
2695 {
2696 LOG_DBG("chan %p", chan);
2697
2698 bt_gatt_mult_notification(chan->att->conn, buf->data, buf->len);
2699
2700 return 0;
2701 }
2702 #endif /* CONFIG_BT_GATT_CLIENT */
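
/* For reference, the client-role value PDUs dispatched above (opcodes per
 * the ATT protocol):
 *
 *   ATT_HANDLE_VALUE_NTF (0x1B):  | 0x1B | handle (2) | value |  no reply
 *   ATT_HANDLE_VALUE_IND (0x1D):  | 0x1D | handle (2) | value |
 *   ATT_HANDLE_VALUE_CFM (0x1E):  | 0x1E |  sent back by att_indicate()
 *   ATT_MULTIPLE_HANDLE_VALUE_NTF (0x23):
 *     | 0x23 | { handle (2) | len (2) | value } repeated |
 */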
2703
2704 static uint8_t att_confirm(struct bt_att_chan *chan, struct net_buf *buf)
2705 {
2706 LOG_DBG("");
2707
2708 return att_handle_rsp(chan, buf->data, buf->len, 0);
2709 }
2710
2711 static const struct att_handler {
2712 uint8_t op;
2713 uint8_t expect_len;
2714 att_type_t type;
2715 uint8_t (*func)(struct bt_att_chan *chan, struct net_buf *buf);
2716 } handlers[] = {
2717 { BT_ATT_OP_MTU_REQ,
2718 sizeof(struct bt_att_exchange_mtu_req),
2719 ATT_REQUEST,
2720 att_mtu_req },
2721 { BT_ATT_OP_FIND_INFO_REQ,
2722 sizeof(struct bt_att_find_info_req),
2723 ATT_REQUEST,
2724 att_find_info_req },
2725 { BT_ATT_OP_FIND_TYPE_REQ,
2726 sizeof(struct bt_att_find_type_req),
2727 ATT_REQUEST,
2728 att_find_type_req },
2729 { BT_ATT_OP_READ_TYPE_REQ,
2730 sizeof(struct bt_att_read_type_req),
2731 ATT_REQUEST,
2732 att_read_type_req },
2733 { BT_ATT_OP_READ_REQ,
2734 sizeof(struct bt_att_read_req),
2735 ATT_REQUEST,
2736 att_read_req },
2737 { BT_ATT_OP_READ_BLOB_REQ,
2738 sizeof(struct bt_att_read_blob_req),
2739 ATT_REQUEST,
2740 att_read_blob_req },
2741 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
2742 { BT_ATT_OP_READ_MULT_REQ,
2743 BT_ATT_READ_MULT_MIN_LEN_REQ,
2744 ATT_REQUEST,
2745 att_read_mult_req },
2746 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
2747 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
2748 { BT_ATT_OP_READ_MULT_VL_REQ,
2749 BT_ATT_READ_MULT_MIN_LEN_REQ,
2750 ATT_REQUEST,
2751 att_read_mult_vl_req },
2752 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
2753 { BT_ATT_OP_READ_GROUP_REQ,
2754 sizeof(struct bt_att_read_group_req),
2755 ATT_REQUEST,
2756 att_read_group_req },
2757 { BT_ATT_OP_WRITE_REQ,
2758 sizeof(struct bt_att_write_req),
2759 ATT_REQUEST,
2760 att_write_req },
2761 { BT_ATT_OP_PREPARE_WRITE_REQ,
2762 sizeof(struct bt_att_prepare_write_req),
2763 ATT_REQUEST,
2764 att_prepare_write_req },
2765 { BT_ATT_OP_EXEC_WRITE_REQ,
2766 sizeof(struct bt_att_exec_write_req),
2767 ATT_REQUEST,
2768 att_exec_write_req },
2769 { BT_ATT_OP_CONFIRM,
2770 0,
2771 ATT_CONFIRMATION,
2772 att_confirm },
2773 { BT_ATT_OP_WRITE_CMD,
2774 sizeof(struct bt_att_write_cmd),
2775 ATT_COMMAND,
2776 att_write_cmd },
2777 #if defined(CONFIG_BT_SIGNING)
2778 { BT_ATT_OP_SIGNED_WRITE_CMD,
2779 (sizeof(struct bt_att_write_cmd) +
2780 sizeof(struct bt_att_signature)),
2781 ATT_COMMAND,
2782 att_signed_write_cmd },
2783 #endif /* CONFIG_BT_SIGNING */
2784 #if defined(CONFIG_BT_GATT_CLIENT)
2785 { BT_ATT_OP_ERROR_RSP,
2786 sizeof(struct bt_att_error_rsp),
2787 ATT_RESPONSE,
2788 att_error_rsp },
2789 { BT_ATT_OP_MTU_RSP,
2790 sizeof(struct bt_att_exchange_mtu_rsp),
2791 ATT_RESPONSE,
2792 att_mtu_rsp },
2793 { BT_ATT_OP_FIND_INFO_RSP,
2794 sizeof(struct bt_att_find_info_rsp),
2795 ATT_RESPONSE,
2796 att_handle_find_info_rsp },
2797 { BT_ATT_OP_FIND_TYPE_RSP,
2798 sizeof(struct bt_att_handle_group),
2799 ATT_RESPONSE,
2800 att_handle_find_type_rsp },
2801 { BT_ATT_OP_READ_TYPE_RSP,
2802 sizeof(struct bt_att_read_type_rsp),
2803 ATT_RESPONSE,
2804 att_handle_read_type_rsp },
2805 { BT_ATT_OP_READ_RSP,
2806 0,
2807 ATT_RESPONSE,
2808 att_handle_read_rsp },
2809 { BT_ATT_OP_READ_BLOB_RSP,
2810 0,
2811 ATT_RESPONSE,
2812 att_handle_read_blob_rsp },
2813 #if defined(CONFIG_BT_GATT_READ_MULTIPLE)
2814 { BT_ATT_OP_READ_MULT_RSP,
2815 0,
2816 ATT_RESPONSE,
2817 att_handle_read_mult_rsp },
2818 #endif /* CONFIG_BT_GATT_READ_MULTIPLE */
2819 #if defined(CONFIG_BT_GATT_READ_MULT_VAR_LEN)
2820 { BT_ATT_OP_READ_MULT_VL_RSP,
2821 sizeof(struct bt_att_read_mult_vl_rsp),
2822 ATT_RESPONSE,
2823 att_handle_read_mult_vl_rsp },
2824 #endif /* CONFIG_BT_GATT_READ_MULT_VAR_LEN */
2825 { BT_ATT_OP_READ_GROUP_RSP,
2826 sizeof(struct bt_att_read_group_rsp),
2827 ATT_RESPONSE,
2828 att_handle_read_group_rsp },
2829 { BT_ATT_OP_WRITE_RSP,
2830 0,
2831 ATT_RESPONSE,
2832 att_handle_write_rsp },
2833 { BT_ATT_OP_PREPARE_WRITE_RSP,
2834 sizeof(struct bt_att_prepare_write_rsp),
2835 ATT_RESPONSE,
2836 att_handle_prepare_write_rsp },
2837 { BT_ATT_OP_EXEC_WRITE_RSP,
2838 0,
2839 ATT_RESPONSE,
2840 att_handle_exec_write_rsp },
2841 { BT_ATT_OP_NOTIFY,
2842 sizeof(struct bt_att_notify),
2843 ATT_NOTIFICATION,
2844 att_notify },
2845 { BT_ATT_OP_INDICATE,
2846 sizeof(struct bt_att_indicate),
2847 ATT_INDICATION,
2848 att_indicate },
2849 { BT_ATT_OP_NOTIFY_MULT,
2850 sizeof(struct bt_att_notify_mult),
2851 ATT_NOTIFICATION,
2852 att_notify_mult },
2853 #endif /* CONFIG_BT_GATT_CLIENT */
2854 };
2855
2856 static att_type_t att_op_get_type(uint8_t op)
2857 {
2858 switch (op) {
2859 case BT_ATT_OP_MTU_REQ:
2860 case BT_ATT_OP_FIND_INFO_REQ:
2861 case BT_ATT_OP_FIND_TYPE_REQ:
2862 case BT_ATT_OP_READ_TYPE_REQ:
2863 case BT_ATT_OP_READ_REQ:
2864 case BT_ATT_OP_READ_BLOB_REQ:
2865 case BT_ATT_OP_READ_MULT_REQ:
2866 case BT_ATT_OP_READ_MULT_VL_REQ:
2867 case BT_ATT_OP_READ_GROUP_REQ:
2868 case BT_ATT_OP_WRITE_REQ:
2869 case BT_ATT_OP_PREPARE_WRITE_REQ:
2870 case BT_ATT_OP_EXEC_WRITE_REQ:
2871 return ATT_REQUEST;
2872 case BT_ATT_OP_CONFIRM:
2873 return ATT_CONFIRMATION;
2874 case BT_ATT_OP_WRITE_CMD:
2875 case BT_ATT_OP_SIGNED_WRITE_CMD:
2876 return ATT_COMMAND;
2877 case BT_ATT_OP_ERROR_RSP:
2878 case BT_ATT_OP_MTU_RSP:
2879 case BT_ATT_OP_FIND_INFO_RSP:
2880 case BT_ATT_OP_FIND_TYPE_RSP:
2881 case BT_ATT_OP_READ_TYPE_RSP:
2882 case BT_ATT_OP_READ_RSP:
2883 case BT_ATT_OP_READ_BLOB_RSP:
2884 case BT_ATT_OP_READ_MULT_RSP:
2885 case BT_ATT_OP_READ_MULT_VL_RSP:
2886 case BT_ATT_OP_READ_GROUP_RSP:
2887 case BT_ATT_OP_WRITE_RSP:
2888 case BT_ATT_OP_PREPARE_WRITE_RSP:
2889 case BT_ATT_OP_EXEC_WRITE_RSP:
2890 return ATT_RESPONSE;
2891 case BT_ATT_OP_NOTIFY:
2892 case BT_ATT_OP_NOTIFY_MULT:
2893 return ATT_NOTIFICATION;
2894 case BT_ATT_OP_INDICATE:
2895 return ATT_INDICATION;
2896 }
2897
2898 if (op & ATT_CMD_MASK) {
2899 return ATT_COMMAND;
2900 }
2901
2902 return ATT_UNKNOWN;
2903 }
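
/* Example of the fallback above: an unknown opcode such as 0x7E (an
 * arbitrary value for illustration) has the Command Flag (bit 6,
 * ATT_CMD_MASK) set, so it is classified as ATT_COMMAND and silently
 * dropped by bt_att_recv(), whereas an unknown opcode with that bit clear
 * is answered with an error response carrying "Request Not Supported".
 */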
2904
2905 static struct bt_conn *get_conn(struct bt_att_chan *att_chan)
2906 {
2907 return att_chan->chan.chan.conn;
2908 }
2909
2910 static int bt_att_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
2911 {
2912 struct bt_att_chan *att_chan = ATT_CHAN(chan);
2913 struct bt_conn *conn = get_conn(att_chan);
2914 struct bt_att_hdr *hdr;
2915 const struct att_handler *handler;
2916 uint8_t err;
2917 size_t i;
2918
2919 if (buf->len < sizeof(*hdr)) {
2920 LOG_ERR("Too small ATT PDU received");
2921 return 0;
2922 }
2923
2924 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2925 LOG_DBG("Received ATT chan %p code 0x%02x len %zu", att_chan, hdr->code,
2926 net_buf_frags_len(buf));
2927
2928 if (conn->state != BT_CONN_CONNECTED) {
2929 LOG_DBG("not connected: conn %p state %u", conn, conn->state);
2930 return 0;
2931 }
2932
2933 if (!att_chan->att) {
2934 LOG_DBG("Ignore recv on detached ATT chan");
2935 return 0;
2936 }
2937
2938 for (i = 0, handler = NULL; i < ARRAY_SIZE(handlers); i++) {
2939 if (hdr->code == handlers[i].op) {
2940 handler = &handlers[i];
2941 break;
2942 }
2943 }
2944
2945 if (!handler) {
2946 LOG_WRN("Unhandled ATT code 0x%02x", hdr->code);
2947 if (att_op_get_type(hdr->code) != ATT_COMMAND &&
2948 att_op_get_type(hdr->code) != ATT_INDICATION) {
2949 send_err_rsp(att_chan, hdr->code, 0,
2950 BT_ATT_ERR_NOT_SUPPORTED);
2951 }
2952 return 0;
2953 }
2954
2955 if (buf->len < handler->expect_len) {
2956 LOG_ERR("Invalid len %u for code 0x%02x", buf->len, hdr->code);
2957 err = BT_ATT_ERR_INVALID_PDU;
2958 } else {
2959 err = handler->func(att_chan, buf);
2960 }
2961
2962 if (handler->type == ATT_REQUEST && err) {
2963 LOG_DBG("ATT error 0x%02x", err);
2964 send_err_rsp(att_chan, hdr->code, 0, err);
2965 }
2966
2967 return 0;
2968 }
2969
2970 static struct bt_att *att_get(struct bt_conn *conn)
2971 {
2972 struct bt_l2cap_chan *chan;
2973 struct bt_att_chan *att_chan;
2974
2975 if (conn->state != BT_CONN_CONNECTED) {
2976 LOG_WRN("Not connected");
2977 return NULL;
2978 }
2979
2980 chan = bt_l2cap_le_lookup_rx_cid(conn, BT_L2CAP_CID_ATT);
2981 if (!chan) {
2982 LOG_ERR("Unable to find ATT channel");
2983 return NULL;
2984 }
2985
2986 att_chan = ATT_CHAN(chan);
2987 if (!atomic_test_bit(att_chan->flags, ATT_CONNECTED)) {
2988 LOG_ERR("ATT channel not connected");
2989 return NULL;
2990 }
2991
2992 return att_chan->att;
2993 }
2994
2995 struct net_buf *bt_att_create_pdu(struct bt_conn *conn, uint8_t op, size_t len)
2996 {
2997 struct bt_att *att;
2998 struct bt_att_chan *chan, *tmp;
2999
3000 att = att_get(conn);
3001 if (!att) {
3002 return NULL;
3003 }
3004
3005 /* This allocator should _not_ be used for RSPs. */
3006 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
3007 if (len + sizeof(op) > bt_att_mtu(chan)) {
3008 continue;
3009 }
3010
3011 return bt_att_chan_create_pdu(chan, op, len);
3012 }
3013
3014 LOG_WRN("No ATT channel for MTU %zu", len + sizeof(op));
3015
3016 return NULL;
3017 }
3018
3019 struct net_buf *bt_att_create_rsp_pdu(struct bt_att_chan *chan, uint8_t op)
3020 {
3021 size_t headroom;
3022 struct bt_att_hdr *hdr;
3023 struct bt_att_tx_meta_data *data;
3024 struct net_buf *buf;
3025
3026 buf = net_buf_alloc(&att_pool, BT_ATT_TIMEOUT);
3027 if (!buf) {
3028 LOG_ERR("Unable to allocate buffer for op 0x%02x", op);
3029 return NULL;
3030 }
3031
3032 headroom = BT_L2CAP_BUF_SIZE(0);
3033
3034 if (bt_att_is_enhanced(chan)) {
3035 headroom += BT_L2CAP_SDU_HDR_SIZE;
3036 }
3037
3038 net_buf_reserve(buf, headroom);
3039
3040 data = bt_att_get_tx_meta_data(buf);
3041 data->att_chan = chan;
3042
3043 hdr = net_buf_add(buf, sizeof(*hdr));
3044 hdr->code = op;
3045
3046 return buf;
3047 }
3048
3049 static void att_reset(struct bt_att *att)
3050 {
3051 struct net_buf *buf;
3052
3053 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
3054 /* Discard queued buffers */
3055 while ((buf = net_buf_slist_get(&att->prep_queue))) {
3056 net_buf_unref(buf);
3057 }
3058 #endif /* CONFIG_BT_ATT_PREPARE_COUNT > 0 */
3059
3060 #if defined(CONFIG_BT_EATT)
3061 struct k_work_sync sync;
3062
3063 (void)k_work_cancel_delayable_sync(&att->eatt.connection_work, &sync);
3064 #endif /* CONFIG_BT_EATT */
3065
3066 while ((buf = net_buf_get(&att->tx_queue, K_NO_WAIT))) {
3067 net_buf_unref(buf);
3068 }
3069
3070 /* Notify pending requests */
3071 while (!sys_slist_is_empty(&att->reqs)) {
3072 struct bt_att_req *req;
3073 sys_snode_t *node;
3074
3075 node = sys_slist_get_not_empty(&att->reqs);
3076 req = CONTAINER_OF(node, struct bt_att_req, node);
3077 if (req->func) {
3078 req->func(att->conn, -ECONNRESET, NULL, 0,
3079 req->user_data);
3080 }
3081
3082 bt_att_req_free(req);
3083 }
3084
3085 /* FIXME: `att->conn` is not reference counted. Consider using `bt_conn_ref`
3086 * and `bt_conn_unref` to follow convention.
3087 */
3088 att->conn = NULL;
3089 k_mem_slab_free(&att_slab, (void *)att);
3090 }
3091
3092 static void att_chan_detach(struct bt_att_chan *chan)
3093 {
3094 struct net_buf *buf;
3095
3096 LOG_DBG("chan %p", chan);
3097
3098 sys_slist_find_and_remove(&chan->att->chans, &chan->node);
3099
3100 /* Release pending buffers */
3101 while ((buf = net_buf_get(&chan->tx_queue, K_NO_WAIT))) {
3102 net_buf_unref(buf);
3103 }
3104
3105 if (chan->req) {
3106 /* Notify outstanding request */
3107 att_handle_rsp(chan, NULL, 0, -ECONNRESET);
3108 }
3109
3110 chan->att = NULL;
3111 atomic_clear_bit(chan->flags, ATT_CONNECTED);
3112 }
3113
3114 static void att_timeout(struct k_work *work)
3115 {
3116 char addr[BT_ADDR_LE_STR_LEN];
3117 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
3118 struct bt_att_chan *chan = CONTAINER_OF(dwork, struct bt_att_chan,
3119 timeout_work);
3120
3121 bt_addr_le_to_str(bt_conn_get_dst(chan->att->conn), addr, sizeof(addr));
3122 LOG_ERR("ATT Timeout for device %s", addr);
3123
3124 /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part F] page 480:
3125 *
3126 * A transaction not completed within 30 seconds shall time out. Such a
3127 * transaction shall be considered to have failed and the local higher
3128 * layers shall be informed of this failure. No more attribute protocol
3129 * requests, commands, indications or notifications shall be sent to the
3130 * target device on this ATT Bearer.
3131 */
3132 bt_att_disconnected(&chan->chan.chan);
3133 }
3134
3135 static struct bt_att_chan *att_get_fixed_chan(struct bt_conn *conn)
3136 {
3137 struct bt_l2cap_chan *chan;
3138
3139 chan = bt_l2cap_le_lookup_tx_cid(conn, BT_L2CAP_CID_ATT);
3140 __ASSERT(chan, "No ATT channel found");
3141
3142 return ATT_CHAN(chan);
3143 }
3144
3145 static void att_chan_attach(struct bt_att *att, struct bt_att_chan *chan)
3146 {
3147 LOG_DBG("att %p chan %p flags %lu", att, chan, atomic_get(chan->flags));
3148
3149 if (sys_slist_is_empty(&att->chans)) {
3150 /* Init general queues when attaching the first channel */
3151 k_fifo_init(&att->tx_queue);
3152 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
3153 sys_slist_init(&att->prep_queue);
3154 #endif
3155 }
3156
3157 sys_slist_prepend(&att->chans, &chan->node);
3158 }
3159
3160 static void bt_att_connected(struct bt_l2cap_chan *chan)
3161 {
3162 struct bt_att_chan *att_chan = ATT_CHAN(chan);
3163 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3164
3165 LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->tx.cid);
3166
3167 atomic_set_bit(att_chan->flags, ATT_CONNECTED);
3168
3169 att_chan_mtu_updated(att_chan);
3170
3171 k_work_init_delayable(&att_chan->timeout_work, att_timeout);
3172
3173 bt_gatt_connected(le_chan->chan.conn);
3174 }
3175
3176 static void bt_att_disconnected(struct bt_l2cap_chan *chan)
3177 {
3178 struct bt_att_chan *att_chan = ATT_CHAN(chan);
3179 struct bt_att *att = att_chan->att;
3180 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3181
3182 LOG_DBG("chan %p cid 0x%04x", le_chan, le_chan->tx.cid);
3183
3184 if (!att_chan->att) {
3185 LOG_DBG("Ignore disconnect on detached ATT chan");
3186 return;
3187 }
3188
3189 att_chan_detach(att_chan);
3190
3191 /* Don't reset if there are still channels to be used */
3192 if (!sys_slist_is_empty(&att->chans)) {
3193 return;
3194 }
3195
3196 att_reset(att);
3197
3198 bt_gatt_disconnected(le_chan->chan.conn);
3199 }
3200
3201 #if defined(CONFIG_BT_SMP)
3202 static uint8_t att_req_retry(struct bt_att_chan *att_chan)
3203 {
3204 struct bt_att_req *req = att_chan->req;
3205 struct net_buf *buf;
3206
3207 /* Resend buffer */
3208 if (!req->encode) {
3209 /* This request does not support resending */
3210 return BT_ATT_ERR_AUTHENTICATION;
3211 }
3212
3214 buf = bt_att_chan_create_pdu(att_chan, req->att_op, req->len);
3215 if (!buf) {
3216 return BT_ATT_ERR_UNLIKELY;
3217 }
3218
3219 if (req->encode(buf, req->len, req->user_data)) {
3220 net_buf_unref(buf);
3221 return BT_ATT_ERR_UNLIKELY;
3222 }
3223
3224 if (chan_send(att_chan, buf)) {
3225 net_buf_unref(buf);
3226 return BT_ATT_ERR_UNLIKELY;
3227 }
3228
3229 return BT_ATT_ERR_SUCCESS;
3230 }
3231
3232 static void bt_att_encrypt_change(struct bt_l2cap_chan *chan,
3233 uint8_t hci_status)
3234 {
3235 struct bt_att_chan *att_chan = ATT_CHAN(chan);
3236 struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
3237 struct bt_conn *conn = le_chan->chan.conn;
3238 uint8_t err;
3239
3240 LOG_DBG("chan %p conn %p handle %u sec_level 0x%02x status 0x%02x %s", le_chan, conn,
3241 conn->handle, conn->sec_level, hci_status, bt_hci_err_to_str(hci_status));
3242
3243 if (!att_chan->att) {
3244 LOG_DBG("Ignore encrypt change on detached ATT chan");
3245 return;
3246 }
3247
3248 /*
3249 * If status (HCI status of security procedure) is non-zero, notify
3250 * outstanding request about security failure.
3251 */
3252 if (hci_status) {
3253 if (att_chan->req && att_chan->req->retrying) {
3254 att_handle_rsp(att_chan, NULL, 0,
3255 BT_ATT_ERR_AUTHENTICATION);
3256 }
3257
3258 return;
3259 }
3260
3261 bt_gatt_encrypt_change(conn);
3262
3263 if (conn->sec_level == BT_SECURITY_L1) {
3264 return;
3265 }
3266
3267 if (!(att_chan->req && att_chan->req->retrying)) {
3268 return;
3269 }
3270
3271 LOG_DBG("Retrying");
3272
3273 err = att_req_retry(att_chan);
3274 if (err) {
3275 LOG_DBG("Retry failed (%d)", err);
3276 att_handle_rsp(att_chan, NULL, 0, err);
3277 }
3278 }
3279 #endif /* CONFIG_BT_SMP */
3280
3281 static void bt_att_status(struct bt_l2cap_chan *ch, atomic_t *status)
3282 {
3283 struct bt_att_chan *chan = ATT_CHAN(ch);
3284 sys_snode_t *node;
3285
3286 LOG_DBG("chan %p status %p", ch, status);
3287
3288 if (!atomic_test_bit(status, BT_L2CAP_STATUS_OUT)) {
3289 return;
3290 }
3291
3292 if (!chan->att) {
3293 LOG_DBG("Ignore status on detached ATT chan");
3294 return;
3295 }
3296
3297 /* If there is a request pending don't attempt to send */
3298 if (chan->req) {
3299 return;
3300 }
3301
3302 /* Pull next request from the list */
3303 node = sys_slist_get(&chan->att->reqs);
3304 if (!node) {
3305 return;
3306 }
3307
3308 if (bt_att_chan_req_send(chan, ATT_REQ(node)) >= 0) {
3309 return;
3310 }
3311
3312 /* Prepend back to the list as it could not be sent */
3313 sys_slist_prepend(&chan->att->reqs, node);
3314 }
3315
3316 static void bt_att_released(struct bt_l2cap_chan *ch)
3317 {
3318 struct bt_att_chan *chan = ATT_CHAN(ch);
3319
3320 LOG_DBG("chan %p", chan);
3321
3322 k_mem_slab_free(&chan_slab, (void *)chan);
3323 }
3324
3325 #if defined(CONFIG_BT_EATT)
3326 static void bt_att_reconfigured(struct bt_l2cap_chan *l2cap_chan)
3327 {
3328 struct bt_att_chan *att_chan = ATT_CHAN(l2cap_chan);
3329
3330 LOG_DBG("chan %p", att_chan);
3331
3332 att_chan_mtu_updated(att_chan);
3333 }
3334 #endif /* CONFIG_BT_EATT */
3335
3336 static struct bt_att_chan *att_chan_new(struct bt_att *att, atomic_val_t flags)
3337 {
3338 int quota = 0;
3339 static struct bt_l2cap_chan_ops ops = {
3340 .connected = bt_att_connected,
3341 .disconnected = bt_att_disconnected,
3342 .recv = bt_att_recv,
3343 .sent = bt_att_sent,
3344 .status = bt_att_status,
3345 #if defined(CONFIG_BT_SMP)
3346 .encrypt_change = bt_att_encrypt_change,
3347 #endif /* CONFIG_BT_SMP */
3348 .released = bt_att_released,
3349 #if defined(CONFIG_BT_EATT)
3350 .reconfigured = bt_att_reconfigured,
3351 #endif /* CONFIG_BT_EATT */
3352 };
3353 struct bt_att_chan *chan;
3354
3355 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3356 if (chan->att == att) {
3357 quota++;
3358 }
3359
3360 if (quota == ATT_CHAN_MAX) {
3361 LOG_DBG("Maximum number of channels reached: %d", quota);
3362 return NULL;
3363 }
3364 }
3365
3366 if (k_mem_slab_alloc(&chan_slab, (void **)&chan, K_NO_WAIT)) {
3367 LOG_WRN("No available ATT channel for conn %p", att->conn);
3368 return NULL;
3369 }
3370
3371 (void)memset(chan, 0, sizeof(*chan));
3372 chan->chan.chan.ops = &ops;
3373 k_fifo_init(&chan->tx_queue);
3374 atomic_set(chan->flags, flags);
3375 chan->att = att;
3376 att_chan_attach(att, chan);
3377
3378 if (bt_att_is_enhanced(chan)) {
3379 /* EATT: The MTU will be sent in the ECRED conn req/rsp PDU. The
3380 * TX MTU is received at the L2CAP level.
3381 */
3382 chan->chan.rx.mtu = BT_LOCAL_ATT_MTU_EATT;
3383 } else {
3384 /* UATT: L2CAP Basic is not able to communicate the L2CAP MTU
3385 * without help. ATT has to manage the MTU. The initial MTU is
3386 * defined by spec.
3387 */
3388 chan->chan.tx.mtu = BT_ATT_DEFAULT_LE_MTU;
3389 chan->chan.rx.mtu = BT_ATT_DEFAULT_LE_MTU;
3390 }
3391
3392 return chan;
3393 }
3394
3395 #if defined(CONFIG_BT_EATT)
3396 size_t bt_eatt_count(struct bt_conn *conn)
3397 {
3398 struct bt_att *att;
3399 struct bt_att_chan *chan;
3400 size_t eatt_count = 0;
3401
3402 if (!conn) {
3403 return 0;
3404 }
3405
3406 att = att_get(conn);
3407 if (!att) {
3408 return 0;
3409 }
3410
3411 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3412 if (bt_att_is_enhanced(chan) &&
3413 atomic_test_bit(chan->flags, ATT_CONNECTED)) {
3414 eatt_count++;
3415 }
3416 }
3417
3418 return eatt_count;
3419 }
3420
3421 static void att_enhanced_connection_work_handler(struct k_work *work)
3422 {
3423 const struct k_work_delayable *dwork = k_work_delayable_from_work(work);
3424 const struct bt_att *att = CONTAINER_OF(dwork, struct bt_att, eatt.connection_work);
3425 const int err = bt_eatt_connect(att->conn, att->eatt.chans_to_connect);
3426
3427 if (err == -ENOMEM) {
3428 LOG_DBG("Failed to connect %d EATT channels, central has probably "
3429 "already established some.",
3430 att->eatt.chans_to_connect);
3431 } else if (err < 0) {
3432 LOG_WRN("Failed to connect %d EATT channels (err: %d)", att->eatt.chans_to_connect,
3433 err);
3434 }
3436 }
3437 #endif /* CONFIG_BT_EATT */
3438
3439 static int bt_att_accept(struct bt_conn *conn, struct bt_l2cap_chan **ch)
3440 {
3441 struct bt_att *att;
3442 struct bt_att_chan *chan;
3443
3444 LOG_DBG("conn %p handle %u", conn, conn->handle);
3445
3446 if (k_mem_slab_alloc(&att_slab, (void **)&att, K_NO_WAIT)) {
3447 LOG_ERR("No available ATT context for conn %p", conn);
3448 return -ENOMEM;
3449 }
3450
3451 att_handle_rsp_thread = k_current_get();
3452
3453 (void)memset(att, 0, sizeof(*att));
3454 att->conn = conn;
3455 sys_slist_init(&att->reqs);
3456 sys_slist_init(&att->chans);
3457
3458 #if defined(CONFIG_BT_EATT)
3459 k_work_init_delayable(&att->eatt.connection_work,
3460 att_enhanced_connection_work_handler);
3461 #endif /* CONFIG_BT_EATT */
3462
3463 chan = att_chan_new(att, 0);
3464 if (!chan) {
3465 return -ENOMEM;
3466 }
3467
3468 *ch = &chan->chan.chan;
3469
3470 return 0;
3471 }
3472
3473 /* The L2CAP channel section is sorted lexicographically. Make sure that the ATT fixed channel is
3474 * placed last, so that the SMP channel is fully initialized before bt_att_connected tries to send
3475 * a security request.
3476 */
3477 BT_L2CAP_CHANNEL_DEFINE(z_att_fixed_chan, BT_L2CAP_CID_ATT, bt_att_accept, NULL);
3478
3479 #if defined(CONFIG_BT_EATT)
3480 static k_timeout_t credit_based_connection_delay(struct bt_conn *conn)
3481 {
3482 /*
3483 * 5.3 Vol 3, Part G, Section 5.4 L2CAP COLLISION MITIGATION
3484 * ... In this situation, the Central may retry
3485 * immediately but the Peripheral shall wait a minimum of 100 ms before retrying;
3486 * on LE connections, the Peripheral shall wait at least 2 *
3487 * (connPeripheralLatency + 1) * connInterval if that is longer.
3488 */
3489
3490 if (IS_ENABLED(CONFIG_BT_CENTRAL) && conn->role == BT_CONN_ROLE_CENTRAL) {
3491 return K_NO_WAIT;
3492 } else if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
3493 uint8_t random;
3494 int err;
3495
3496 err = bt_rand(&random, sizeof(random));
3497 if (err) {
3498 random = 0;
3499 }
3500
3501 const uint8_t rand_delay = random & 0x7; /* Small random delay for IOP */
3502 /* The maximum value of 2 * (latency + 1) multiplied by the
3503 * maximum connection interval is 4000000000 us, which fits in
3504 * 32 bits, so this cannot overflow.
3506 */
3507 const uint32_t calculated_delay_us =
3508 2 * (conn->le.latency + 1) * BT_CONN_INTERVAL_TO_US(conn->le.interval);
3509 const uint32_t calculated_delay_ms = calculated_delay_us / USEC_PER_MSEC;
3510
3511 return K_MSEC(MAX(100, calculated_delay_ms + rand_delay));
3512 }
3513
3514 /* Must be either central or peripheral */
3515 __ASSERT_NO_MSG(false);
3516 CODE_UNREACHABLE;
3517 }
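
/* Worked example for the delay above (illustrative numbers): a peripheral
 * with connPeripheralLatency 0 and a 30 ms connection interval computes
 * 2 * (0 + 1) * 30000 us = 60 ms, so MAX(100, 60 + jitter) keeps it at the
 * 100 ms floor. With latency 4 and a 50 ms interval the result is
 * 2 * 5 * 50000 us = 500 ms, which exceeds the floor and is used as-is
 * (plus the 0-7 ms jitter).
 */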
3518
3519 static int att_schedule_eatt_connect(struct bt_conn *conn, uint8_t chans_to_connect)
3520 {
3521 struct bt_att *att = att_get(conn);
3522
3523 if (!att) {
3524 return -ENOTCONN;
3525 }
3526
3527 att->eatt.chans_to_connect = chans_to_connect;
3528
3529 return k_work_reschedule(&att->eatt.connection_work,
3530 credit_based_connection_delay(conn));
3531 }
3532
3533 static void handle_potential_collision(struct bt_att *att)
3534 {
3535 __ASSERT_NO_MSG(att);
3536
3537 int err;
3538 size_t to_connect = att->eatt.prev_conn_req_missing_chans;
3539
3540 if (att->eatt.prev_conn_rsp_result == BT_L2CAP_LE_ERR_NO_RESOURCES &&
3541 att->eatt.prev_conn_req_result == BT_L2CAP_LE_ERR_NO_RESOURCES) {
3542 LOG_DBG("Credit based connection request collision detected");
3543
3544 /* Reset to not keep retrying on repeated failures */
3545 att->eatt.prev_conn_rsp_result = 0;
3546 att->eatt.prev_conn_req_result = 0;
3547 att->eatt.prev_conn_req_missing_chans = 0;
3548
3549 if (to_connect == 0) {
3550 return;
3551 }
3552
3553 err = att_schedule_eatt_connect(att->conn, to_connect);
3554 if (err < 0) {
3555 LOG_ERR("Failed to schedule EATT connection retry (err: %d)", err);
3556 }
3557 }
3558 }
3559
3560 static void ecred_connect_req_cb(struct bt_conn *conn, uint16_t result, uint16_t psm)
3561 {
3562 struct bt_att *att = att_get(conn);
3563
3564 if (!att) {
3565 return;
3566 }
3567
3568 if (psm != BT_EATT_PSM) {
3569 /* Collision mitigation is only a requirement on the EATT PSM */
3570 return;
3571 }
3572
3573 att->eatt.prev_conn_rsp_result = result;
3574
3575 handle_potential_collision(att);
3576 }
3577
3578 static void ecred_connect_rsp_cb(struct bt_conn *conn, uint16_t result,
3579 uint8_t attempted_to_connect, uint8_t succeeded_to_connect,
3580 uint16_t psm)
3581 {
3582 struct bt_att *att = att_get(conn);
3583
3584 if (!att) {
3585 return;
3586 }
3587
3588 if (psm != BT_EATT_PSM) {
3589 /* Collision mitigation is only a requirement on the EATT PSM */
3590 return;
3591 }
3592
3593 att->eatt.prev_conn_req_result = result;
3594 att->eatt.prev_conn_req_missing_chans =
3595 attempted_to_connect - succeeded_to_connect;
3596
3597 handle_potential_collision(att);
3598 }
3599
3600 int bt_eatt_connect(struct bt_conn *conn, size_t num_channels)
3601 {
3602 struct bt_att_chan *att_chan;
3603 struct bt_att *att;
3604 struct bt_l2cap_chan *chan[CONFIG_BT_EATT_MAX + 1] = {};
3605 size_t offset = 0;
3606 size_t i = 0;
3607 int err;
3608
3609 if (!conn) {
3610 return -EINVAL;
3611 }
3612
3613 /* Check the encryption level for EATT */
3614 if (bt_conn_get_security(conn) < BT_SECURITY_L2) {
3615 /* Vol 3, Part G, Section 5.3.2 Channel Requirements states:
3616 * The channel shall be encrypted.
3617 */
3618 return -EPERM;
3619 }
3620
3621 if (num_channels > CONFIG_BT_EATT_MAX || num_channels == 0) {
3622 return -EINVAL;
3623 }
3624
3625 att_chan = att_get_fixed_chan(conn);
3626 att = att_chan->att;
3627
3628 while (num_channels--) {
3629 att_chan = att_chan_new(att, BIT(ATT_ENHANCED));
3630 if (!att_chan) {
3631 break;
3632 }
3633
3634 chan[i] = &att_chan->chan.chan;
3635 i++;
3636 }
3637
3638 if (!i) {
3639 return -ENOMEM;
3640 }
3641
3642 while (offset < i) {
3643 /* bt_l2cap_ecred_chan_connect() uses the first L2CAP_ECRED_CHAN_MAX_PER_REQ
3644 * elements of the array or until a null-terminator is reached.
3645 */
3646 err = bt_l2cap_ecred_chan_connect(conn, &chan[offset], BT_EATT_PSM);
3647 if (err < 0) {
3648 return err;
3649 }
3650
3651 offset += L2CAP_ECRED_CHAN_MAX_PER_REQ;
3652 }
3653
3654 return 0;
3655 }
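
/* Hypothetical usage sketch for the API above (the calling context is an
 * assumption for the example): once the link reaches security level L2 an
 * application can bring up two extra bearers with
 *
 *   int err = bt_eatt_connect(conn, 2);
 *
 * where -EPERM means the link is not yet encrypted, -EINVAL flags a channel
 * count of zero or above CONFIG_BT_EATT_MAX, and -ENOMEM means no channel
 * context could be allocated.
 */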
3656
3657 #if defined(CONFIG_BT_EATT_AUTO_CONNECT)
3658 static void eatt_auto_connect(struct bt_conn *conn, bt_security_t level,
3659 enum bt_security_err err)
3660 {
3661 int eatt_err;
3662
3663 if (err || level < BT_SECURITY_L2 || !bt_att_fixed_chan_only(conn)) {
3664 return;
3665 }
3666
3667 eatt_err = att_schedule_eatt_connect(conn, CONFIG_BT_EATT_MAX);
3668 if (eatt_err < 0) {
3669 LOG_WRN("Automatic creation of EATT bearers failed on "
3670 "connection %s with error %d",
3671 bt_addr_le_str(bt_conn_get_dst(conn)), eatt_err);
3672 }
3673 }
3674
3675 BT_CONN_CB_DEFINE(conn_callbacks) = {
3676 .security_changed = eatt_auto_connect,
3677 };
3678
3679 #endif /* CONFIG_BT_EATT_AUTO_CONNECT */
3680
3681 int bt_eatt_disconnect(struct bt_conn *conn)
3682 {
3683 struct bt_att_chan *chan;
3684 struct bt_att *att;
3685 int err = -ENOTCONN;
3686
3687 if (!conn) {
3688 return -EINVAL;
3689 }
3690
3691 chan = att_get_fixed_chan(conn);
3692 att = chan->att;
3693
3694 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3695 if (bt_att_is_enhanced(chan)) {
3696 err = bt_l2cap_chan_disconnect(&chan->chan.chan);
3697 }
3698 }
3699
3700 return err;
3701 }
3702
3703 #if defined(CONFIG_BT_TESTING)
3704 int bt_eatt_disconnect_one(struct bt_conn *conn)
3705 {
3706 struct bt_att *att;
3707 struct bt_att_chan *chan;
3708
3709 if (!conn) {
3710 return -EINVAL;
3711 }
3712
3713 chan = att_get_fixed_chan(conn);
3714 att = chan->att;
3715
3716 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
3717 if (bt_att_is_enhanced(chan)) {
3718 return bt_l2cap_chan_disconnect(&chan->chan.chan);
3719 }
3720 }
3721
3722 return -ENOTCONN;
3723 }
3724
3725 int bt_eatt_reconfigure(struct bt_conn *conn, uint16_t mtu)
3726 {
3727 struct bt_att_chan *att_chan = att_get_fixed_chan(conn);
3728 struct bt_att *att = att_chan->att;
3729 struct bt_l2cap_chan *chans[CONFIG_BT_EATT_MAX + 1] = {};
3730 size_t offset = 0;
3731 size_t i = 0;
3732 int err;
3733
3734 SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, att_chan, node) {
3735 if (bt_att_is_enhanced(att_chan)) {
3736 chans[i] = &att_chan->chan.chan;
3737 i++;
3738 }
3739 }
3740
3741 while (offset < i) {
3742 /* bt_l2cap_ecred_chan_reconfigure() uses the first L2CAP_ECRED_CHAN_MAX_PER_REQ
3743 * elements of the array or until a null-terminator is reached.
3744 */
3745 err = bt_l2cap_ecred_chan_reconfigure(&chans[offset], mtu);
3746 if (err < 0) {
3747 return err;
3748 }
3749
3750 offset += L2CAP_ECRED_CHAN_MAX_PER_REQ;
3751 }
3752
3753 return 0;
3754 }
3755 #endif /* CONFIG_BT_TESTING */
3756 #endif /* CONFIG_BT_EATT */
3757
3758 static int bt_eatt_accept(struct bt_conn *conn, struct bt_l2cap_server *server,
3759 struct bt_l2cap_chan **chan)
3760 {
3761 struct bt_att_chan *att_chan = att_get_fixed_chan(conn);
3762 struct bt_att *att = att_chan->att;
3763
3764 LOG_DBG("conn %p handle %u", conn, conn->handle);
3765
3766 att_chan = att_chan_new(att, BIT(ATT_ENHANCED));
3767 if (att_chan) {
3768 *chan = &att_chan->chan.chan;
3769 return 0;
3770 }
3771
3772 return -ENOMEM;
3773 }
3774
3775 static void bt_eatt_init(void)
3776 {
3777 int err;
3778 static struct bt_l2cap_server eatt_l2cap = {
3779 .psm = BT_EATT_PSM,
3780 .sec_level = BT_SECURITY_L2,
3781 .accept = bt_eatt_accept,
3782 };
3783 struct bt_l2cap_server *registered_server;
3784
3785 LOG_DBG("");
3786
3787 /* Check if eatt_l2cap server has already been registered. */
3788 registered_server = bt_l2cap_server_lookup_psm(eatt_l2cap.psm);
3789 if (registered_server != &eatt_l2cap) {
3790 err = bt_l2cap_server_register(&eatt_l2cap);
3791 if (err < 0) {
3792 LOG_ERR("EATT Server registration failed %d", err);
3793 }
3794 }
3795
3796 #if defined(CONFIG_BT_EATT)
3797 static const struct bt_l2cap_ecred_cb cb = {
3798 .ecred_conn_rsp = ecred_connect_rsp_cb,
3799 .ecred_conn_req = ecred_connect_req_cb,
3800 };
3801
3802 bt_l2cap_register_ecred_cb(&cb);
3803 #endif /* CONFIG_BT_EATT */
3804 }
3805
bt_att_init(void)3806 void bt_att_init(void)
3807 {
3808 bt_gatt_init();
3809
3810 if (IS_ENABLED(CONFIG_BT_EATT)) {
3811 bt_eatt_init();
3812 }
3813 }
3814
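/* Report the usable ATT_MTU for the connection. With EATT there may be
 * several bearers with different MTUs, so the largest per-bearer MTU (the
 * minimum of that bearer's Rx and Tx MTU) is returned; 0 means no ATT
 * channel is connected.
 */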
uint16_t bt_att_get_mtu(struct bt_conn *conn)
{
	struct bt_att_chan *chan, *tmp;
	struct bt_att *att;
	uint16_t mtu = 0;

	att = att_get(conn);
	if (!att) {
		return 0;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (bt_att_mtu(chan) > mtu) {
			mtu = bt_att_mtu(chan);
		}
	}

	return mtu;
}

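/* Called when a single bearer's MTU has been (re)negotiated: if the updated
 * channel now holds the largest Tx or Rx MTU on the connection, notify GATT
 * so its cached maximums can be refreshed.
 */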
static void att_chan_mtu_updated(struct bt_att_chan *updated_chan)
{
	struct bt_att *att = updated_chan->att;
	struct bt_att_chan *chan, *tmp;
	uint16_t max_tx = 0, max_rx = 0;

	/* Get maximum MTUs of the other channels */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		if (chan == updated_chan) {
			continue;
		}
		max_tx = MAX(max_tx, chan->chan.tx.mtu);
		max_rx = MAX(max_rx, chan->chan.rx.mtu);
	}

	/* If either maximum MTU has changed */
	if ((updated_chan->chan.tx.mtu > max_tx) ||
	    (updated_chan->chan.rx.mtu > max_rx)) {
		max_tx = MAX(max_tx, updated_chan->chan.tx.mtu);
		max_rx = MAX(max_rx, updated_chan->chan.rx.mtu);
		bt_gatt_att_max_mtu_changed(att->conn, max_tx, max_rx);
	}
}

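/* Allocate a request context from req_slab. When called from the thread that
 * also handles ATT responses, the timeout is forced to K_NO_WAIT: blocking
 * there could never be satisfied and would deadlock the host.
 */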
struct bt_att_req *bt_att_req_alloc(k_timeout_t timeout)
{
	struct bt_att_req *req = NULL;

	if (k_current_get() == att_handle_rsp_thread) {
		/* No req will be fulfilled while blocking on the bt_recv
		 * thread. Blocking would cause deadlock.
		 */
		LOG_DBG("Timeout discarded. No blocking on bt_recv thread.");
		timeout = K_NO_WAIT;
	}

	/* Reserve space for request */
	if (k_mem_slab_alloc(&req_slab, (void **)&req, timeout)) {
		LOG_DBG("No space for req");
		return NULL;
	}

	LOG_DBG("req %p", req);

	memset(req, 0, sizeof(*req));

	return req;
}

void bt_att_req_free(struct bt_att_req *req)
{
	LOG_DBG("req %p", req);

	if (req->buf) {
		net_buf_unref(req->buf);
		req->buf = NULL;
	}

	k_mem_slab_free(&req_slab, (void *)req);
}

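/* Queue a PDU for transmission. The buffer reference is always consumed:
 * on -ENOTCONN it is unreferenced here rather than returned to the caller.
 */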
int bt_att_send(struct bt_conn *conn, struct net_buf *buf)
{
	struct bt_att *att;

	__ASSERT_NO_MSG(conn);
	__ASSERT_NO_MSG(buf);

	att = att_get(conn);
	if (!att) {
		net_buf_unref(buf);
		return -ENOTCONN;
	}

	net_buf_put(&att->tx_queue, buf);
	att_send_process(att);

	return 0;
}

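/* Append a request to the pending list and kick transmission. The scheduler
 * lock presumably keeps the append and the initial send processing from
 * being preempted while the request is in a half-queued state.
 */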
int bt_att_req_send(struct bt_conn *conn, struct bt_att_req *req)
{
	struct bt_att *att;

	LOG_DBG("conn %p req %p", conn, req);

	__ASSERT_NO_MSG(conn);
	__ASSERT_NO_MSG(req);

	k_sched_lock();

	att = att_get(conn);
	if (!att) {
		k_sched_unlock();
		return -ENOTCONN;
	}

	sys_slist_append(&att->reqs, &req->node);
	att_req_send_process(att);

	k_sched_unlock();

	return 0;
}

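/* Cancel the channel's outstanding request, if it matches. chan->req is
 * replaced with the static `cancel` placeholder (defined earlier in this
 * file) so the bearer still expects a response while the freed request is no
 * longer referenced.
 */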
static bool bt_att_chan_req_cancel(struct bt_att_chan *chan,
				   struct bt_att_req *req)
{
	if (chan->req != req) {
		return false;
	}

	chan->req = &cancel;

	bt_att_req_free(req);

	return true;
}

void bt_att_req_cancel(struct bt_conn *conn, struct bt_att_req *req)
{
	struct bt_att *att;
	struct bt_att_chan *chan, *tmp;

	LOG_DBG("req %p", req);

	if (!conn || !req) {
		return;
	}

	att = att_get(conn);
	if (!att) {
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&att->chans, chan, tmp, node) {
		/* Check if request is outstanding */
		if (bt_att_chan_req_cancel(chan, req)) {
			return;
		}
	}

	/* Remove request from the list */
	sys_slist_find_and_remove(&att->reqs, &req->node);

	bt_att_req_free(req);
}

struct bt_att_req *bt_att_find_req_by_user_data(struct bt_conn *conn, const void *user_data)
{
	struct bt_att *att;
	struct bt_att_chan *chan;
	struct bt_att_req *req;

	att = att_get(conn);
	if (!att) {
		return NULL;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
		/* A channel with no outstanding request has a NULL req;
		 * guard against dereferencing it.
		 */
		if (chan->req && chan->req->user_data == user_data) {
			return chan->req;
		}
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&att->reqs, req, node) {
		if (req->user_data == user_data) {
			return req;
		}
	}

	return NULL;
}

bool bt_att_fixed_chan_only(struct bt_conn *conn)
{
#if defined(CONFIG_BT_EATT)
	return bt_eatt_count(conn) == 0;
#else
	return true;
#endif /* CONFIG_BT_EATT */
}

void bt_att_clear_out_of_sync_sent(struct bt_conn *conn)
{
	struct bt_att *att = att_get(conn);
	struct bt_att_chan *chan;

	if (!att) {
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&att->chans, chan, node) {
		atomic_clear_bit(chan->flags, ATT_OUT_OF_SYNC_SENT);
	}
}

bool bt_att_out_of_sync_sent_on_fixed(struct bt_conn *conn)
{
	struct bt_l2cap_chan *l2cap_chan;
	struct bt_att_chan *att_chan;

	l2cap_chan = bt_l2cap_le_lookup_rx_cid(conn, BT_L2CAP_CID_ATT);
	if (!l2cap_chan) {
		return false;
	}

	att_chan = ATT_CHAN(l2cap_chan);
	return atomic_test_bit(att_chan->flags, ATT_OUT_OF_SYNC_SENT);
}

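/* Attach completion metadata to an outgoing ATT PDU. attr_count starts at 1
 * and is bumped via bt_att_increment_tx_meta_data_attr_count() when several
 * attributes share one PDU.
 *
 * Illustrative use from a GATT sender (callback and context names are
 * hypothetical):
 *
 *	bt_att_set_tx_meta_data(buf, notify_complete_cb, ctx,
 *				BT_ATT_CHAN_OPT_NONE);
 *	err = bt_att_send(conn, buf);
 */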
void bt_att_set_tx_meta_data(struct net_buf *buf, bt_gatt_complete_func_t func, void *user_data,
			     enum bt_att_chan_opt chan_opt)
{
	struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);

	data->func = func;
	data->user_data = user_data;
	data->attr_count = 1;
	data->chan_opt = chan_opt;
}

void bt_att_increment_tx_meta_data_attr_count(struct net_buf *buf, uint16_t attr_count)
{
	struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);

	data->attr_count += attr_count;
}

bool bt_att_tx_meta_data_match(const struct net_buf *buf, bt_gatt_complete_func_t func,
			       const void *user_data, enum bt_att_chan_opt chan_opt)
{
	const struct bt_att_tx_meta_data *meta = bt_att_get_tx_meta_data(buf);

	return ((meta->func == func) &&
		(meta->user_data == user_data) &&
		(meta->chan_opt == chan_opt));
}

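/* Validate channel options against the current connection state: the two
 * "only" flags are mutually exclusive, and requesting enhanced-only traffic
 * needs at least one connected EATT bearer plus encryption (security > L1).
 */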
bool bt_att_chan_opt_valid(struct bt_conn *conn, enum bt_att_chan_opt chan_opt)
{
	if ((chan_opt & (BT_ATT_CHAN_OPT_ENHANCED_ONLY | BT_ATT_CHAN_OPT_UNENHANCED_ONLY)) ==
	    (BT_ATT_CHAN_OPT_ENHANCED_ONLY | BT_ATT_CHAN_OPT_UNENHANCED_ONLY)) {
		/* Enhanced and Unenhanced are mutually exclusive */
		return false;
	}

	/* Choosing EATT requires EATT channels connected and encryption enabled */
	if (chan_opt & BT_ATT_CHAN_OPT_ENHANCED_ONLY) {
		return (bt_conn_get_security(conn) > BT_SECURITY_L1) &&
		       !bt_att_fixed_chan_only(conn);
	}

	return true;
}

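/* Register (or, with a NULL argument, unregister) the application's GATT
 * authorization callbacks. Only available with
 * CONFIG_BT_GATT_AUTHORIZATION_CUSTOM; a second registration without an
 * intervening unregister yields -EALREADY.
 */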
int bt_gatt_authorization_cb_register(const struct bt_gatt_authorization_cb *cb)
{
	if (!IS_ENABLED(CONFIG_BT_GATT_AUTHORIZATION_CUSTOM)) {
		return -ENOSYS;
	}

	if (!cb) {
		authorization_cb = NULL;
		return 0;
	}

	if (authorization_cb) {
		return -EALREADY;
	}

	authorization_cb = cb;

	return 0;
}
