1 /******************************************************************************
2 *
3 * Copyright (C) 2014 Google, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 ******************************************************************************/
18 #include <string.h>
19 #include "sdkconfig.h"
20 #include "esp_bt.h"
21
22 #include "common/bt_defs.h"
23 #include "common/bt_trace.h"
24 #include "stack/hcidefs.h"
25 #include "stack/hcimsgs.h"
26 #include "stack/btu.h"
27 #include "common/bt_vendor_lib.h"
28 #include "hci/hci_internals.h"
29 #include "hci/hci_hal.h"
30 #include "hci/hci_layer.h"
31 #include "osi/allocator.h"
32 #include "hci/packet_fragmenter.h"
33 #include "osi/list.h"
34 #include "osi/alarm.h"
35 #include "osi/thread.h"
36 #include "osi/mutex.h"
37 #include "osi/fixed_queue.h"
38 #include "osi/fixed_pkt_queue.h"
39
40 #define HCI_HOST_TASK_PINNED_TO_CORE (TASK_PINNED_TO_CORE)
41 #define HCI_HOST_TASK_STACK_SIZE (2048 + BT_TASK_EXTRA_STACK_SIZE)
42 #define HCI_HOST_TASK_PRIO (BT_TASK_MAX_PRIORITIES - 3)
43 #define HCI_HOST_TASK_NAME "hciT"
44 #define HCI_HOST_TASK_WORKQUEUE_NUM (2)
45 #define HCI_HOST_TASK_WORKQUEUE0_LEN (1) // for downstream datapath
46 #define HCI_HOST_TASK_WORKQUEUE1_LEN (1) // for upstream datapath
47
48 #define HCI_DOWNSTREAM_DATA_QUEUE_IDX (0)
49
// Bookkeeping for commands already sent to the controller that are waiting
// for a Command Complete / Command Status event.
typedef struct {
    bool timer_is_set;                          // true while command_response_timer is armed
    osi_alarm_t *command_response_timer;        // fires after COMMAND_PENDING_TIMEOUT with no response
    list_t *commands_pending_response;          // pkt_linked_item_t entries awaiting a response
    osi_mutex_t commands_pending_response_lock; // guards the list and timer_is_set
} command_waiting_response_t;
56
// Module-wide state for the HCI host layer.
typedef struct {
    int command_credits;                      // HCI command flow-control credits (see BT spec Vol 2, Part E, 4.4)
    fixed_pkt_queue_t *command_queue;         // commands waiting to be sent downstream
    fixed_queue_t *packet_queue;              // ACL/SCO data waiting to be sent downstream
    struct osi_event *downstream_data_ready;  // event posted to the HCI host thread to drain the TX queues
    command_waiting_response_t cmd_waiting_q; // commands sent, awaiting controller response

    /*
    non_repeating_timer_t *command_response_timer;
    list_t *commands_pending_response;
    osi_mutex_t commands_pending_response_lock;
    */
} hci_host_env_t;
70
71 // Using a define here, because it can be stringified for the property lookup
72 static const uint32_t COMMAND_PENDING_TIMEOUT = 8000;
73
74 // Our interface
75 static bool interface_created;
76 static hci_t interface;
77 static hci_host_env_t hci_host_env;
78 static osi_thread_t *hci_host_thread;
79 static bool hci_host_startup_flag;
80
81 // Modules we import and callbacks we export
82 static const hci_hal_t *hal;
83 static const hci_hal_callbacks_t hal_callbacks;
84 static const packet_fragmenter_t *packet_fragmenter;
85 static const packet_fragmenter_callbacks_t packet_fragmenter_callbacks;
86
87 static int hci_layer_init_env(void);
88 static void hci_layer_deinit_env(void);
89 static void hci_downstream_data_handler(void *arg);
90 static void event_command_ready(fixed_pkt_queue_t *queue);
91 static void event_packet_ready(fixed_queue_t *queue);
92 static void restart_command_waiting_response_timer(command_waiting_response_t *cmd_wait_q);
93 static void command_timed_out(void *context);
94 static void hal_says_packet_ready(BT_HDR *packet);
95 static bool filter_incoming_event(BT_HDR *packet);
96 static serial_data_type_t event_to_data_type(uint16_t event);
97 static pkt_linked_item_t *get_waiting_command(command_opcode_t opcode);
98 static void dispatch_reassembled(BT_HDR *packet);
99 static void dispatch_adv_report(pkt_linked_item_t *linked_pkt);
100
101 // Module lifecycle functions
hci_start_up(void)102 int hci_start_up(void)
103 {
104 if (hci_layer_init_env()) {
105 goto error;
106 }
107
108 const size_t workqueue_len[] = {HCI_HOST_TASK_WORKQUEUE0_LEN, HCI_HOST_TASK_WORKQUEUE1_LEN};
109 hci_host_thread = osi_thread_create(HCI_HOST_TASK_NAME, HCI_HOST_TASK_STACK_SIZE, HCI_HOST_TASK_PRIO, HCI_HOST_TASK_PINNED_TO_CORE,
110 HCI_HOST_TASK_WORKQUEUE_NUM, workqueue_len);
111 if (hci_host_thread == NULL) {
112 return -2;
113 }
114
115 osi_event_bind(hci_host_env.downstream_data_ready, hci_host_thread, HCI_DOWNSTREAM_DATA_QUEUE_IDX);
116
117 packet_fragmenter->init(&packet_fragmenter_callbacks);
118 hal->open(&hal_callbacks, hci_host_thread);
119
120 hci_host_startup_flag = true;
121 return 0;
122 error:
123 hci_shut_down();
124 return -1;
125 }
126
// Tears down the HCI layer: clears the startup flag first so no new work is
// accepted, releases environment resources, cleans up the fragmenter, closes
// the HAL, and frees the host task. Also used by the hci_start_up() error
// path, so the callees are expected to tolerate partially-initialized state.
void hci_shut_down(void)
{
    hci_host_startup_flag = false;
    hci_layer_deinit_env();

    packet_fragmenter->cleanup();

    //low_power_manager->cleanup();
    hal->close();

    osi_thread_free(hci_host_thread);
    hci_host_thread = NULL;
}
140
hci_downstream_data_post(uint32_t timeout)141 bool hci_downstream_data_post(uint32_t timeout)
142 {
143 if (hci_host_env.downstream_data_ready == NULL) {
144 HCI_TRACE_WARNING("%s downstream_data_ready event not created", __func__);
145 return false;
146 }
147 return osi_thread_post_event(hci_host_env.downstream_data_ready, timeout);
148 }
149
hci_layer_init_env(void)150 static int hci_layer_init_env(void)
151 {
152 command_waiting_response_t *cmd_wait_q;
153
154 // The host is only allowed to send at most one command initially,
155 // as per the Bluetooth spec, Volume 2, Part E, 4.4 (Command Flow Control)
156 // This value can change when you get a command complete or command status event.
157 hci_host_env.command_credits = 1;
158 hci_host_env.command_queue = fixed_pkt_queue_new(QUEUE_SIZE_MAX);
159 if (hci_host_env.command_queue) {
160 fixed_pkt_queue_register_dequeue(hci_host_env.command_queue, event_command_ready);
161 } else {
162 HCI_TRACE_ERROR("%s unable to create pending command queue.", __func__);
163 return -1;
164 }
165
166 struct osi_event *event = osi_event_create(hci_downstream_data_handler, NULL);
167 assert(event != NULL);
168 hci_host_env.downstream_data_ready = event;
169
170 hci_host_env.packet_queue = fixed_queue_new(QUEUE_SIZE_MAX);
171 if (hci_host_env.packet_queue) {
172 fixed_queue_register_dequeue(hci_host_env.packet_queue, event_packet_ready);
173 } else {
174 HCI_TRACE_ERROR("%s unable to create pending packet queue.", __func__);
175 return -1;
176 }
177
178 // Init Commands waiting response list and timer
179 cmd_wait_q = &hci_host_env.cmd_waiting_q;
180 cmd_wait_q->timer_is_set = false;
181 cmd_wait_q->commands_pending_response = list_new(NULL);
182 if (!cmd_wait_q->commands_pending_response) {
183 HCI_TRACE_ERROR("%s unable to create list for commands pending response.", __func__);
184 return -1;
185 }
186 osi_mutex_new(&cmd_wait_q->commands_pending_response_lock);
187 cmd_wait_q->command_response_timer = osi_alarm_new("cmd_rsp_to", command_timed_out, cmd_wait_q, COMMAND_PENDING_TIMEOUT);
188 if (!cmd_wait_q->command_response_timer) {
189 HCI_TRACE_ERROR("%s unable to create command response timer.", __func__);
190 return -1;
191 }
192 #if (BLE_50_FEATURE_SUPPORT == TRUE)
193 btsnd_hcic_ble_sync_sem_init();
194 #endif // #if (BLE_50_FEATURE_SUPPORT == TRUE)
195
196 return 0;
197 }
198
hci_layer_deinit_env(void)199 static void hci_layer_deinit_env(void)
200 {
201 command_waiting_response_t *cmd_wait_q;
202
203 osi_event_delete(hci_host_env.downstream_data_ready);
204 hci_host_env.downstream_data_ready = NULL;
205
206 if (hci_host_env.command_queue) {
207 fixed_pkt_queue_free(hci_host_env.command_queue, (fixed_pkt_queue_free_cb)osi_free_func);
208 }
209 if (hci_host_env.packet_queue) {
210 fixed_queue_free(hci_host_env.packet_queue, osi_free_func);
211 }
212
213 cmd_wait_q = &hci_host_env.cmd_waiting_q;
214 list_free(cmd_wait_q->commands_pending_response);
215 osi_mutex_free(&cmd_wait_q->commands_pending_response_lock);
216 osi_alarm_free(cmd_wait_q->command_response_timer);
217 cmd_wait_q->command_response_timer = NULL;
218 #if (BLE_50_FEATURE_SUPPORT == TRUE)
219 btsnd_hcic_ble_sync_sem_deinit();
220 #endif // #if (BLE_50_FEATURE_SUPPORT == TRUE)
221 }
222
hci_downstream_data_handler(void * arg)223 static void hci_downstream_data_handler(void *arg)
224 {
225 /*
226 * Previous task handles RX queue and two TX Queues, Since there is
227 * a RX Thread Task in H4 layer which receives packet from driver layer.
228 * Now HCI Host Task has been optimized to only process TX Queue
229 * including command and data queue. And command queue has high priority,
230 * All packets will be directly copied to single queue in driver layer with
231 * H4 type header added (1 byte).
232 */
233 while (esp_vhci_host_check_send_available()) {
234 /*Now Target only allowed one packet per TX*/
235 BT_HDR *pkt = packet_fragmenter->fragment_current_packet();
236 if (pkt != NULL) {
237 packet_fragmenter->fragment_and_dispatch(pkt);
238 } else if (!fixed_pkt_queue_is_empty(hci_host_env.command_queue) &&
239 hci_host_env.command_credits > 0) {
240 fixed_pkt_queue_process(hci_host_env.command_queue);
241 } else if (!fixed_queue_is_empty(hci_host_env.packet_queue)) {
242 fixed_queue_process(hci_host_env.packet_queue);
243 } else {
244 // No downstream packet to send, stop processing
245 break;
246 }
247 }
248 }
249
// Enqueues an HCI command for transmission by the host task.
// `command` must be a metadata-backed buffer (layer_specific ==
// HCI_CMD_BUF_TYPE_METADATA); the linked-packet wrapper is recovered from
// its metadata and queued, then the host task is woken.
// NOTE(review): the complete_callback/status_callback/context parameters are
// not used in this body — presumably the caller already stored them in the
// command metadata; confirm against the call sites.
static void transmit_command(
    BT_HDR *command,
    command_complete_cb complete_callback,
    command_status_cb status_callback,
    void *context)
{
    hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(command);
    pkt_linked_item_t *linked_pkt = HCI_GET_CMD_LINKED_STRUCT(metadata);

    assert(command->layer_specific == HCI_CMD_BUF_TYPE_METADATA);
    metadata->flags_vnd |= HCI_CMD_MSG_F_VND_QUEUED;

    // Store the command message type in the event field
    // in case the upper layer didn't already
    command->event = MSG_STACK_TO_HC_HCI_CMD;

    HCI_TRACE_DEBUG("HCI Enqueue Command opcode=0x%x\n", metadata->opcode);
    BTTRC_DUMP_BUFFER(NULL, command->data + command->offset, command->len);

    fixed_pkt_queue_enqueue(hci_host_env.command_queue, linked_pkt, FIXED_PKT_QUEUE_MAX_TIMEOUT);
    hci_downstream_data_post(OSI_THREAD_MAX_TIMEOUT);

}
273
transmit_command_futured(BT_HDR * command)274 static future_t *transmit_command_futured(BT_HDR *command)
275 {
276 hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(command);
277 pkt_linked_item_t *linked_pkt = HCI_GET_CMD_LINKED_STRUCT(metadata);
278
279 assert(command->layer_specific == HCI_CMD_BUF_TYPE_METADATA);
280 metadata->flags_vnd |= (HCI_CMD_MSG_F_VND_QUEUED | HCI_CMD_MSG_F_VND_FUTURE);
281
282 future_t *future = future_new();
283
284 metadata->complete_future = future;
285
286 // Store the command message type in the event field
287 // in case the upper layer didn't already
288 command->event = MSG_STACK_TO_HC_HCI_CMD;
289
290 fixed_pkt_queue_enqueue(hci_host_env.command_queue, linked_pkt, FIXED_PKT_QUEUE_MAX_TIMEOUT);
291 hci_downstream_data_post(OSI_THREAD_MAX_TIMEOUT);
292 return future;
293 }
294
// Enqueues a downstream data packet (ACL/SCO) and wakes the host task.
// Commands must not use this path: they go through transmit_command(), which
// tracks credits and pending responses.
static void transmit_downward(uint16_t type, void *data)
{
    if (type != MSG_STACK_TO_HC_HCI_CMD) {
        fixed_queue_enqueue(hci_host_env.packet_queue, data, FIXED_QUEUE_MAX_TIMEOUT);
    } else {
        HCI_TRACE_ERROR("%s legacy transmit of command. Use transmit_command instead.\n", __func__);
        assert(0);
    }

    hci_downstream_data_post(OSI_THREAD_MAX_TIMEOUT);
}
306
307
308 // Command/packet transmitting functions
// Dequeue callback for the command queue, run on the HCI host task when a
// queued command is ready to be sent. NOACK commands are transmitted and
// freed immediately; all other commands consume one command credit and are
// parked on commands_pending_response until the matching Command
// Complete/Status event is handled by filter_incoming_event().
static void event_command_ready(fixed_pkt_queue_t *queue)
{
    pkt_linked_item_t *wait_entry = NULL;
    command_waiting_response_t *cmd_wait_q = &hci_host_env.cmd_waiting_q;

    wait_entry = fixed_pkt_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);
    hci_cmd_metadata_t *metadata = (hci_cmd_metadata_t *)(wait_entry->data);
    metadata->flags_vnd |= HCI_CMD_MSG_F_VND_SENT;
    metadata->flags_vnd &= ~HCI_CMD_MSG_F_VND_QUEUED;

    if (metadata->flags_src & HCI_CMD_MSG_F_SRC_NOACK) {
        // No response expected: send and free right away, no credit consumed.
        packet_fragmenter->fragment_and_dispatch(&metadata->command);
        hci_cmd_free_cb free_func = metadata->command_free_cb ? metadata->command_free_cb : (hci_cmd_free_cb) osi_free_func;
        free_func(wait_entry);
        return;
    }
    hci_host_env.command_credits--;
    // Move it to the list of commands awaiting response
    osi_mutex_lock(&cmd_wait_q->commands_pending_response_lock, OSI_MUTEX_MAX_TIMEOUT);
    list_append(cmd_wait_q->commands_pending_response, wait_entry);
    osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);

    // Send it off
    packet_fragmenter->fragment_and_dispatch(&metadata->command);

    restart_command_waiting_response_timer(cmd_wait_q);
}
336
// Dequeue callback for the downstream data queue: pulls the next packet and
// hands it to the fragmenter for transmission. Works for any fixed queue
// carrying BT_HDR packets.
static void event_packet_ready(fixed_queue_t *queue)
{
    BT_HDR *next_packet = (BT_HDR *)fixed_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);

    packet_fragmenter->fragment_and_dispatch(next_packet);
}
344
345 // Callback for the fragmenter to send a fragment
// Fragmenter callback: pushes one fragment to the HAL.
// Command packets are never freed here — they stay alive on the
// pending-response list until their response arrives; other packet types are
// released once the caller signals the transmit is finished.
static void transmit_fragment(BT_HDR *packet, bool send_transmit_finished)
{
    uint16_t msg_event = packet->event & MSG_EVT_MASK;
    serial_data_type_t data_type = event_to_data_type(msg_event);

    hal->transmit_data(data_type, packet->data + packet->offset, packet->len);

    if (send_transmit_finished && msg_event != MSG_STACK_TO_HC_HCI_CMD) {
        osi_free(packet);
    }
}
357
// Fragmenter callback invoked when transmission of a packet stops.
// If every fragment went out, the packet is done and freed. Otherwise the
// controller ran out of buffer space mid-packet: the remainder is handed
// back up via dispatch_reassembled() — L2CAP keeps it in its link transmit
// queue (p_lcb->link_xmit_data_q) and resends once controller buffers free
// up. TODO(zachoverflow): rework upper layer so this isn't necessary.
static void fragmenter_transmit_finished(BT_HDR *packet, bool all_fragments_sent)
{
    if (!all_fragments_sent) {
        dispatch_reassembled(packet);
        //data_dispatcher_dispatch(interface.event_dispatcher, packet->event & MSG_EVT_MASK, packet);
        return;
    }

    osi_free(packet);
}
379
// (Re)arms the command-response timeout under the pending-response lock:
// cancels any running timer, then arms a fresh COMMAND_PENDING_TIMEOUT only
// while at least one command is still awaiting its response.
static void restart_command_waiting_response_timer(command_waiting_response_t *cmd_wait_q)
{
    osi_mutex_lock(&cmd_wait_q->commands_pending_response_lock, OSI_MUTEX_MAX_TIMEOUT);

    if (cmd_wait_q->timer_is_set) {
        osi_alarm_cancel(cmd_wait_q->command_response_timer);
        cmd_wait_q->timer_is_set = false;
    }

    bool has_pending = !list_is_empty(cmd_wait_q->commands_pending_response);
    if (has_pending) {
        osi_alarm_set(cmd_wait_q->command_response_timer, COMMAND_PENDING_TIMEOUT);
        cmd_wait_q->timer_is_set = true;
    }

    osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
}
393
command_timed_out(void * context)394 static void command_timed_out(void *context)
395 {
396 command_waiting_response_t *cmd_wait_q = (command_waiting_response_t *)context;
397 pkt_linked_item_t *wait_entry;
398
399 osi_mutex_lock(&cmd_wait_q->commands_pending_response_lock, OSI_MUTEX_MAX_TIMEOUT);
400 wait_entry = (list_is_empty(cmd_wait_q->commands_pending_response) ?
401 NULL : list_front(cmd_wait_q->commands_pending_response));
402 osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
403
404 if (wait_entry == NULL) {
405 HCI_TRACE_ERROR("%s with no commands pending response", __func__);
406 } else
407 // We shouldn't try to recover the stack from this command timeout.
408 // If it's caused by a software bug, fix it. If it's a hardware bug, fix it.
409 {
410 hci_cmd_metadata_t *metadata = (hci_cmd_metadata_t *)(wait_entry->data);
411 HCI_TRACE_ERROR("%s hci layer timeout waiting for response to a command. opcode: 0x%x", __func__, metadata->opcode);
412 UNUSED(metadata);
413 }
414 }
415
416 // Event/packet receiving functions
// HAL callback for an inbound packet. HCI events may be consumed locally by
// filter_incoming_event() (command complete/status bookkeeping); only events
// that were not intercepted are forwarded upward. All other packet types go
// through the fragmenter for reassembly first.
static void hal_says_packet_ready(BT_HDR *packet)
{
    if (packet->event == MSG_HC_TO_STACK_HCI_EVT) {
        if (!filter_incoming_event(packet)) {
            dispatch_reassembled(packet);
        }
    } else {
        packet_fragmenter->reassemble_and_dispatch(packet);
    }
}
425
// HAL callback for an inbound BLE advertising report: forwards the linked
// packet straight to the upper layer via the BTU task.
static void hal_says_adv_rpt_ready(pkt_linked_item_t *linked_pkt)
{
    dispatch_adv_report(linked_pkt);
}
430
431 // Returns true if the event was intercepted and should not proceed to
432 // higher layers. Also inspects an incoming event for interesting
433 // information, like how many commands are now able to be sent.
filter_incoming_event(BT_HDR * packet)434 static bool filter_incoming_event(BT_HDR *packet)
435 {
436 pkt_linked_item_t *wait_entry = NULL;
437 hci_cmd_metadata_t *metadata = NULL;
438 uint8_t *stream = packet->data + packet->offset;
439 uint8_t event_code;
440 command_opcode_t opcode;
441
442 STREAM_TO_UINT8(event_code, stream);
443 STREAM_SKIP_UINT8(stream); // Skip the parameter total length field
444
445 HCI_TRACE_DEBUG("Receive packet event_code=0x%x\n", event_code);
446
447 if (event_code == HCI_COMMAND_COMPLETE_EVT) {
448 STREAM_TO_UINT8(hci_host_env.command_credits, stream);
449 STREAM_TO_UINT16(opcode, stream);
450 wait_entry = get_waiting_command(opcode);
451 metadata = (hci_cmd_metadata_t *)(wait_entry->data);
452 if (!wait_entry) {
453 HCI_TRACE_WARNING("%s command complete event with no matching command. opcode: 0x%x.", __func__, opcode);
454 } else if (metadata->command_complete_cb) {
455 metadata->command_complete_cb(packet, metadata->context);
456 #if (BLE_50_FEATURE_SUPPORT == TRUE)
457 BlE_SYNC *sync_info = btsnd_hcic_ble_get_sync_info();
458 if(!sync_info) {
459 HCI_TRACE_WARNING("%s sync_info is NULL. opcode = 0x%x", __func__, opcode);
460 } else {
461 if (sync_info->sync_sem && sync_info->opcode == opcode) {
462 osi_sem_give(&sync_info->sync_sem);
463 sync_info->opcode = 0;
464 }
465 }
466 #endif // #if (BLE_50_FEATURE_SUPPORT == TRUE)
467 } else if (metadata->flags_vnd & HCI_CMD_MSG_F_VND_FUTURE) {
468 future_ready((future_t *)(metadata->complete_future), packet);
469 }
470
471 goto intercepted;
472 } else if (event_code == HCI_COMMAND_STATUS_EVT) {
473 uint8_t status;
474 STREAM_TO_UINT8(status, stream);
475 STREAM_TO_UINT8(hci_host_env.command_credits, stream);
476 STREAM_TO_UINT16(opcode, stream);
477
478 // If a command generates a command status event, it won't be getting a command complete event
479
480 wait_entry = get_waiting_command(opcode);
481 metadata = (hci_cmd_metadata_t *)(wait_entry->data);
482 if (!wait_entry) {
483 HCI_TRACE_WARNING("%s command status event with no matching command. opcode: 0x%x", __func__, opcode);
484 } else if (metadata->command_status_cb) {
485 metadata->command_status_cb(status, &metadata->command, metadata->context);
486 }
487
488 goto intercepted;
489 }
490
491 return false;
492 intercepted:
493 restart_command_waiting_response_timer(&hci_host_env.cmd_waiting_q);
494
495 /*Tell HCI Host Task to continue TX Pending commands*/
496 if (hci_host_env.command_credits &&
497 !fixed_pkt_queue_is_empty(hci_host_env.command_queue)) {
498 hci_downstream_data_post(OSI_THREAD_MAX_TIMEOUT);
499 }
500
501 if (wait_entry) {
502 // If it has a callback, it's responsible for freeing the packet
503 if (event_code == HCI_COMMAND_STATUS_EVT ||
504 (!metadata->command_complete_cb && !metadata->complete_future)) {
505 osi_free(packet);
506 }
507
508 // If it has a callback, it's responsible for freeing the command
509 if (event_code == HCI_COMMAND_COMPLETE_EVT || !metadata->command_status_cb) {
510 hci_cmd_free_cb free_func = metadata->command_free_cb ? metadata->command_free_cb : (hci_cmd_free_cb) osi_free_func;
511 free_func(wait_entry);
512 }
513 } else {
514 osi_free(packet);
515 }
516
517 return true;
518 }
519
520 // Callback for the fragmenter to dispatch up a completely reassembled packet
// Callback for the fragmenter to dispatch up a completely reassembled packet.
// Events should already have been dispatched before this point. Posts the
// packet to the BTU task; on post failure the packet would otherwise leak,
// so it is freed here.
static void dispatch_reassembled(BT_HDR *packet)
{
    bool posted = btu_task_post(SIG_BTU_HCI_MSG, packet, OSI_THREAD_MAX_TIMEOUT);

    if (!posted) {
        osi_free(packet);
    }
}
529
// Posts a BLE advertising report to the BTU task. On post failure the
// linked packet is freed here and, when adv-report flow control is enabled,
// its credit is returned to the controller.
static void dispatch_adv_report(pkt_linked_item_t *linked_pkt)
{
    bool posted = btu_task_post(SIG_BTU_HCI_ADV_RPT_MSG, linked_pkt, OSI_THREAD_MAX_TIMEOUT);

    if (!posted) {
        osi_free(linked_pkt);
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
        hci_adv_credits_try_release(1);
#endif
    }
}
541 // Misc internal functions
542
543 // TODO(zachoverflow): we seem to do this a couple places, like the HCI inject module. #centralize
event_to_data_type(uint16_t event)544 static serial_data_type_t event_to_data_type(uint16_t event)
545 {
546 if (event == MSG_STACK_TO_HC_HCI_ACL) {
547 return DATA_TYPE_ACL;
548 } else if (event == MSG_STACK_TO_HC_HCI_SCO) {
549 return DATA_TYPE_SCO;
550 } else if (event == MSG_STACK_TO_HC_HCI_CMD) {
551 return DATA_TYPE_COMMAND;
552 } else {
553 HCI_TRACE_ERROR("%s invalid event type, could not translate 0x%x\n", __func__, event);
554 }
555
556 return 0;
557 }
558
get_waiting_command(command_opcode_t opcode)559 static pkt_linked_item_t *get_waiting_command(command_opcode_t opcode)
560 {
561 command_waiting_response_t *cmd_wait_q = &hci_host_env.cmd_waiting_q;
562 osi_mutex_lock(&cmd_wait_q->commands_pending_response_lock, OSI_MUTEX_MAX_TIMEOUT);
563
564 for (const list_node_t *node = list_begin(cmd_wait_q->commands_pending_response);
565 node != list_end(cmd_wait_q->commands_pending_response);
566 node = list_next(node)) {
567 pkt_linked_item_t *wait_entry = list_node(node);
568 if (wait_entry) {
569 hci_cmd_metadata_t *metadata = (hci_cmd_metadata_t *)(wait_entry->data);
570 if (metadata->opcode == opcode) {
571 list_remove(cmd_wait_q->commands_pending_response, wait_entry);
572 osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
573 return wait_entry;
574 }
575 }
576 }
577
578 osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
579 return NULL;
580 }
581
init_layer_interface(void)582 static void init_layer_interface(void)
583 {
584 if (!interface_created) {
585 interface.transmit_command = transmit_command;
586 interface.transmit_command_futured = transmit_command_futured;
587 interface.transmit_downward = transmit_downward;
588 interface_created = true;
589 }
590 }
591
// Callbacks exported to the HAL: full-packet RX and BLE adv-report RX.
static const hci_hal_callbacks_t hal_callbacks = {
    hal_says_packet_ready,
    hal_says_adv_rpt_ready,
};
596
// Callbacks exported to the fragmenter: fragment TX, reassembled-packet
// dispatch, and transmit-finished notification.
static const packet_fragmenter_callbacks_t packet_fragmenter_callbacks = {
    transmit_fragment,
    dispatch_reassembled,
    fragmenter_transmit_finished
};
602
hci_layer_get_interface(void)603 const hci_t *hci_layer_get_interface(void)
604 {
605 hal = hci_hal_h4_get_interface();
606 packet_fragmenter = packet_fragmenter_get_interface();
607
608 init_layer_interface();
609 return &interface;
610 }
611