/Zephyr-latest/subsys/sip_svc/ |
D | sip_svc_id_mgr.c |
    168  id_map->items = k_malloc(items_size);        in sip_svc_id_map_create()
    169  if (!id_map->items) {                        in sip_svc_id_map_create()
    176  id_map->items[i].id = SIP_SVC_ID_INVALID;    in sip_svc_id_map_create()
    177  id_map->items[i].arg1 = NULL;                in sip_svc_id_map_create()
    178  id_map->items[i].arg2 = NULL;                in sip_svc_id_map_create()
    179  id_map->items[i].arg3 = NULL;                in sip_svc_id_map_create()
    180  id_map->items[i].arg4 = NULL;                in sip_svc_id_map_create()
    181  id_map->items[i].arg5 = NULL;                in sip_svc_id_map_create()
    182  id_map->items[i].arg6 = NULL;                in sip_svc_id_map_create()
    193  k_free(id_map->items);                       in sip_svc_id_map_delete()
    [all …]
|
/Zephyr-latest/scripts/build/ |
D | gen_iter_sections.py |
    19  def gen_ld(filepath: str, items: list, alignment: int):
    21  for item in items:
    25  def gen_cmake(filepath: str, items: list, alignment: int):
    27  for item in items:
    66  items = get_tagged_items(args.input, args.tag)
    68  gen_ld(args.ld_output, items, args.alignment)
    69  gen_cmake(args.cmake_output, items, args.alignment)
|
D | gen_app_partitions.py |
    210  for partition, item in partitions.items():
    282  generic_partitions = {key: value for key, value in partitions.items()
    284  pinned_partitions = {key: value for key, value in partitions.items()
    292  decreasing_tuples = sorted(generic_partitions.items(),
    306  decreasing_tuples = sorted(pinned_partitions.items(),
|
D | gen_relocate_app.py |
    386  sorted(complete_list_of_sections.items()):
    585  for memory_type, files in rel_dict.items():
    596  for category, sections in file_sections.items():
    601  for (region, section_category_map) in sections_by_category.items():
    602  for (category, sections) in section_category_map.items():
    609  for mem_type, list_of_sections in sorted(complete_list_of_sections.items()):
|
/Zephyr-latest/tests/lib/p4workq/src/ |
D | main.c |
    30   static struct test_item items[MAX_ITEMS];               variable
    80   if (items[ii].item.thread == NULL &&                    in stress_handler()
    81   &items[ii] != titem && !items[ii].active) {             in stress_handler()
    82   stress_sub(&items[ii]);                                 in stress_handler()
    104  memset(items, 0, sizeof(items));                         in ZTEST()
    108  items[0].item.priority = -1;                             in ZTEST()
    109  stress_handler(&items[0].item);                          in ZTEST()
    152  struct k_p4wq_work *item = &items[num_items++].item;     in add_new_item()
|
/Zephyr-latest/doc/kernel/services/data_passing/ |
D | fifos.rst |
    8   to add and remove data items of any size.
    22  * A **queue** of data items that have been added but not yet removed.
    27  FIFO data items must be aligned on a word boundary, as the kernel reserves
    31  reserved space requirements for data items if they are added with
    36  FIFO data items are restricted to single active instance across all FIFO
    44  There is no limit to the number of items that may be queued.
    56  If desired, **multiple data items** can be added to a FIFO in a single operation
    58  useful if multiple writers are adding sets of related data items to the FIFO,
    59  as it ensures the data items in each set are not interleaved with other data
    60  items. Adding multiple data items to a FIFO is also more efficient than adding
    [all …]
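The excerpted fifos.rst lines describe Zephyr's k_fifo data passing, in particular the word-aligned data items whose first word is reserved for the kernel. A minimal sketch of that pattern is shown below; the struct layout and function names are illustrative placeholders, not taken from fifos.rst.

    /* Sketch of the k_fifo usage pattern described above; names are
     * hypothetical, only the kernel APIs (K_FIFO_DEFINE, k_fifo_put,
     * k_fifo_get) are real.
     */
    #include <zephyr/kernel.h>

    struct data_item_t {
        void *fifo_reserved;   /* first word reserved for use by the kernel */
        uint32_t value;
    };

    K_FIFO_DEFINE(my_fifo);

    void producer(void)
    {
        static struct data_item_t tx_data = { .value = 42 };

        /* Add one item; the kernel links it through fifo_reserved. */
        k_fifo_put(&my_fifo, &tx_data);
    }

    void consumer(void)
    {
        /* Block until an item is available, then use it. */
        struct data_item_t *rx = k_fifo_get(&my_fifo, K_FOREVER);

        printk("got %u\n", rx->value);
    }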
|
D | lifos.rst |
    8    to add and remove data items of any size.
    22   * A **queue** of data items that have been added but not yet removed.
    27   LIFO data items must be aligned on a word boundary, as the kernel reserves
    31   space requirements for data items if they are added with
    36   LIFO data items are restricted to single active instance across all LIFO
    44   There is no limit to the number of items that may be queued.
    123  to obtain data items from a producer thread,
    143  Use a LIFO to asynchronously transfer data items of arbitrary size
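The lifos.rst excerpt documents the same reserved-first-word item layout for k_lifo, with last-in first-out removal. A short illustrative sketch follows, using placeholder names; only the kernel APIs are real.

    /* Sketch of the k_lifo usage pattern described above. */
    #include <zephyr/kernel.h>

    struct data_item_t {
        void *lifo_reserved;   /* first word reserved for use by the kernel */
        uint32_t value;
    };

    K_LIFO_DEFINE(my_lifo);

    void producer(void)
    {
        static struct data_item_t tx_data = { .value = 7 };

        /* The most recently added item is the first one removed. */
        k_lifo_put(&my_lifo, &tx_data);
    }

    void consumer(void)
    {
        struct data_item_t *rx = k_lifo_get(&my_lifo, K_FOREVER);

        printk("got %u\n", rx->value);
    }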
|
D | message_queues.rst |
    8    fixed-size data items.
    22   * A **ring buffer** of data items that have been sent but not yet received.
    26   * A **maximum quantity** of data items that can be queued in the ring buffer.
    77   that is capable of holding 10 items, each of which is 12 bytes long.
    108  to pass data items from a producing thread to one or more consuming threads.
    139  to process data items generated by one or more producing threads. Note that
    185  Use a message queue to transfer small data items between threads
    189  A message queue can be used to transfer large data items, if desired.
    194  to transfer large data items by exchanging a pointer to the data item,
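The message_queues.rst excerpt describes a ring buffer of fixed-size items, including a queue of 10 items of 12 bytes each. A sketch of that configuration with the k_msgq API is shown below; field names are placeholders, not taken from the excerpt.

    /* Sketch of a k_msgq holding 10 items of 12 bytes each, as in the
     * excerpt above. Struct and function names are hypothetical.
     */
    #include <zephyr/kernel.h>

    struct data_item_type {
        uint32_t field1;
        uint32_t field2;
        uint32_t field3;   /* 3 * 4 bytes = 12 bytes per item */
    };

    K_MSGQ_DEFINE(my_msgq, sizeof(struct data_item_type), 10, 4);

    void producer(void)
    {
        struct data_item_type data = { .field1 = 1, .field2 = 2, .field3 = 3 };

        /* Data is copied into the ring buffer; give up if it is full. */
        k_msgq_put(&my_msgq, &data, K_NO_WAIT);
    }

    void consumer(void)
    {
        struct data_item_type data;

        /* Block until an item arrives; it is copied out of the ring buffer. */
        k_msgq_get(&my_msgq, &data, K_FOREVER);
    }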
|
/Zephyr-latest/subsys/rtio/ |
D | Kconfig.workq |
    23  int "Number of threads to use for processing work-items"
    27  int "Pool of work items to use with the RTIO Work-queues"
    30  Configure the Pool of work items appropriately to your
|
/Zephyr-latest/scripts/logging/dictionary/dictionary_parser/ |
D | mipi_syst.py |
    86   for _, one_inst in instances.items():
    108  for addr, one_str in database.get_string_mappings().items():
|
D | log_database.py |
    208  for _, sect in self.database['sections'].items():
    273  for _, sect in json_db['sections'].items():
    285  for addr, one_str in database.get_string_mappings().items():
    300  for _, sect in json_db['sections'].items():
|
/Zephyr-latest/doc/develop/tools/ |
D | vscode.rst |
    107  .. _C/C++ Extension Pack: https://marketplace.visualstudio.com/items?itemName=ms-vscode.cpptools-ex…
    111  .. _Checkpatch Extension: https://marketplace.visualstudio.com/items?itemName=idanp.checkpatch
    112  .. _EditorConfig Extension: https://marketplace.visualstudio.com/items?itemName=EditorConfig.Editor…
    114  .. _reStructuredText Extension Pack: https://marketplace.visualstudio.com/items?itemName=lextudio.r…
    116  .. _nRF Kconfig Extension: https://marketplace.visualstudio.com/items?itemName=nordic-semiconductor…
    117  .. _nRF DeviceTree Extension: https://marketplace.visualstudio.com/items?itemName=nordic-semiconduc…
    118  .. _GNU Linker Map files Extension: https://marketplace.visualstudio.com/items?itemName=trond-snekv…
|
/Zephyr-latest/scripts/footprint/ |
D | upload_data.py |
    144  items = []
    145  for component,value in data.items():
    146  items.append([component,value])
    148  table = tabulate(items, headers=['Component', 'Size'], tablefmt='orgtbl')
|
D | compare_footprint |
    232  for type, data in {'base': base_results, 'current': current_results}.items():
    243  for test, platforms in metrics['current'].items():
    248  for platform, test_data in platforms.items():
    278  for platform, data in deltas[test].items():
    280  for metric, value in data.items():
|
/Zephyr-latest/tests/kernel/workq/work_queue/ |
D | README.txt |
    34  - Initializing test items
    35  - Submitting test items
    60  - Initializing delayed test items
    61  - Submitting delayed test items
|
/Zephyr-latest/scripts/dts/ |
D | gen_defines.py |
    402  for cell_name, cell_value in irq.data.items():
    578  for macro, val in macro2val.items():
    594  for prop_name, prop in node.props.items():
    652  for macro, val in macro2val.items():
    850  for cell, val in data.items():
    867  for cell, val in data.items():
    881  for name, node in edt.chosen_nodes.items():
    885  for macro, value in chosen.items():
    909  for compat, okay_nodes in edt.compat2okay.items():
    940  for compat, nodes in edt.compat2nodes.items():
    [all …]
|
/Zephyr-latest/scripts/release/ |
D | bug_bash.py |
    81  for user, score in self.get_tally().items():
    89  sorted(rev_tally.items(), key=operator.itemgetter(0), reverse=True))
    97  for score, users in self.get_rev_tally().items():
|
/Zephyr-latest/scripts/ |
D | snippets.py |
    64   for variable, value in snippet_data.get('append', {}).items():
    68   for board, settings in snippet_data.get('boards', {}).items():
    72   for variable, value in settings.get('append', {}).items():
    158  for board, appends in snippet.board2appends.items():
    176  for name, values in appends.items():
|
D | set_assignees.py |
    108  area_counter = dict(sorted(area_counter.items(), key=lambda item: item[1], reverse=True))
    120  … _all_maintainers = dict(sorted(found_maintainers.items(), key=lambda item: item[1], reverse=True))
    131  for area, count in area_counter.items():
    246  for _, area in maintainer_file.areas.items():
    260  for areas, maintainers in dict(label_to_maintainer).items():
|
/Zephyr-latest/scripts/ci/ |
D | upload_test_results_es.py |
    94   for key,val in value.items():
    146  for k,v in src_dict.items():
    163  for property_name, rule in rules.items():
    216  for k,v in flat.items():
    231  … t_clone.update({ args.flatten + args.flatten_separator + k : v for k,v in flat_item.items() })
|
/Zephyr-latest/doc/_extensions/zephyr/domain/static/js/ |
D | codesample-livesearch.js |
    53  const items = list.querySelectorAll("li");
    55  items.forEach((item) => {
|
/Zephyr-latest/scripts/pylib/twister/twisterlib/ |
D | config_parser.py |
    158  for k, v in self.common.items():
    168  for k, v in self.scenarios[name].items():
    235  for k, kinfo in self.testsuite_valid_keys.items():
|
D | testplan.py |
    206  for _, ts in self.testsuites.items():
    294  self.instances = OrderedDict(sorted(self.instances.items(),
    297  self.instances = OrderedDict(sorted(self.instances.items()))
    306  temp_list = list(self.instances.items())
    314  to_run = {k : v for k,v in self.instances.items() if v.status == TwisterStatus.NONE}
    330  sliced_instances = islice(to_run.items(), start, end)
    331  skipped = {k : v for k,v in self.instances.items() if v.status == TwisterStatus.SKIP}
    332  errors = {k : v for k,v in self.instances.items() if v.status == TwisterStatus.ERROR}
    365  dupes = [item for item, count in collections.Counter(self.scenarios).items() if count > 1]
    378  for _, tc in self.testsuites.items():
    [all …]
|
/Zephyr-latest/doc/kernel/services/threads/ |
D | workqueue.rst |
    11   work items in a first in, first out manner. Each work item is processed by
    21   * A **queue** of work items that have been added, but not yet processed.
    23   * A **thread** that processes the work items in the queue. The priority of the
    33   when no work items are available.
    39   * Precise tracking of the status of cancelled work items, so that the
    43   * Direct submission of delayable work items to the queue with
    63   Any number of **work items** can be defined. Each work item is referenced
    77   the preceding work items in its queue the thread will remove the next work
    80   other items in the queue, a queued work item may be processed quickly or it
    103  used with care, since the workqueue cannot process subsequent work items in
    [all …]
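The workqueue.rst excerpt describes work items processed in FIFO order by a workqueue thread, including delayable work items. A compact sketch of that usage with the k_work API follows; the handler and item names are placeholders, not taken from the excerpt.

    /* Sketch of submitting a plain and a delayable work item to the
     * system workqueue. Names are hypothetical; only the kernel APIs
     * (K_WORK_DEFINE, k_work_submit, k_work_schedule) are real.
     */
    #include <zephyr/kernel.h>

    static void my_work_handler(struct k_work *work)
    {
        /* Runs in the context of the workqueue thread. */
        printk("work item processed\n");
    }

    K_WORK_DEFINE(my_work, my_work_handler);
    K_WORK_DELAYABLE_DEFINE(my_dwork, my_work_handler);

    void trigger(void)
    {
        /* Queue the item; the workqueue thread processes items in FIFO order. */
        k_work_submit(&my_work);

        /* Queue the delayable item to run after roughly 100 ms. */
        k_work_schedule(&my_dwork, K_MSEC(100));
    }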
|
/Zephyr-latest/doc/connectivity/usb/api/ |
D | hid.rst | 14 HID items reference
|