| /Linux-v6.6/tools/testing/selftests/net/ |
| D | devlink_port_split.py | 60 ports = json.loads(stdout)['port'] 84 values = list(json.loads(stdout)['port'].values())[0] 102 values = list(json.loads(stdout)['port'].values())[0] 266 validate_devlink_output(json.loads(stdout)) 267 devs = json.loads(stdout)['dev']
|
| /Linux-v6.6/kernel/sched/ |
| D | loadavg.c | 71 void get_avenrun(unsigned long *loads, unsigned long offset, int shift) in get_avenrun() argument 73 loads[0] = (avenrun[0] + offset) << shift; in get_avenrun() 74 loads[1] = (avenrun[1] + offset) << shift; in get_avenrun() 75 loads[2] = (avenrun[2] + offset) << shift; in get_avenrun()
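
get_avenrun() above copies the scheduler's fixed-point avenrun[] samples out to the caller, applying the caller's offset and shift. As a rough illustration, here is a minimal kernel-style sketch of a consumer, modeled on what fs/proc/loadavg.c and the kdb hits further down do with LOAD_INT()/LOAD_FRAC(); show_loadavg() is a hypothetical name, and the FIXED_1/200 offset rounds to the nearest hundredth.

```c
/*
 * Minimal sketch of a get_avenrun() consumer.  avenrun[] holds fixed-point
 * values with FSHIFT fractional bits; LOAD_INT()/LOAD_FRAC() from
 * <linux/sched/loadavg.h> split them into the familiar "1.23"-style
 * integer and two-digit fractional parts.
 */
#include <linux/sched/loadavg.h>
#include <linux/printk.h>

static void show_loadavg(void)
{
	unsigned long avnrun[3];

	get_avenrun(avnrun, FIXED_1 / 200, 0);

	pr_info("load average: %lu.%02lu %lu.%02lu %lu.%02lu\n",
		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
}
```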
|
| /Linux-v6.6/arch/powerpc/perf/ |
| D | power9-pmu.c | 174 GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS); 178 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1); 182 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1); 185 CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3); 188 CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
|
| D | power8-pmu.c | 134 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1); 139 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1); 143 CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3); 149 CACHE_EVENT_ATTR(branch-loads, PM_BRU_FIN);
|
| D | power10-pmu.c | 127 GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS); 134 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1); 138 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1); 141 CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3); 146 CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
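
The GENERIC_EVENT_ATTR()/CACHE_EVENT_ATTR() lines above are what export symbolic names such as L1-dcache-loads for these Power PMUs, mapping them to raw events like PM_LD_REF_L1. As a hedged illustration of what such a name resolves to on the userspace side, the sketch below counts the generic L1 data-cache load event through perf_event_open(2); whether the event is actually supported depends on the CPU and its PMU driver.

```c
/*
 * Hypothetical userspace sketch: count L1 data-cache loads for a small
 * read loop via perf_event_open(2).  "L1-dcache-loads" corresponds to the
 * generic cache event encoded below; the PMU driver translates it to the
 * hardware event (e.g. PM_LD_REF_L1 on Power9/Power10).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	static volatile unsigned char buf[1 << 16];
	unsigned long sum = 0;
	long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HW_CACHE;
	attr.size = sizeof(attr);
	/* L1D | OP_READ << 8 | RESULT_ACCESS << 16 == "L1-dcache-loads" */
	attr.config = PERF_COUNT_HW_CACHE_L1D |
		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16);
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* workload: a burst of loads for the counter to see */
	for (size_t i = 0; i < sizeof(buf); i++)
		sum += buf[i];

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		perror("read");
	printf("L1-dcache-loads: %lld (sum %lu)\n", count, sum);
	close(fd);
	return 0;
}
```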
|
| /Linux-v6.6/tools/perf/Documentation/ |
| D | perf-mem.txt | 19 right set of options to display a memory access profile. By default, loads 20 and stores are sampled. Use the -t option to limit to loads or stores. 93 Specify desired latency for loads event. Supported on Intel and Arm64
|
| /Linux-v6.6/arch/alpha/lib/ |
| D | ev6-copy_user.S | 64 EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores 116 EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad 203 EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad
|
| /Linux-v6.6/tools/testing/selftests/kvm/x86_64/ |
| D | pmu_event_filter_test.c | 111 uint64_t loads; member 504 const uint64_t loads = rdmsr(msr_base + 0); in masked_events_guest_test() local 515 pmc_results.loads = rdmsr(msr_base + 0) - loads; in masked_events_guest_test() 703 TEST_ASSERT(bool_eq(pmc_results.loads, test->flags & ALLOW_LOADS) && in run_masked_events_tests() 708 test->msg, pmc_results.loads, pmc_results.stores, in run_masked_events_tests()
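
The selftest snapshots a counter MSR before and after a burst of loads and checks the delta against the event filter. Below is a hypothetical sketch of that before/after pattern; rdmsr only works at CPL0, i.e. inside the guest where the selftest runs this code, and msr_base/workload are stand-ins for whatever counter and load loop the test programs.

```c
/*
 * Sketch of the before/after PMC-delta pattern: snapshot a counter MSR,
 * run the code of interest, read it again and subtract.  Must run at CPL0
 * (guest code in the selftest's case).
 */
#include <stdint.h>

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32) | lo;
}

static uint64_t count_loads(uint32_t msr_base, void (*workload)(void))
{
	uint64_t before = rdmsr(msr_base + 0);	/* counter 0: loads */

	workload();
	return rdmsr(msr_base + 0) - before;	/* delta = loads counted */
}
```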
|
| /Linux-v6.6/scripts/atomic/kerneldoc/ |
| D | read | 6 * Atomically loads the value of @v with ${desc_order} ordering.
|
| /Linux-v6.6/include/uapi/linux/ |
| D | sysinfo.h | 10 __kernel_ulong_t loads[3]; /* 1, 5, and 15 minute load averages */ member
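
This loads[3] member is how the same 1/5/15-minute averages reach userspace: the sysinfo(2) path exposes them scaled to SI_LOAD_SHIFT (16) fractional bits. A minimal userspace sketch:

```c
/*
 * Minimal sketch: read the 1/5/15-minute load averages via sysinfo(2).
 * si.loads[] holds fixed-point values with SI_LOAD_SHIFT fractional bits.
 */
#include <stdio.h>
#include <sys/sysinfo.h>

#ifndef SI_LOAD_SHIFT
#define SI_LOAD_SHIFT 16	/* from <linux/sysinfo.h> */
#endif

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si))
		return 1;

	printf("load average: %.2f %.2f %.2f\n",
	       si.loads[0] / (double)(1 << SI_LOAD_SHIFT),
	       si.loads[1] / (double)(1 << SI_LOAD_SHIFT),
	       si.loads[2] / (double)(1 << SI_LOAD_SHIFT));
	return 0;
}
```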
|
| /Linux-v6.6/Documentation/arch/x86/ |
| D | tsx_async_abort.rst | 13 case certain loads may speculatively pass invalid data to dependent operations 15 Synchronization Extensions (TSX) transaction. This includes loads with no 16 fault or assist condition. Such loads may speculatively expose stale data from
|
| /Linux-v6.6/include/linux/sched/ |
| D | loadavg.h | 16 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
|
| /Linux-v6.6/Documentation/core-api/ |
| D | refcount-vs-atomic.rst | 41 A strong (full) memory ordering guarantees that all prior loads and 49 A RELEASE memory ordering guarantees that all prior loads and 57 An ACQUIRE memory ordering guarantees that all post loads and
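
The ACQUIRE/RELEASE wording quoted here is the same one-way ordering the generic primitives provide. A minimal kernel-style sketch, assuming smp_store_release()/smp_load_acquire() and hypothetical producer()/consumer() helpers, of how a RELEASE store of a flag orders the writer's earlier stores against the reader's later loads:

```c
/*
 * RELEASE orders the writer's prior store to data before the flag store;
 * ACQUIRE orders the reader's later load of data after the flag load.
 * Once the reader sees ready == 1, it is guaranteed to see data == 42.
 */
#include <asm/barrier.h>

static int data, ready;

void producer(void)
{
	data = 42;			/* prior store ...            */
	smp_store_release(&ready, 1);	/* ... ordered before RELEASE */
}

int consumer(void)
{
	if (smp_load_acquire(&ready))	/* ACQUIRE load ...           */
		return data;		/* ... orders this later load */
	return -1;
}
```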
|
| /Linux-v6.6/Documentation/ |
| D | memory-barriers.txt | 178 perceived by the loads made by another CPU in the same order as the stores were 247 (*) Overlapping loads and stores within a particular CPU will appear to be 275 (*) It _must_not_ be assumed that independent loads and stores will be issued 369 deferral and combination of memory operations; speculative loads; speculative 388 to have any effect on loads. 401 case where two loads are performed such that the second depends on the 408 loads only; it is not required to have any effect on stores, independent 409 loads or overlapping loads. 417 that touched by the load will be perceptible to any loads issued after 434 dependency barriers. Nowadays, APIs for marking loads from shared [all …]
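
Most of these hits describe how write barriers pair with read barriers and why a read barrier orders loads only. A compact kernel-style sketch of that pairing (the canonical message-passing shape), with hypothetical cpu0()/cpu1() helpers standing in for code running on two CPUs:

```c
/*
 * smp_wmb() on the storing CPU pairs with smp_rmb() on the loading CPU.
 * smp_rmb() orders loads against later loads only; without it, cpu1()
 * could load a stale buf even after observing flag == 1.
 */
#include <asm/barrier.h>
#include <linux/compiler.h>

static int buf, flag;

void cpu0(void)				/* producer */
{
	WRITE_ONCE(buf, 1);
	smp_wmb();			/* order the buf store before the flag store */
	WRITE_ONCE(flag, 1);
}

int cpu1(void)				/* consumer */
{
	if (READ_ONCE(flag)) {
		smp_rmb();		/* order the flag load before the buf load */
		return READ_ONCE(buf);	/* guaranteed to observe 1 */
	}
	return -1;
}
```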
|
| /Linux-v6.6/tools/memory-model/Documentation/ |
| D | control-dependencies.txt | 42 fuse the load from "a" with other loads. Without the WRITE_ONCE(), 219 (*) Control dependencies can order prior loads against later stores. 221 Not prior loads against later loads, nor prior stores against 224 stores and later loads, smp_mb().
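
The rule quoted here, that a control dependency orders a prior load against later stores but not against later loads or prior stores, is easiest to see in a two-line sketch (control_dep() is a hypothetical name); anything needing load-to-load ordering still wants smp_mb():

```c
/*
 * The conditional store to b cannot be observed before the load from a,
 * because whether the store executes depends on the loaded value.  The
 * ordering covers prior loads vs. later stores only.
 */
#include <linux/compiler.h>

static int a, b;

void control_dep(void)
{
	if (READ_ONCE(a))		/* READ_ONCE() keeps the compiler from   */
		WRITE_ONCE(b, 1);	/* fusing/hoisting and breaking the dep  */
}
```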
|
| D | access-marking.txt | 38 using READ_ONCE() for loads and WRITE_ONCE() for stores is usually 64 1. Data-racy loads from shared variables whose values are used only 95 In theory, plain C-language loads can also be used for this use case. 119 In theory, plain C-language loads can also be used for this use case. 130 that data_race() loads are subject to load fusing, which can result in 140 In theory, plain C-language loads can also be used for this use case. 183 5. Any other loads for which there is not supposed to be a concurrent 187 loads nor concurrent stores to that same variable.
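
access-marking.txt distinguishes loads whose values only feed diagnostics (where data_race() is enough and fusing or tearing is tolerable) from loads that affect correctness (where READ_ONCE() is required). A small kernel-style sketch of the two markings, with hypothetical helper names:

```c
/*
 * data_race() tells KCSAN the racy load is intentional, but the compiler
 * may still fuse, tear, or re-load it; READ_ONCE() additionally forbids
 * those transformations, so use it when the loaded value matters.
 */
#include <linux/compiler.h>
#include <linux/printk.h>

static int shared_counter;

void report_stats(void)
{
	/* heuristic only: a fused or torn load is tolerable here */
	pr_info("approx counter: %d\n", data_race(shared_counter));
}

int consume_counter(void)
{
	/* correctness matters: READ_ONCE() forbids fusing/tearing */
	return READ_ONCE(shared_counter);
}
```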
|
| D | recipes.txt | 46 tearing, load/store fusing, and invented loads and stores. 204 and another CPU execute a pair of loads from this same pair of variables, 311 smp_rmb() macro orders prior loads against later loads. Therefore, if 354 second, while another CPU loads from the second variable and then stores 475 that one CPU first stores to one variable and then loads from a second, 476 while another CPU stores to the second variable and then loads from the
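
The last two hits describe the store-buffering shape: each CPU stores to one variable and then loads the other. A minimal sketch with smp_mb() on both sides, which is what recipes.txt prescribes so that at least one CPU is guaranteed to observe the other's store (cpu0()/cpu1() are hypothetical helpers):

```c
/*
 * Store buffering: without the full barriers, both r0 and r1 can end up 0,
 * because each CPU's store may still sit in its store buffer when the
 * other CPU loads.  smp_mb() orders each store before the following load.
 */
#include <asm/barrier.h>
#include <linux/compiler.h>

static int x, y;

int cpu0(void)
{
	WRITE_ONCE(x, 1);
	smp_mb();		/* order the store to x before the load of y */
	return READ_ONCE(y);	/* r0 */
}

int cpu1(void)
{
	WRITE_ONCE(y, 1);
	smp_mb();		/* order the store to y before the load of x */
	return READ_ONCE(x);	/* r1 */
}
```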
|
| D | explanation.txt | 79 for the loads, the model will predict whether it is possible for the 80 code to run in such a way that the loads will indeed obtain the 142 shared memory locations and another CPU loads from those locations in 154 A memory model will predict what values P1 might obtain for its loads 197 Since r1 = 1, P0 must store 1 to flag before P1 loads 1 from 198 it, as loads can obtain values only from earlier stores. 200 P1 loads from flag before loading from buf, since CPUs execute 223 each CPU stores to its own shared location and then loads from the 272 X: P1 loads 1 from flag executes before 273 Y: P1 loads 0 from buf executes before [all …]
|
| /Linux-v6.6/kernel/debug/kdb/ |
| D | kdb_main.c | 2489 val->loads[0] = avenrun[0]; in kdb_sysinfo() 2490 val->loads[1] = avenrun[1]; in kdb_sysinfo() 2491 val->loads[2] = avenrun[2]; in kdb_sysinfo() 2528 LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]), in kdb_summary() 2529 LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]), in kdb_summary() 2530 LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2])); in kdb_summary()
|
| /Linux-v6.6/fs/xfs/scrub/ |
| D | xfarray.h | 92 uint64_t loads; member
|
| /Linux-v6.6/arch/mips/include/asm/ |
| D | mips-r2-to-r6-emul.h | 22 u64 loads; member
|
| D | fpu_emulator.h | 26 unsigned long loads; member
|
| /Linux-v6.6/arch/mips/kernel/ |
| D | mips-r2-to-r6-emul.c | 1274 MIPS_R2_STATS(loads); in mipsr2_decoder() 1348 MIPS_R2_STATS(loads); in mipsr2_decoder() 1608 MIPS_R2_STATS(loads); in mipsr2_decoder() 1727 MIPS_R2_STATS(loads); in mipsr2_decoder() 2267 (unsigned long)__this_cpu_read(mipsr2emustats.loads), in mipsr2_emul_show() 2268 (unsigned long)__this_cpu_read(mipsr2bdemustats.loads)); in mipsr2_emul_show() 2324 __this_cpu_write((mipsr2emustats).loads, 0); in mipsr2_clear_show() 2325 __this_cpu_write((mipsr2bdemustats).loads, 0); in mipsr2_clear_show()
|
| /Linux-v6.6/drivers/tee/optee/ |
| D | Kconfig | 16 This loads the BL32 image for OP-TEE as firmware when the driver is
|
| /Linux-v6.6/tools/net/ynl/ |
| D | cli.py | 37 attrs = json.loads(args.json_text)
|