
Searched refs:stack (Results 1 – 25 of 799) sorted by relevance


/Linux-v6.1/arch/um/os-Linux/skas/
mem.c
26 unsigned long *stack) in check_init_stack() argument
28 if (stack == NULL) { in check_init_stack()
29 stack = (unsigned long *) mm_idp->stack + 2; in check_init_stack()
30 *stack = 0; in check_init_stack()
32 return stack; in check_init_stack()
80 ret = *((unsigned long *) mm_idp->stack); in do_syscall_stub()
81 offset = *((unsigned long *) mm_idp->stack + 1); in do_syscall_stub()
83 data = (unsigned long *)(mm_idp->stack + offset - STUB_DATA); in do_syscall_stub()
116 unsigned long *stack = check_init_stack(mm_idp, *addr); in run_syscall_stub() local
118 *stack += sizeof(long); in run_syscall_stub()
[all …]
/Linux-v6.1/drivers/misc/altera-stapl/
altera.c
213 long *stack = astate->stack; in altera_execute() local
528 stack[stack_ptr] = stack[stack_ptr - 1]; in altera_execute()
534 swap(stack[stack_ptr - 2], stack[stack_ptr - 1]); in altera_execute()
539 stack[stack_ptr - 1] += stack[stack_ptr]; in altera_execute()
545 stack[stack_ptr - 1] -= stack[stack_ptr]; in altera_execute()
551 stack[stack_ptr - 1] *= stack[stack_ptr]; in altera_execute()
557 stack[stack_ptr - 1] /= stack[stack_ptr]; in altera_execute()
563 stack[stack_ptr - 1] %= stack[stack_ptr]; in altera_execute()
569 stack[stack_ptr - 1] <<= stack[stack_ptr]; in altera_execute()
575 stack[stack_ptr - 1] >>= stack[stack_ptr]; in altera_execute()
[all …]
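
The altera.c hits show the STAPL player's bytecode interpreter combining the top two slots of a long-valued operand stack (stack[stack_ptr - 1] op= stack[stack_ptr]). Below is a minimal, self-contained sketch of that dispatch pattern; the opcode names, struct insn layout, and fixed 64-slot stack are illustrative, not the driver's real encoding, and bounds checking is omitted.

/* Operand-stack interpreter sketch (hypothetical opcodes): binary ops
 * combine the top two slots, mirroring the matched lines above. */
#include <stdio.h>

enum op { OP_PUSH, OP_DUP, OP_SWAP, OP_ADD, OP_SUB, OP_MUL, OP_HALT };

struct insn { enum op op; long arg; };

static long run(const struct insn *prog)
{
	long stack[64];
	int sp = -1;            /* index of the current top of stack */
	long tmp;

	for (const struct insn *i = prog; ; i++) {
		switch (i->op) {
		case OP_PUSH: stack[++sp] = i->arg; break;
		case OP_DUP:  stack[sp + 1] = stack[sp]; sp++; break;
		case OP_SWAP: tmp = stack[sp]; stack[sp] = stack[sp - 1];
			      stack[sp - 1] = tmp; break;
		case OP_ADD:  stack[sp - 1] += stack[sp]; sp--; break;
		case OP_SUB:  stack[sp - 1] -= stack[sp]; sp--; break;
		case OP_MUL:  stack[sp - 1] *= stack[sp]; sp--; break;
		case OP_HALT: return stack[sp];
		}
	}
}

int main(void)
{
	const struct insn prog[] = {
		{ OP_PUSH, 6 }, { OP_PUSH, 7 }, { OP_MUL, 0 }, { OP_HALT, 0 },
	};
	printf("%ld\n", run(prog));   /* prints 42 */
	return 0;
}
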
/Linux-v6.1/arch/x86/kernel/
dumpstack_32.c
38 static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info) in in_hardirq_stack() argument
47 if (stack < begin || stack > end) in in_hardirq_stack()
63 static bool in_softirq_stack(unsigned long *stack, struct stack_info *info) in in_softirq_stack() argument
72 if (stack < begin || stack > end) in in_softirq_stack()
88 static bool in_doublefault_stack(unsigned long *stack, struct stack_info *info) in in_doublefault_stack() argument
93 void *begin = ss->stack; in in_doublefault_stack()
94 void *end = begin + sizeof(ss->stack); in in_doublefault_stack()
96 if ((void *)stack < begin || (void *)stack >= end) in in_doublefault_stack()
108 int get_stack_info(unsigned long *stack, struct task_struct *task, in get_stack_info() argument
111 if (!stack) in get_stack_info()
[all …]
dumpstack_64.c
94 static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info) in in_exception_stack() argument
96 unsigned long begin, end, stk = (unsigned long)stack; in in_exception_stack()
135 static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info) in in_irq_stack() argument
153 if (stack < begin || stack >= end) in in_irq_stack()
170 bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task, in get_stack_info_noinstr() argument
173 if (in_task_stack(stack, task, info)) in get_stack_info_noinstr()
179 if (in_exception_stack(stack, info)) in get_stack_info_noinstr()
182 if (in_irq_stack(stack, info)) in get_stack_info_noinstr()
185 if (in_entry_stack(stack, info)) in get_stack_info_noinstr()
191 int get_stack_info(unsigned long *stack, struct task_struct *task, in get_stack_info() argument
[all …]
dumpstack.c
32 bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task, in in_task_stack() argument
38 if (stack < begin || stack >= end) in in_task_stack()
50 bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info) in in_entry_stack() argument
57 if ((void *)stack < begin || (void *)stack >= end) in in_entry_stack()
187 unsigned long *stack, const char *log_lvl) in show_trace_log_lvl() argument
197 unwind_start(&state, task, regs, stack); in show_trace_log_lvl()
198 stack = stack ? : get_stack_pointer(task, regs); in show_trace_log_lvl()
217 for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { in show_trace_log_lvl()
220 if (get_stack_info(stack, task, &stack_info, &visit_mask)) { in show_trace_log_lvl()
227 stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack); in show_trace_log_lvl()
[all …]
/Linux-v6.1/tools/lib/traceevent/plugins/
plugin_function.c
15 char **stack; member
53 static void add_child(struct func_stack *stack, const char *child, int pos) in add_child() argument
60 if (pos < stack->size) in add_child()
61 free(stack->stack[pos]); in add_child()
65 ptr = realloc(stack->stack, sizeof(char *) * in add_child()
66 (stack->size + STK_BLK)); in add_child()
72 stack->stack = ptr; in add_child()
74 for (i = stack->size; i < stack->size + STK_BLK; i++) in add_child()
75 stack->stack[i] = NULL; in add_child()
76 stack->size += STK_BLK; in add_child()
[all …]
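
The plugin_function.c hits grow a char ** array in STK_BLK-sized blocks with realloc() and NULL-fill the fresh slots so later frees stay safe. A standalone sketch of the same growth pattern follows; the BLK constant, struct name, and push_name() helper are illustrative, not the plugin's API.

/* Grow a char ** array in fixed-size blocks, NULL-filling new slots. */
#include <stdlib.h>
#include <string.h>

#define BLK 10

struct name_stack {
	int size;
	char **stack;
};

static int push_name(struct name_stack *s, const char *name, int pos)
{
	if (pos < s->size)
		free(s->stack[pos]);        /* overwrite an existing slot */

	while (pos >= s->size) {
		char **ptr = realloc(s->stack, sizeof(char *) * (s->size + BLK));
		if (!ptr)
			return -1;          /* old array is still valid */
		s->stack = ptr;
		for (int i = s->size; i < s->size + BLK; i++)
			s->stack[i] = NULL;
		s->size += BLK;
	}

	s->stack[pos] = strdup(name);
	return s->stack[pos] ? 0 : -1;
}
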
/Linux-v6.1/arch/um/kernel/skas/
mmu.c
21 unsigned long stack = 0; in init_new_context() local
24 stack = get_zeroed_page(GFP_KERNEL); in init_new_context()
25 if (stack == 0) in init_new_context()
28 to_mm->id.stack = stack; in init_new_context()
34 to_mm->id.u.pid = copy_context_skas0(stack, in init_new_context()
36 else to_mm->id.u.pid = start_userspace(stack); in init_new_context()
54 if (to_mm->id.stack != 0) in init_new_context()
55 free_page(to_mm->id.stack); in init_new_context()
77 free_page(mmu->id.stack); in destroy_context()
/Linux-v6.1/Documentation/mm/
vmalloced-kernel-stacks.rst
21 Kernel stack overflows are often hard to debug and make the kernel
25 Virtually-mapped kernel stacks with guard pages causes kernel stack
31 causes reliable faults when the stack overflows. The usability of
32 the stack trace after overflow and response to the overflow itself
49 needs to work while the stack points to a virtual address with
51 most likely) needs to ensure that the stack's page table entries
52 are populated before running on a possibly unpopulated stack.
53 - If the stack overflows into a guard page, something reasonable
64 with guard pages. This causes kernel stack overflows to be caught
75 VMAP_STACK is enabled, it is not possible to run DMA on stack
[all …]
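
vmalloced-kernel-stacks.rst describes CONFIG_VMAP_STACK: a virtually mapped stack with a guard page turns an overflow into an immediate fault instead of silent corruption. The sketch below is only a userspace analogue of that guard-page idea using mmap() and mprotect(); it is not the kernel's vmalloc-based mechanism, and the sizes chosen are arbitrary.

/* Userspace analogue of a guard-page-protected stack: reserve a region,
 * then make its lowest page inaccessible so a downward overflow faults. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t size = 16 * page;                 /* stack + 1 guard page */

	void *base = mmap(NULL, size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Lowest page becomes the guard: any access to it is SIGSEGV. */
	if (mprotect(base, page, PROT_NONE)) {
		perror("mprotect");
		return 1;
	}

	void *stack_top = (char *)base + size;   /* stacks grow down */
	printf("usable stack: %p..%p, guard page at %p\n",
	       (void *)((char *)base + page), stack_top, base);

	munmap(base, size);
	return 0;
}
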
/Linux-v6.1/arch/nios2/kernel/
traps.c
60 void show_stack(struct task_struct *task, unsigned long *stack, in show_stack() argument
66 if (!stack) { in show_stack()
68 stack = (unsigned long *)task->thread.ksp; in show_stack()
70 stack = (unsigned long *)&stack; in show_stack()
73 addr = (unsigned long) stack; in show_stack()
76 printk("%sStack from %08lx:", loglvl, (unsigned long)stack); in show_stack()
78 if (stack + 1 > endstack) in show_stack()
82 printk("%s %08lx", loglvl, *stack++); in show_stack()
87 while (stack + 1 <= endstack) { in show_stack()
88 addr = *stack++; in show_stack()
/Linux-v6.1/arch/um/os-Linux/
helper.c
46 unsigned long stack, sp; in run_helper() local
49 stack = alloc_stack(0, __cant_sleep()); in run_helper()
50 if (stack == 0) in run_helper()
68 sp = stack + UM_KERN_PAGE_SIZE; in run_helper()
114 free_stack(stack, 0); in run_helper()
121 unsigned long stack, sp; in run_helper_thread() local
124 stack = alloc_stack(0, __cant_sleep()); in run_helper_thread()
125 if (stack == 0) in run_helper_thread()
128 sp = stack + UM_KERN_PAGE_SIZE; in run_helper_thread()
147 free_stack(stack, 0); in run_helper_thread()
[all …]
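
The helper.c hits allocate a stack page and pass stack + UM_KERN_PAGE_SIZE as the child's initial stack pointer, since the stack grows downward. A plain userspace sketch of the same pattern with glibc's clone() wrapper follows; STACK_SIZE, child_fn(), and the argument string are illustrative and this is not UML's code.

/* Allocate a child stack and hand its *top* to clone(); reap and free. */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

#define STACK_SIZE (64 * 1024)

static int child_fn(void *arg)
{
	printf("helper running: %s\n", (const char *)arg);
	return 0;
}

int main(void)
{
	char *stack = malloc(STACK_SIZE);
	if (!stack)
		return 1;

	/* clone() takes the highest usable address of the child stack. */
	pid_t pid = clone(child_fn, stack + STACK_SIZE, SIGCHLD, (void *)"hello");
	if (pid < 0) {
		perror("clone");
		free(stack);
		return 1;
	}

	waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}
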
/Linux-v6.1/lib/
stackdepot.c
122 struct stack_record *stack; in depot_alloc_stack() local
123 size_t required_size = struct_size(stack, entries, size); in depot_alloc_stack()
146 stack = stack_slabs[depot_index] + depot_offset; in depot_alloc_stack()
148 stack->hash = hash; in depot_alloc_stack()
149 stack->size = size; in depot_alloc_stack()
150 stack->handle.slabindex = depot_index; in depot_alloc_stack()
151 stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN; in depot_alloc_stack()
152 stack->handle.valid = 1; in depot_alloc_stack()
153 stack->handle.extra = 0; in depot_alloc_stack()
154 memcpy(stack->entries, entries, flex_array_size(stack, entries, size)); in depot_alloc_stack()
[all …]
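
depot_alloc_stack() above is the internal allocator that packs a captured trace into a slab and encodes the slab index/offset in the returned handle. The caller-facing side is stack_depot_save()/stack_depot_fetch(); the fragment below is a hedged sketch of typical caller usage, assuming the interfaces declared in <linux/stackdepot.h> and <linux/stacktrace.h> in this tree, with record_caller()/print_recorded() as made-up helper names.

/* Hedged sketch of stack depot caller usage (kernel context assumed). */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_caller(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr;

	/* Capture the current call chain, skipping this helper itself. */
	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* Deduplicate it in the depot; identical traces share one handle. */
	return stack_depot_save(entries, nr, gfp);   /* 0 on failure */
}

static void print_recorded(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr;

	if (!handle)
		return;
	nr = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr, 0);
}
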
/Linux-v6.1/arch/openrisc/kernel/
unwinder.c
60 void unwind_stack(void *data, unsigned long *stack, in unwind_stack() argument
67 while (!kstack_end(stack)) { in unwind_stack()
68 frameinfo = container_of(stack, in unwind_stack()
83 stack++; in unwind_stack()
93 void unwind_stack(void *data, unsigned long *stack, in unwind_stack() argument
98 while (!kstack_end(stack)) { in unwind_stack()
99 addr = *stack++; in unwind_stack()
/Linux-v6.1/mm/kmsan/
init.c
147 static void smallstack_push(struct smallstack *stack, struct page *pages) in smallstack_push() argument
149 KMSAN_WARN_ON(stack->index == MAX_BLOCKS); in smallstack_push()
150 stack->items[stack->index] = pages; in smallstack_push()
151 stack->index++; in smallstack_push()
155 static struct page *smallstack_pop(struct smallstack *stack) in smallstack_pop() argument
159 KMSAN_WARN_ON(stack->index == 0); in smallstack_pop()
160 stack->index--; in smallstack_pop()
161 ret = stack->items[stack->index]; in smallstack_pop()
162 stack->items[stack->index] = NULL; in smallstack_pop()
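
The kmsan init.c hits are a tiny fixed-capacity LIFO of page pointers with explicit index checks. A generic standalone version of the same push/pop pattern is below; the capacity, names, and use of assert() in place of the kernel's KMSAN_WARN_ON() are illustrative.

/* Fixed-capacity pointer stack with explicit bounds checks. */
#include <assert.h>
#include <stddef.h>

#define MAX_ITEMS 8

struct smallstack {
	void *items[MAX_ITEMS];
	int index;                        /* number of valid entries */
};

static void smallstack_push(struct smallstack *s, void *item)
{
	assert(s->index < MAX_ITEMS);     /* kernel code warns instead */
	s->items[s->index++] = item;
}

static void *smallstack_pop(struct smallstack *s)
{
	assert(s->index > 0);
	void *ret = s->items[--s->index];
	s->items[s->index] = NULL;        /* drop the stale reference */
	return ret;
}
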
/Linux-v6.1/tools/testing/selftests/vDSO/
vdso_standalone_test_x86.c
73 __attribute__((externally_visible)) void c_main(void **stack) in c_main() argument
76 long argc = (long)*stack; in c_main()
77 stack += argc + 2; in c_main()
80 while(*stack) in c_main()
81 stack++; in c_main()
82 stack++; in c_main()
85 vdso_init_from_auxv((void *)stack); in c_main()
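
c_main() above walks the raw process start stack: argc, then argv[] up to its NULL terminator, then envp[] up to its NULL terminator, then the ELF auxiliary vector. From an ordinary main() the same auxv can be reached by stepping past the envp terminator; the sketch below assumes the Linux x86-64 start layout (Elf64_auxv_t) and the common three-argument main() extension.

/* Locate auxv: it starts in the word right after envp's NULL terminator. */
#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv, char **envp)
{
	char **p = envp;

	while (*p)               /* skip the environment strings */
		p++;
	p++;                     /* step over the NULL terminator */

	for (Elf64_auxv_t *aux = (Elf64_auxv_t *)p; aux->a_type != AT_NULL; aux++)
		if (aux->a_type == AT_PAGESZ)
			printf("AT_PAGESZ = %lu\n",
			       (unsigned long)aux->a_un.a_val);

	(void)argc; (void)argv;
	return 0;
}
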
/Linux-v6.1/drivers/gpu/drm/i915/
intel_runtime_pm.c
77 depot_stack_handle_t stack, *stacks; in track_intel_runtime_pm_wakeref() local
83 stack = __save_depot_stack(); in track_intel_runtime_pm_wakeref()
84 if (!stack) in track_intel_runtime_pm_wakeref()
90 rpm->debug.last_acquire = stack; in track_intel_runtime_pm_wakeref()
96 stacks[rpm->debug.count++] = stack; in track_intel_runtime_pm_wakeref()
99 stack = -1; in track_intel_runtime_pm_wakeref()
104 return stack; in track_intel_runtime_pm_wakeref()
108 depot_stack_handle_t stack) in untrack_intel_runtime_pm_wakeref() argument
116 if (unlikely(stack == -1)) in untrack_intel_runtime_pm_wakeref()
121 if (rpm->debug.owners[n] == stack) { in untrack_intel_runtime_pm_wakeref()
[all …]
/Linux-v6.1/Documentation/x86/
kernel-stacks.rst
14 Like all other architectures, x86_64 has a kernel stack for every
17 zombie. While the thread is in user space the kernel stack is empty
25 * Interrupt stack. IRQ_STACK_SIZE
29 kernel switches from the current task to the interrupt stack. Like
32 of every per thread stack.
34 The interrupt stack is also used when processing a softirq.
36 Switching to the kernel interrupt stack is done by software based on a
41 to automatically switch to a new stack for designated events such as
46 point to dedicated stacks; each stack can be a different size.
50 loads such a descriptor, the hardware automatically sets the new stack
[all …]
/Linux-v6.1/arch/um/kernel/
sysrq.c
30 void show_stack(struct task_struct *task, unsigned long *stack, in show_stack() argument
42 if (!stack) in show_stack()
43 stack = get_stack_pointer(task, segv_regs); in show_stack()
47 if (kstack_end(stack)) in show_stack()
51 pr_cont(" %08lx", READ_ONCE_NOCHECK(*stack)); in show_stack()
52 stack++; in show_stack()
/Linux-v6.1/include/linux/sched/
task_stack.h
21 return task->stack; in task_stack_page()
29 return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1; in end_of_stack()
31 return task->stack; in end_of_stack()
37 #define task_stack_page(task) ((void *)(task)->stack)
89 void *stack = task_stack_page(current); in object_is_on_stack() local
91 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); in object_is_on_stack()
/Linux-v6.1/tools/perf/scripts/python/
stackcollapse.py
97 stack = list()
103 stack.append(tidy_function_name(entry['sym']['name'],
108 stack.append(tidy_function_name(param_dict['symbol'],
119 stack.append(comm)
121 stack_string = ';'.join(reversed(stack))
126 for stack in list:
127 print("%s %d" % (stack, lines[stack]))
/Linux-v6.1/mm/kasan/
report_tags.c
36 depot_stack_handle_t stack; in kasan_complete_mode_report_info() local
72 stack = READ_ONCE(entry->stack); in kasan_complete_mode_report_info()
84 info->free_track.stack = stack; in kasan_complete_mode_report_info()
99 info->alloc_track.stack = stack; in kasan_complete_mode_report_info()
/Linux-v6.1/arch/x86/include/asm/
stacktrace.h
31 bool in_task_stack(unsigned long *stack, struct task_struct *task,
34 bool in_entry_stack(unsigned long *stack, struct stack_info *info);
36 int get_stack_info(unsigned long *stack, struct task_struct *task,
38 bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
42 bool get_stack_guard_info(unsigned long *stack, struct stack_info *info) in get_stack_guard_info() argument
45 if (get_stack_info_noinstr(stack, current, info)) in get_stack_guard_info()
48 return get_stack_info_noinstr((void *)stack + PAGE_SIZE, current, info); in get_stack_guard_info()
/Linux-v6.1/arch/powerpc/kernel/
stacktrace.c
43 unsigned long *stack = (unsigned long *) sp; in arch_stack_walk() local
49 newsp = stack[0]; in arch_stack_walk()
50 ip = stack[STACK_FRAME_LR_SAVE]; in arch_stack_walk()
112 unsigned long *stack = (unsigned long *) sp; in arch_stack_walk_reliable() local
119 newsp = stack[0]; in arch_stack_walk_reliable()
139 stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { in arch_stack_walk_reliable()
144 ip = stack[STACK_FRAME_LR_SAVE]; in arch_stack_walk_reliable()
152 ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack); in arch_stack_walk_reliable()
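
The powerpc stacktrace.c hits walk frames by hand: the next stack pointer comes from stack[0] and the saved return address from the STACK_FRAME_LR_SAVE slot. A portable userspace way to capture a comparable trace is glibc's backtrace()/backtrace_symbols(); the sketch below is that analogue, not the kernel unwinder, and symbol names may need -rdynamic at link time.

/* Capture and print the current call chain with glibc's backtrace(). */
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void show_trace(void)
{
	void *addrs[32];
	int n = backtrace(addrs, 32);
	char **syms = backtrace_symbols(addrs, n);

	for (int i = 0; i < n; i++)
		printf("  %s\n", syms ? syms[i] : "?");
	free(syms);
}

static void inner(void) { show_trace(); }
static void outer(void) { inner(); }

int main(void)
{
	outer();
	return 0;
}
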
/Linux-v6.1/scripts/kconfig/
symbol.c
975 static void dep_stack_insert(struct dep_stack *stack, struct symbol *sym) in dep_stack_insert() argument
977 memset(stack, 0, sizeof(*stack)); in dep_stack_insert()
979 check_top->next = stack; in dep_stack_insert()
980 stack->prev = check_top; in dep_stack_insert()
981 stack->sym = sym; in dep_stack_insert()
982 check_top = stack; in dep_stack_insert()
999 struct dep_stack *stack; in sym_check_print_recursive() local
1010 for (stack = check_top; stack != NULL; stack = stack->prev) in sym_check_print_recursive()
1011 if (stack->sym == last_sym) in sym_check_print_recursive()
1013 if (!stack) { in sym_check_print_recursive()
[all …]
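
The kconfig symbol.c hits push a stack-allocated dep_stack node per symbol being checked (linked through check_top) and detect a recursive dependency by walking that chain back to the offending symbol. The sketch below shows the same technique in isolation; the struct node graph, field names, and check_deps() helper are illustrative, not kconfig's data model.

/* Cycle detection via a chain of stack-allocated frames. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	const char *name;
	struct node *deps[4];    /* NULL-terminated dependency list */
};

struct dep_frame {
	struct dep_frame *prev;
	const struct node *sym;
};

static bool check_deps(const struct node *sym, struct dep_frame *top)
{
	/* Already on the chain? Then sym ultimately depends on itself. */
	for (struct dep_frame *f = top; f; f = f->prev) {
		if (f->sym == sym) {
			printf("recursive dependency via %s\n", sym->name);
			return true;
		}
	}

	struct dep_frame frame = { .prev = top, .sym = sym };
	for (int i = 0; sym->deps[i]; i++)
		if (check_deps(sym->deps[i], &frame))
			return true;
	return false;
}

int main(void)
{
	struct node a = { "A" }, b = { "B" };
	a.deps[0] = &b;
	b.deps[0] = &a;          /* A -> B -> A */
	return check_deps(&a, NULL) ? 0 : 1;
}
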
/Linux-v6.1/arch/loongarch/kernel/
process.c
128 unsigned long usp = args->stack; in copy_thread()
215 bool in_irq_stack(unsigned long stack, struct stack_info *info) in in_irq_stack() argument
221 if (stack < begin || stack >= end) in in_irq_stack()
236 bool in_task_stack(unsigned long stack, struct task_struct *task, in in_task_stack() argument
242 if (stack < begin || stack >= end) in in_task_stack()
253 int get_stack_info(unsigned long stack, struct task_struct *task, in get_stack_info() argument
258 if (!stack || stack & (SZREG - 1)) in get_stack_info()
261 if (in_task_stack(stack, task, info)) in get_stack_info()
267 if (in_irq_stack(stack, info)) in get_stack_info()
/Linux-v6.1/security/
Kconfig.hardening
8 stack variable initializations, this warning is silenced for
38 prompt "Initialize kernel stack variables at function entry"
44 This option enables initialization of stack variables at
56 bool "no automatic stack variable initialization (weakest)"
58 Disable automatic stack variable initialization.
60 classes of uninitialized stack variable exploits
69 Zero-initialize any structures on the stack containing
71 uninitialized stack variable exploits and information
82 Zero-initialize any structures on the stack that may
85 of uninitialized stack variable exploits and information
[all …]
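
The Kconfig.hardening hits configure automatic initialization of on-stack variables; with the zero-init option every automatic variable starts zeroed (the kernel build passes the compiler's -ftrivial-auto-var-init=zero for this), closing uninitialized-stack-data leaks. The standalone program below only illustrates the class of bug being targeted; the struct layout and the "copy to userspace" stand-in are made up for the example.

/* An on-stack struct copied out with stale fields and padding: the bytes
 * of 'spare' and of the padding after 'tag' were never written.  Built
 * with -ftrivial-auto-var-init=zero they would read back as zeroes. */
#include <stdio.h>
#include <string.h>

struct report {
	char  tag;        /* padding bytes usually follow on 64-bit ABIs */
	long  value;
	long  spare;      /* never written below: stays stale */
};

int main(void)
{
	struct report r;            /* uninitialized stack memory */
	r.tag = 'x';
	r.value = 42;

	unsigned char out[sizeof(r)];
	memcpy(out, &r, sizeof(r)); /* stand-in for copying to userspace */

	for (size_t i = 0; i < sizeof(out); i++)
		printf("%02x%c", out[i], i + 1 == sizeof(out) ? '\n' : ' ');
	return 0;
}
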
