Lines Matching refs:curr

1621 struct task_struct *curr = current; in print_circular_bug_header() local
1632 curr->comm, task_pid_nr(curr)); in print_circular_bug_header()
1654 struct task_struct *curr = current; in print_circular_bug() local
1682 lockdep_print_held_locks(curr); in print_circular_bug()
2025 print_bad_irq_dependency(struct task_struct *curr, in print_bad_irq_dependency() argument
2046 curr->comm, task_pid_nr(curr), in print_bad_irq_dependency()
2047 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, in print_bad_irq_dependency()
2048 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, in print_bad_irq_dependency()
2049 curr->hardirqs_enabled, in print_bad_irq_dependency()
2050 curr->softirqs_enabled); in print_bad_irq_dependency()
2079 lockdep_print_held_locks(curr); in print_bad_irq_dependency()
2224 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, in check_irq_usage() argument
2293 print_bad_irq_dependency(curr, &this, &that, in check_irq_usage()
2316 static inline int check_irq_usage(struct task_struct *curr, in check_irq_usage() argument
2349 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, in print_deadlock_bug() argument
2361 curr->comm, task_pid_nr(curr)); in print_deadlock_bug()
2368 lockdep_print_held_locks(curr); in print_deadlock_bug()
2383 check_deadlock(struct task_struct *curr, struct held_lock *next) in check_deadlock() argument
2389 for (i = 0; i < curr->lockdep_depth; i++) { in check_deadlock()
2390 prev = curr->held_locks + i; in check_deadlock()
2412 print_deadlock_bug(curr, prev, next); in check_deadlock()
2441 check_prev_add(struct task_struct *curr, struct held_lock *prev, in check_prev_add() argument
2480 if (!check_irq_usage(curr, prev, next)) in check_prev_add()
2551 check_prevs_add(struct task_struct *curr, struct held_lock *next) in check_prevs_add() argument
2554 int depth = curr->lockdep_depth; in check_prevs_add()
2568 if (curr->held_locks[depth].irq_context != in check_prevs_add()
2569 curr->held_locks[depth-1].irq_context) in check_prevs_add()
2573 int distance = curr->lockdep_depth - depth + 1; in check_prevs_add()
2574 hlock = curr->held_locks + depth - 1; in check_prevs_add()
2581 int ret = check_prev_add(curr, hlock, next, distance, in check_prevs_add()
2605 if (curr->held_locks[depth].irq_context != in check_prevs_add()
2606 curr->held_locks[depth-1].irq_context) in check_prevs_add()
2637 static inline int get_first_held_lock(struct task_struct *curr, in get_first_held_lock() argument
2643 for (i = curr->lockdep_depth - 1; i >= 0; i--) { in get_first_held_lock()
2644 hlock_curr = curr->held_locks + i; in get_first_held_lock()
2668 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next) in print_chain_keys_held_locks() argument
2672 int depth = curr->lockdep_depth; in print_chain_keys_held_locks()
2673 int i = get_first_held_lock(curr, hlock_next); in print_chain_keys_held_locks()
2678 hlock = curr->held_locks + i; in print_chain_keys_held_locks()
2704 static void print_collision(struct task_struct *curr, in print_collision() argument
2717 print_chain_keys_held_locks(curr, hlock_next); in print_collision()
2733 static int check_no_collision(struct task_struct *curr, in check_no_collision() argument
2740 i = get_first_held_lock(curr, hlock); in check_no_collision()
2742 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) { in check_no_collision()
2743 print_collision(curr, hlock, chain); in check_no_collision()
2748 id = curr->held_locks[i].class_idx; in check_no_collision()
2751 print_collision(curr, hlock, chain); in check_no_collision()
2793 static inline int add_chain_cache(struct task_struct *curr, in add_chain_cache() argument
2821 i = get_first_held_lock(curr, hlock); in add_chain_cache()
2822 chain->depth = curr->lockdep_depth + 1 - i; in add_chain_cache()
2825 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); in add_chain_cache()
2831 int lock_id = curr->held_locks[i].class_idx; in add_chain_cache()
2876 static inline int lookup_chain_cache_add(struct task_struct *curr, in lookup_chain_cache_add() argument
2885 if (!check_no_collision(curr, hlock, chain)) in lookup_chain_cache_add()
2915 if (!add_chain_cache(curr, hlock, chain_key)) in lookup_chain_cache_add()
2921 static int validate_chain(struct task_struct *curr, in validate_chain() argument
2936 lookup_chain_cache_add(curr, hlock, chain_key)) { in validate_chain()
2955 int ret = check_deadlock(curr, hlock); in validate_chain()
2971 if (!check_prevs_add(curr, hlock)) in validate_chain()
2985 static inline int validate_chain(struct task_struct *curr, in validate_chain() argument
2997 static void check_chain_key(struct task_struct *curr) in check_chain_key() argument
3004 for (i = 0; i < curr->lockdep_depth; i++) { in check_chain_key()
3005 hlock = curr->held_locks + i; in check_chain_key()
3013 curr->lockdep_depth, i, in check_chain_key()
3032 if (chain_key != curr->curr_chain_key) { in check_chain_key()
3039 curr->lockdep_depth, i, in check_chain_key()
3041 (unsigned long long)curr->curr_chain_key); in check_chain_key()
3047 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3068 print_usage_bug(struct task_struct *curr, struct held_lock *this, in print_usage_bug() argument
3084 curr->comm, task_pid_nr(curr), in print_usage_bug()
3085 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, in print_usage_bug()
3086 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, in print_usage_bug()
3087 trace_hardirqs_enabled(curr), in print_usage_bug()
3088 trace_softirqs_enabled(curr)); in print_usage_bug()
3094 print_irqtrace_events(curr); in print_usage_bug()
3098 lockdep_print_held_locks(curr); in print_usage_bug()
3108 valid_state(struct task_struct *curr, struct held_lock *this, in valid_state() argument
3112 print_usage_bug(curr, this, bad_bit, new_bit); in valid_state()
3123 print_irq_inversion_bug(struct task_struct *curr, in print_irq_inversion_bug() argument
3141 curr->comm, task_pid_nr(curr)); in print_irq_inversion_bug()
3170 lockdep_print_held_locks(curr); in print_irq_inversion_bug()
3187 check_usage_forwards(struct task_struct *curr, struct held_lock *this, in check_usage_forwards() argument
3204 print_irq_inversion_bug(curr, &root, target_entry, in check_usage_forwards()
3214 check_usage_backwards(struct task_struct *curr, struct held_lock *this, in check_usage_backwards() argument
3231 print_irq_inversion_bug(curr, &root, target_entry, in check_usage_backwards()
3236 void print_irqtrace_events(struct task_struct *curr) in print_irqtrace_events() argument
3238 printk("irq event stamp: %u\n", curr->irq_events); in print_irqtrace_events()
3240 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip, in print_irqtrace_events()
3241 (void *)curr->hardirq_enable_ip); in print_irqtrace_events()
3243 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip, in print_irqtrace_events()
3244 (void *)curr->hardirq_disable_ip); in print_irqtrace_events()
3246 curr->softirq_enable_event, (void *)curr->softirq_enable_ip, in print_irqtrace_events()
3247 (void *)curr->softirq_enable_ip); in print_irqtrace_events()
3249 curr->softirq_disable_event, (void *)curr->softirq_disable_ip, in print_irqtrace_events()
3250 (void *)curr->softirq_disable_ip); in print_irqtrace_events()
3288 mark_lock_irq(struct task_struct *curr, struct held_lock *this, in mark_lock_irq() argument
3309 if (!valid_state(curr, this, new_bit, excl_bit)) in mark_lock_irq()
3317 !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK))) in mark_lock_irq()
3324 if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK)) in mark_lock_irq()
3328 !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK, in mark_lock_irq()
3343 mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit) in mark_held_locks() argument
3348 for (i = 0; i < curr->lockdep_depth; i++) { in mark_held_locks()
3350 hlock = curr->held_locks + i; in mark_held_locks()
3360 if (!mark_lock(curr, hlock, hlock_bit)) in mark_held_locks()
3372 struct task_struct *curr = current; in __trace_hardirqs_on_caller() local
3375 curr->hardirqs_enabled = 1; in __trace_hardirqs_on_caller()
3381 if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ)) in __trace_hardirqs_on_caller()
3388 if (curr->softirqs_enabled) in __trace_hardirqs_on_caller()
3389 if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ)) in __trace_hardirqs_on_caller()
3392 curr->hardirq_enable_ip = ip; in __trace_hardirqs_on_caller()
3393 curr->hardirq_enable_event = ++curr->irq_events; in __trace_hardirqs_on_caller()
3444 struct task_struct *curr = current; in lockdep_hardirqs_off() local
3456 if (curr->hardirqs_enabled) { in lockdep_hardirqs_off()
3460 curr->hardirqs_enabled = 0; in lockdep_hardirqs_off()
3461 curr->hardirq_disable_ip = ip; in lockdep_hardirqs_off()
3462 curr->hardirq_disable_event = ++curr->irq_events; in lockdep_hardirqs_off()
3474 struct task_struct *curr = current; in trace_softirqs_on() local
3486 if (curr->softirqs_enabled) { in trace_softirqs_on()
3495 curr->softirqs_enabled = 1; in trace_softirqs_on()
3496 curr->softirq_enable_ip = ip; in trace_softirqs_on()
3497 curr->softirq_enable_event = ++curr->irq_events; in trace_softirqs_on()
3504 if (curr->hardirqs_enabled) in trace_softirqs_on()
3505 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); in trace_softirqs_on()
3514 struct task_struct *curr = current; in trace_softirqs_off() local
3525 if (curr->softirqs_enabled) { in trace_softirqs_off()
3529 curr->softirqs_enabled = 0; in trace_softirqs_off()
3530 curr->softirq_disable_ip = ip; in trace_softirqs_off()
3531 curr->softirq_disable_event = ++curr->irq_events; in trace_softirqs_off()
3542 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) in mark_usage() argument
3553 if (curr->hardirq_context) in mark_usage()
3554 if (!mark_lock(curr, hlock, in mark_usage()
3557 if (curr->softirq_context) in mark_usage()
3558 if (!mark_lock(curr, hlock, in mark_usage()
3562 if (curr->hardirq_context) in mark_usage()
3563 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) in mark_usage()
3565 if (curr->softirq_context) in mark_usage()
3566 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) in mark_usage()
3572 if (!mark_lock(curr, hlock, in mark_usage()
3575 if (curr->softirqs_enabled) in mark_usage()
3576 if (!mark_lock(curr, hlock, in mark_usage()
3580 if (!mark_lock(curr, hlock, in mark_usage()
3583 if (curr->softirqs_enabled) in mark_usage()
3584 if (!mark_lock(curr, hlock, in mark_usage()
3592 if (!mark_lock(curr, hlock, LOCK_USED)) in mark_usage()
3603 static int separate_irq_context(struct task_struct *curr, in separate_irq_context() argument
3606 unsigned int depth = curr->lockdep_depth; in separate_irq_context()
3614 prev_hlock = curr->held_locks + depth-1; in separate_irq_context()
3629 static int mark_lock(struct task_struct *curr, struct held_lock *this, in mark_lock() argument
3666 ret = mark_lock_irq(curr, this, new_bit); in mark_lock()
3679 print_irqtrace_events(curr); in mark_lock()
3689 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) in mark_usage() argument
3699 static inline int separate_irq_context(struct task_struct *curr, in separate_irq_context() argument
3771 print_lock_nested_lock_not_held(struct task_struct *curr, in print_lock_nested_lock_not_held() argument
3786 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); in print_lock_nested_lock_not_held()
3796 lockdep_print_held_locks(curr); in print_lock_nested_lock_not_held()
3817 struct task_struct *curr = current; in __lock_acquire() local
3857 depth = curr->lockdep_depth; in __lock_acquire()
3867 hlock = curr->held_locks + depth - 1; in __lock_acquire()
3885 hlock = curr->held_locks + depth; in __lock_acquire()
3896 hlock->irq_context = task_irq_context(curr); in __lock_acquire()
3909 if (!mark_usage(curr, hlock, check)) in __lock_acquire()
3928 chain_key = curr->curr_chain_key; in __lock_acquire()
3939 if (separate_irq_context(curr, hlock)) { in __lock_acquire()
3946 print_lock_nested_lock_not_held(curr, hlock, ip); in __lock_acquire()
3955 if (!validate_chain(curr, hlock, chain_head, chain_key)) in __lock_acquire()
3958 curr->curr_chain_key = chain_key; in __lock_acquire()
3959 curr->lockdep_depth++; in __lock_acquire()
3960 check_chain_key(curr); in __lock_acquire()
3965 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { in __lock_acquire()
3969 curr->lockdep_depth, MAX_LOCK_DEPTH); in __lock_acquire()
3978 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) in __lock_acquire()
3979 max_lockdep_depth = curr->lockdep_depth; in __lock_acquire()
3984 static void print_unlock_imbalance_bug(struct task_struct *curr, in print_unlock_imbalance_bug() argument
3999 curr->comm, task_pid_nr(curr)); in print_unlock_imbalance_bug()
4005 lockdep_print_held_locks(curr); in print_unlock_imbalance_bug()
4048 static struct held_lock *find_held_lock(struct task_struct *curr, in find_held_lock() argument
4056 hlock = curr->held_locks + i; in find_held_lock()
4083 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, in reacquire_held_locks() argument
4092 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { in reacquire_held_locks()
4120 struct task_struct *curr = current; in __lock_set_class() local
4129 depth = curr->lockdep_depth; in __lock_set_class()
4137 hlock = find_held_lock(curr, lock, depth, &i); in __lock_set_class()
4139 print_unlock_imbalance_bug(curr, lock, ip); in __lock_set_class()
4147 curr->lockdep_depth = i; in __lock_set_class()
4148 curr->curr_chain_key = hlock->prev_chain_key; in __lock_set_class()
4150 if (reacquire_held_locks(curr, depth, i, &merged)) in __lock_set_class()
4157 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged)) in __lock_set_class()
4164 struct task_struct *curr = current; in __lock_downgrade() local
4172 depth = curr->lockdep_depth; in __lock_downgrade()
4180 hlock = find_held_lock(curr, lock, depth, &i); in __lock_downgrade()
4182 print_unlock_imbalance_bug(curr, lock, ip); in __lock_downgrade()
4186 curr->lockdep_depth = i; in __lock_downgrade()
4187 curr->curr_chain_key = hlock->prev_chain_key; in __lock_downgrade()
4193 if (reacquire_held_locks(curr, depth, i, &merged)) in __lock_downgrade()
4204 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) in __lock_downgrade()
4220 struct task_struct *curr = current; in __lock_release() local
4228 depth = curr->lockdep_depth; in __lock_release()
4234 print_unlock_imbalance_bug(curr, lock, ip); in __lock_release()
4242 hlock = find_held_lock(curr, lock, depth, &i); in __lock_release()
4244 print_unlock_imbalance_bug(curr, lock, ip); in __lock_release()
4271 curr->lockdep_depth = i; in __lock_release()
4272 curr->curr_chain_key = hlock->prev_chain_key; in __lock_release()
4281 if (reacquire_held_locks(curr, depth, i + 1, &merged)) in __lock_release()
4289 DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged); in __lock_release()
4302 struct task_struct *curr = current; in __lock_is_held() local
4305 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_is_held()
4306 struct held_lock *hlock = curr->held_locks + i; in __lock_is_held()
4322 struct task_struct *curr = current; in __lock_pin_lock() local
4328 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_pin_lock()
4329 struct held_lock *hlock = curr->held_locks + i; in __lock_pin_lock()
4349 struct task_struct *curr = current; in __lock_repin_lock() local
4355 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_repin_lock()
4356 struct held_lock *hlock = curr->held_locks + i; in __lock_repin_lock()
4369 struct task_struct *curr = current; in __lock_unpin_lock() local
4375 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_unpin_lock()
4376 struct held_lock *hlock = curr->held_locks + i; in __lock_unpin_lock()
4589 static void print_lock_contention_bug(struct task_struct *curr, in print_lock_contention_bug() argument
4604 curr->comm, task_pid_nr(curr)); in print_lock_contention_bug()
4610 lockdep_print_held_locks(curr); in print_lock_contention_bug()
4619 struct task_struct *curr = current; in __lock_contended() local
4625 depth = curr->lockdep_depth; in __lock_contended()
4633 hlock = find_held_lock(curr, lock, depth, &i); in __lock_contended()
4635 print_lock_contention_bug(curr, lock, ip); in __lock_contended()
4660 struct task_struct *curr = current; in __lock_acquired() local
4667 depth = curr->lockdep_depth; in __lock_acquired()
4675 hlock = find_held_lock(curr, lock, depth, &i); in __lock_acquired()
4677 print_lock_contention_bug(curr, lock, _RET_IP_); in __lock_acquired()
5246 print_freed_lock_bug(struct task_struct *curr, const void *mem_from, in print_freed_lock_bug() argument
5260 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); in print_freed_lock_bug()
5262 lockdep_print_held_locks(curr); in print_freed_lock_bug()
5282 struct task_struct *curr = current; in debug_check_no_locks_freed() local
5291 for (i = 0; i < curr->lockdep_depth; i++) { in debug_check_no_locks_freed()
5292 hlock = curr->held_locks + i; in debug_check_no_locks_freed()
5298 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); in debug_check_no_locks_freed()
5373 struct task_struct *curr = current; in lockdep_sys_exit() local
5375 if (unlikely(curr->lockdep_depth)) { in lockdep_sys_exit()
5384 curr->comm, curr->pid); in lockdep_sys_exit()
5385 lockdep_print_held_locks(curr); in lockdep_sys_exit()
5397 struct task_struct *curr = current; in lockdep_rcu_suspicious() local
5436 lockdep_print_held_locks(curr); in lockdep_rcu_suspicious()
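
The references above all follow one pattern: lockdep keeps its state per task, in a held_locks[] array and a lockdep_depth counter on task_struct, and the check_*()/print_*() helpers either take that task as a curr argument or pick it up from current. Below is a minimal, self-contained userspace sketch of that per-task bookkeeping; the struct layouts and the lock_acquire() helper are simplified stand-ins for illustration, not the kernel's real definitions.

    /*
     * Sketch only: simplified stand-ins for task_struct, held_lock and the
     * acquire/print paths referenced in the listing above.
     */
    #include <stdio.h>

    #define MAX_LOCK_DEPTH 48               /* same limit checked in __lock_acquire() */

    struct held_lock {
            const char *name;               /* stand-in for the lock class */
    };

    struct task_struct {
            const char *comm;
            int pid;
            unsigned int lockdep_depth;
            struct held_lock held_locks[MAX_LOCK_DEPTH];
    };

    /* Corresponds to the lockdep_print_held_locks(curr) calls in the listing. */
    static void lockdep_print_held_locks(struct task_struct *curr)
    {
            unsigned int i;

            printf("%u lock(s) held by %s/%d:\n",
                   curr->lockdep_depth, curr->comm, curr->pid);
            for (i = 0; i < curr->lockdep_depth; i++)
                    printf(" #%u: %s\n", i, curr->held_locks[i].name);
    }

    /* Mirrors the push near line 3885: hlock = curr->held_locks + depth. */
    static int lock_acquire(struct task_struct *curr, const char *name)
    {
            if (curr->lockdep_depth >= MAX_LOCK_DEPTH)
                    return 0;               /* depth overflow, as in __lock_acquire() */
            curr->held_locks[curr->lockdep_depth].name = name;
            curr->lockdep_depth++;
            return 1;
    }

    int main(void)
    {
            struct task_struct curr = { .comm = "demo", .pid = 1 };

            lock_acquire(&curr, "rq->lock");
            lock_acquire(&curr, "p->pi_lock");
            lockdep_print_held_locks(&curr);
            return 0;
    }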