Lines matching refs:curr
1155 struct task_struct *curr = current; in print_circular_bug_header() local
1166 curr->comm, task_pid_nr(curr)); in print_circular_bug_header()
1191 struct task_struct *curr = current; in print_circular_bug() local
1218 lockdep_print_held_locks(curr); in print_circular_bug()
1500 print_bad_irq_dependency(struct task_struct *curr, in print_bad_irq_dependency() argument
1521 curr->comm, task_pid_nr(curr), in print_bad_irq_dependency()
1522 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, in print_bad_irq_dependency()
1523 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, in print_bad_irq_dependency()
1524 curr->hardirqs_enabled, in print_bad_irq_dependency()
1525 curr->softirqs_enabled); in print_bad_irq_dependency()
1554 lockdep_print_held_locks(curr); in print_bad_irq_dependency()
1574 check_usage(struct task_struct *curr, struct held_lock *prev, in check_usage() argument
1600 return print_bad_irq_dependency(curr, &this, &that, in check_usage()
1647 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, in check_irq_usage() argument
1656 if (!check_usage(curr, prev, next, bit, in check_irq_usage()
1668 if (!check_usage(curr, prev, next, bit, in check_irq_usage()
1676 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, in check_prev_add_irq() argument
1680 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \ in check_prev_add_irq()
1703 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, in check_prev_add_irq() argument
1737 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, in print_deadlock_bug() argument
1749 curr->comm, task_pid_nr(curr)); in print_deadlock_bug()
1756 lockdep_print_held_locks(curr); in print_deadlock_bug()
1773 check_deadlock(struct task_struct *curr, struct held_lock *next, in check_deadlock() argument
1780 for (i = 0; i < curr->lockdep_depth; i++) { in check_deadlock()
1781 prev = curr->held_locks + i; in check_deadlock()
1803 return print_deadlock_bug(curr, prev, next); in check_deadlock()
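
The check_deadlock() references above show the recurring pattern in this listing: walk curr->held_locks[0..lockdep_depth) and compare each already-held lock against the one being acquired. The following is a minimal, self-contained sketch of that scan; struct task, struct held_lock and find_same_class() are simplified stand-ins for illustration, not the kernel's definitions, and the recursive-read and nest_lock cases the real code handles are ignored.

#include <stddef.h>

/* Simplified stand-ins for the kernel structures; illustrative only. */
struct held_lock { const void *lock_class; };
struct task {
	unsigned int lockdep_depth;
	struct held_lock held_locks[48];
};

/*
 * Sketch of the check_deadlock() idea: scan every lock the task already
 * holds and flag an acquisition of the same class.
 */
static struct held_lock *find_same_class(struct task *curr, const void *lock_class)
{
	unsigned int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *prev = curr->held_locks + i;

		if (prev->lock_class == lock_class)
			return prev;	/* would lead to print_deadlock_bug() */
	}
	return NULL;
}
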
1831 check_prev_add(struct task_struct *curr, struct held_lock *prev, in check_prev_add() argument
1866 if (!check_prev_add_irq(curr, prev, next)) in check_prev_add()
1939 check_prevs_add(struct task_struct *curr, struct held_lock *next) in check_prevs_add() argument
1941 int depth = curr->lockdep_depth; in check_prevs_add()
1961 if (curr->held_locks[depth].irq_context != in check_prevs_add()
1962 curr->held_locks[depth-1].irq_context) in check_prevs_add()
1966 int distance = curr->lockdep_depth - depth + 1; in check_prevs_add()
1967 hlock = curr->held_locks + depth - 1; in check_prevs_add()
1974 int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace); in check_prevs_add()
1997 if (curr->held_locks[depth].irq_context != in check_prevs_add()
1998 curr->held_locks[depth-1].irq_context) in check_prevs_add()
2029 static inline int get_first_held_lock(struct task_struct *curr, in get_first_held_lock() argument
2035 for (i = curr->lockdep_depth - 1; i >= 0; i--) { in get_first_held_lock()
2036 hlock_curr = curr->held_locks + i; in get_first_held_lock()
2060 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next) in print_chain_keys_held_locks() argument
2064 int depth = curr->lockdep_depth; in print_chain_keys_held_locks()
2068 for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) { in print_chain_keys_held_locks()
2069 hlock = curr->held_locks + i; in print_chain_keys_held_locks()
2095 static void print_collision(struct task_struct *curr, in print_collision() argument
2108 print_chain_keys_held_locks(curr, hlock_next); in print_collision()
2124 static int check_no_collision(struct task_struct *curr, in check_no_collision() argument
2131 i = get_first_held_lock(curr, hlock); in check_no_collision()
2133 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) { in check_no_collision()
2134 print_collision(curr, hlock, chain); in check_no_collision()
2139 id = curr->held_locks[i].class_idx - 1; in check_no_collision()
2142 print_collision(curr, hlock, chain); in check_no_collision()
2227 static inline int add_chain_cache(struct task_struct *curr, in add_chain_cache() argument
2260 i = get_first_held_lock(curr, hlock); in add_chain_cache()
2261 chain->depth = curr->lockdep_depth + 1 - i; in add_chain_cache()
2264 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); in add_chain_cache()
2270 int lock_id = curr->held_locks[i].class_idx - 1; in add_chain_cache()
2327 static inline int lookup_chain_cache_add(struct task_struct *curr, in lookup_chain_cache_add() argument
2336 if (!check_no_collision(curr, hlock, chain)) in lookup_chain_cache_add()
2366 if (!add_chain_cache(curr, hlock, chain_key)) in lookup_chain_cache_add()
2372 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, in validate_chain() argument
2386 lookup_chain_cache_add(curr, hlock, chain_key)) { in validate_chain()
2399 int ret = check_deadlock(curr, hlock, lock, hlock->read); in validate_chain()
2415 if (!check_prevs_add(curr, hlock)) in validate_chain()
2429 static inline int validate_chain(struct task_struct *curr, in validate_chain() argument
2441 static void check_chain_key(struct task_struct *curr) in check_chain_key() argument
2448 for (i = 0; i < curr->lockdep_depth; i++) { in check_chain_key()
2449 hlock = curr->held_locks + i; in check_chain_key()
2457 curr->lockdep_depth, i, in check_chain_key()
2474 if (chain_key != curr->curr_chain_key) { in check_chain_key()
2481 curr->lockdep_depth, i, in check_chain_key()
2483 (unsigned long long)curr->curr_chain_key); in check_chain_key()
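
check_chain_key() recomputes the chain key from the held-lock stack and compares it with the cached curr->curr_chain_key. The sketch below shows that consistency check under simplified assumptions; mix_key() is a placeholder hash step, not the kernel's iterate_chain_key(), and the struct fields are stand-ins.

#include <stdint.h>
#include <stdio.h>

struct held_lock { unsigned int class_idx; };
struct task {
	unsigned int lockdep_depth;
	uint64_t curr_chain_key;
	struct held_lock held_locks[48];
};

/* Placeholder mix step; the real code uses iterate_chain_key(). */
static uint64_t mix_key(uint64_t key, unsigned int class_idx)
{
	return (key * 0x100000001b3ULL) ^ class_idx;
}

/* Recompute the key over all held locks and compare with the cached value. */
static void check_chain_key_sketch(struct task *curr)
{
	uint64_t chain_key = 0;
	unsigned int i;

	for (i = 0; i < curr->lockdep_depth; i++)
		chain_key = mix_key(chain_key, curr->held_locks[i].class_idx);

	if (chain_key != curr->curr_chain_key)
		fprintf(stderr, "chain key mismatch at depth %u\n",
			curr->lockdep_depth);
}
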
2507 print_usage_bug(struct task_struct *curr, struct held_lock *this, in print_usage_bug() argument
2523 curr->comm, task_pid_nr(curr), in print_usage_bug()
2524 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, in print_usage_bug()
2525 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, in print_usage_bug()
2526 trace_hardirqs_enabled(curr), in print_usage_bug()
2527 trace_softirqs_enabled(curr)); in print_usage_bug()
2533 print_irqtrace_events(curr); in print_usage_bug()
2537 lockdep_print_held_locks(curr); in print_usage_bug()
2549 valid_state(struct task_struct *curr, struct held_lock *this, in valid_state() argument
2553 return print_usage_bug(curr, this, bad_bit, new_bit); in valid_state()
2557 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2566 print_irq_inversion_bug(struct task_struct *curr, in print_irq_inversion_bug() argument
2584 curr->comm, task_pid_nr(curr)); in print_irq_inversion_bug()
2613 lockdep_print_held_locks(curr); in print_irq_inversion_bug()
2631 check_usage_forwards(struct task_struct *curr, struct held_lock *this, in check_usage_forwards() argument
2646 return print_irq_inversion_bug(curr, &root, target_entry, in check_usage_forwards()
2655 check_usage_backwards(struct task_struct *curr, struct held_lock *this, in check_usage_backwards() argument
2670 return print_irq_inversion_bug(curr, &root, target_entry, in check_usage_backwards()
2674 void print_irqtrace_events(struct task_struct *curr) in print_irqtrace_events() argument
2676 printk("irq event stamp: %u\n", curr->irq_events); in print_irqtrace_events()
2678 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip, in print_irqtrace_events()
2679 (void *)curr->hardirq_enable_ip); in print_irqtrace_events()
2681 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip, in print_irqtrace_events()
2682 (void *)curr->hardirq_disable_ip); in print_irqtrace_events()
2684 curr->softirq_enable_event, (void *)curr->softirq_enable_ip, in print_irqtrace_events()
2685 (void *)curr->softirq_enable_ip); in print_irqtrace_events()
2687 curr->softirq_disable_event, (void *)curr->softirq_disable_ip, in print_irqtrace_events()
2688 (void *)curr->softirq_disable_ip); in print_irqtrace_events()
2726 mark_lock_irq(struct task_struct *curr, struct held_lock *this, in mark_lock_irq() argument
2747 if (!valid_state(curr, this, new_bit, excl_bit)) in mark_lock_irq()
2755 !usage(curr, this, excl_bit, state_name(new_bit & ~1))) in mark_lock_irq()
2762 if (!valid_state(curr, this, new_bit, excl_bit + 1)) in mark_lock_irq()
2766 !usage(curr, this, excl_bit + 1, in mark_lock_irq()
2787 mark_held_locks(struct task_struct *curr, enum mark_type mark) in mark_held_locks() argument
2793 for (i = 0; i < curr->lockdep_depth; i++) { in mark_held_locks()
2794 hlock = curr->held_locks + i; in mark_held_locks()
2805 if (!mark_lock(curr, hlock, usage_bit)) in mark_held_locks()
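
mark_held_locks() applies a usage bit to every lock the task currently holds; __trace_hardirqs_on_caller() and trace_softirqs_on() below use it to propagate "enabled while held" state. A simplified sketch follows; the enum names and usage_mask field are illustrative stand-ins, and the real code records the bit via mark_lock() rather than setting a mask directly.

/* Illustrative usage bits, not the kernel's enum lock_usage_bit. */
enum usage_bit { USED_IN_HARDIRQ, ENABLED_HARDIRQ, USED_IN_SOFTIRQ, ENABLED_SOFTIRQ };

struct held_lock { unsigned int usage_mask; };
struct task {
	unsigned int lockdep_depth;
	struct held_lock held_locks[48];
};

/* Walk every held lock and record the new usage bit on each of them. */
static int mark_held_locks_sketch(struct task *curr, enum usage_bit bit)
{
	unsigned int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		hlock->usage_mask |= 1U << bit;	/* real code: mark_lock() */
	}
	return 1;
}
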
2817 struct task_struct *curr = current; in __trace_hardirqs_on_caller() local
2820 curr->hardirqs_enabled = 1; in __trace_hardirqs_on_caller()
2826 if (!mark_held_locks(curr, HARDIRQ)) in __trace_hardirqs_on_caller()
2833 if (curr->softirqs_enabled) in __trace_hardirqs_on_caller()
2834 if (!mark_held_locks(curr, SOFTIRQ)) in __trace_hardirqs_on_caller()
2837 curr->hardirq_enable_ip = ip; in __trace_hardirqs_on_caller()
2838 curr->hardirq_enable_event = ++curr->irq_events; in __trace_hardirqs_on_caller()
2888 struct task_struct *curr = current; in lockdep_hardirqs_off() local
2900 if (curr->hardirqs_enabled) { in lockdep_hardirqs_off()
2904 curr->hardirqs_enabled = 0; in lockdep_hardirqs_off()
2905 curr->hardirq_disable_ip = ip; in lockdep_hardirqs_off()
2906 curr->hardirq_disable_event = ++curr->irq_events; in lockdep_hardirqs_off()
2917 struct task_struct *curr = current; in trace_softirqs_on() local
2929 if (curr->softirqs_enabled) { in trace_softirqs_on()
2938 curr->softirqs_enabled = 1; in trace_softirqs_on()
2939 curr->softirq_enable_ip = ip; in trace_softirqs_on()
2940 curr->softirq_enable_event = ++curr->irq_events; in trace_softirqs_on()
2947 if (curr->hardirqs_enabled) in trace_softirqs_on()
2948 mark_held_locks(curr, SOFTIRQ); in trace_softirqs_on()
2957 struct task_struct *curr = current; in trace_softirqs_off() local
2968 if (curr->softirqs_enabled) { in trace_softirqs_off()
2972 curr->softirqs_enabled = 0; in trace_softirqs_off()
2973 curr->softirq_disable_ip = ip; in trace_softirqs_off()
2974 curr->softirq_disable_event = ++curr->irq_events; in trace_softirqs_off()
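
trace_softirqs_on() and trace_softirqs_off() keep per-task IRQ-trace bookkeeping: an enabled flag, the caller's instruction pointer, and a monotonically increasing event stamp (irq_events), which print_irqtrace_events() dumps later. A compact sketch of that bookkeeping, with simplified fields and hypothetical function names:

struct task {
	int softirqs_enabled;
	unsigned long softirq_enable_ip, softirq_disable_ip;
	unsigned int softirq_enable_event, softirq_disable_event;
	unsigned int irq_events;
};

/* Record a softirq-enable transition: flag, call site, event stamp. */
static void softirqs_on_sketch(struct task *curr, unsigned long ip)
{
	if (curr->softirqs_enabled)
		return;			/* redundant enable, nothing to record */

	curr->softirqs_enabled = 1;
	curr->softirq_enable_ip = ip;
	curr->softirq_enable_event = ++curr->irq_events;
}

/* Record the matching disable transition. */
static void softirqs_off_sketch(struct task *curr, unsigned long ip)
{
	if (!curr->softirqs_enabled)
		return;

	curr->softirqs_enabled = 0;
	curr->softirq_disable_ip = ip;
	curr->softirq_disable_event = ++curr->irq_events;
}
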
2984 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) in mark_irqflags() argument
2992 if (curr->hardirq_context) in mark_irqflags()
2993 if (!mark_lock(curr, hlock, in mark_irqflags()
2996 if (curr->softirq_context) in mark_irqflags()
2997 if (!mark_lock(curr, hlock, in mark_irqflags()
3001 if (curr->hardirq_context) in mark_irqflags()
3002 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) in mark_irqflags()
3004 if (curr->softirq_context) in mark_irqflags()
3005 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) in mark_irqflags()
3011 if (!mark_lock(curr, hlock, in mark_irqflags()
3014 if (curr->softirqs_enabled) in mark_irqflags()
3015 if (!mark_lock(curr, hlock, in mark_irqflags()
3019 if (!mark_lock(curr, hlock, in mark_irqflags()
3022 if (curr->softirqs_enabled) in mark_irqflags()
3023 if (!mark_lock(curr, hlock, in mark_irqflags()
3037 static int separate_irq_context(struct task_struct *curr, in separate_irq_context() argument
3040 unsigned int depth = curr->lockdep_depth; in separate_irq_context()
3048 prev_hlock = curr->held_locks + depth-1; in separate_irq_context()
3063 int mark_lock_irq(struct task_struct *curr, struct held_lock *this, in mark_lock_irq() argument
3070 static inline int mark_irqflags(struct task_struct *curr, in mark_irqflags() argument
3081 static inline int separate_irq_context(struct task_struct *curr, in separate_irq_context() argument
3092 static int mark_lock(struct task_struct *curr, struct held_lock *this, in mark_lock() argument
3127 ret = mark_lock_irq(curr, this, new_bit); in mark_lock()
3149 print_irqtrace_events(curr); in mark_lock()
3227 print_lock_nested_lock_not_held(struct task_struct *curr, in print_lock_nested_lock_not_held() argument
3242 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); in print_lock_nested_lock_not_held()
3252 lockdep_print_held_locks(curr); in print_lock_nested_lock_not_held()
3271 struct task_struct *curr = current; in __lock_acquire() local
3317 depth = curr->lockdep_depth; in __lock_acquire()
3327 hlock = curr->held_locks + depth - 1; in __lock_acquire()
3345 hlock = curr->held_locks + depth; in __lock_acquire()
3356 hlock->irq_context = task_irq_context(curr); in __lock_acquire()
3368 if (check && !mark_irqflags(curr, hlock)) in __lock_acquire()
3372 if (!mark_lock(curr, hlock, LOCK_USED)) in __lock_acquire()
3391 chain_key = curr->curr_chain_key; in __lock_acquire()
3402 if (separate_irq_context(curr, hlock)) { in __lock_acquire()
3409 return print_lock_nested_lock_not_held(curr, hlock, ip); in __lock_acquire()
3411 if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) in __lock_acquire()
3414 curr->curr_chain_key = chain_key; in __lock_acquire()
3415 curr->lockdep_depth++; in __lock_acquire()
3416 check_chain_key(curr); in __lock_acquire()
3421 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { in __lock_acquire()
3425 curr->lockdep_depth, MAX_LOCK_DEPTH); in __lock_acquire()
3434 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) in __lock_acquire()
3435 max_lockdep_depth = curr->lockdep_depth; in __lock_acquire()
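
The __lock_acquire() references show the push side of the per-task lock stack: the new entry is written at curr->held_locks[depth], the chain is validated, and curr->lockdep_depth is incremented with an overflow check against MAX_LOCK_DEPTH. A minimal sketch of that push; the types, MAX_DEPTH constant and lock_acquire_sketch() name are assumptions for illustration, and the dependency-chain validation is elided.

#include <stdio.h>

#define MAX_DEPTH 48	/* stands in for MAX_LOCK_DEPTH */

struct held_lock { const void *instance; unsigned long acquire_ip; };
struct task {
	unsigned int lockdep_depth;
	struct held_lock held_locks[MAX_DEPTH];
};

/* Push one acquisition onto the task's held-lock stack. */
static int lock_acquire_sketch(struct task *curr, const void *lock, unsigned long ip)
{
	struct held_lock *hlock;

	if (curr->lockdep_depth >= MAX_DEPTH) {
		fprintf(stderr, "BUG: MAX_LOCK_DEPTH too low! (%u)\n",
			curr->lockdep_depth);
		return 0;
	}

	hlock = curr->held_locks + curr->lockdep_depth;
	hlock->instance = lock;
	hlock->acquire_ip = ip;
	/* the real code also computes the chain key and validates the chain */
	curr->lockdep_depth++;
	return 1;
}
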
3441 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock, in print_unlock_imbalance_bug() argument
3455 curr->comm, task_pid_nr(curr)); in print_unlock_imbalance_bug()
3461 lockdep_print_held_locks(curr); in print_unlock_imbalance_bug()
3506 static struct held_lock *find_held_lock(struct task_struct *curr, in find_held_lock() argument
3514 hlock = curr->held_locks + i; in find_held_lock()
3541 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, in reacquire_held_locks() argument
3546 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { in reacquire_held_locks()
3564 struct task_struct *curr = current; in __lock_set_class() local
3570 depth = curr->lockdep_depth; in __lock_set_class()
3578 hlock = find_held_lock(curr, lock, depth, &i); in __lock_set_class()
3580 return print_unlock_imbalance_bug(curr, lock, ip); in __lock_set_class()
3586 curr->lockdep_depth = i; in __lock_set_class()
3587 curr->curr_chain_key = hlock->prev_chain_key; in __lock_set_class()
3589 if (reacquire_held_locks(curr, depth, i)) in __lock_set_class()
3596 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) in __lock_set_class()
3603 struct task_struct *curr = current; in __lock_downgrade() local
3608 depth = curr->lockdep_depth; in __lock_downgrade()
3616 hlock = find_held_lock(curr, lock, depth, &i); in __lock_downgrade()
3618 return print_unlock_imbalance_bug(curr, lock, ip); in __lock_downgrade()
3620 curr->lockdep_depth = i; in __lock_downgrade()
3621 curr->curr_chain_key = hlock->prev_chain_key; in __lock_downgrade()
3627 if (reacquire_held_locks(curr, depth, i)) in __lock_downgrade()
3634 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) in __lock_downgrade()
3649 struct task_struct *curr = current; in __lock_release() local
3657 depth = curr->lockdep_depth; in __lock_release()
3663 return print_unlock_imbalance_bug(curr, lock, ip); in __lock_release()
3669 hlock = find_held_lock(curr, lock, depth, &i); in __lock_release()
3671 return print_unlock_imbalance_bug(curr, lock, ip); in __lock_release()
3696 curr->lockdep_depth = i; in __lock_release()
3697 curr->curr_chain_key = hlock->prev_chain_key; in __lock_release()
3699 if (reacquire_held_locks(curr, depth, i + 1)) in __lock_release()
3706 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) in __lock_release()
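
__lock_release(), like __lock_set_class() and __lock_downgrade() above, locates the entry with find_held_lock(), truncates the stack back to it, and re-adds the locks that sat above it via reacquire_held_locks(). The sketch below captures that pop-from-the-middle pattern under simplified assumptions; it shifts the remaining entries down in place, whereas the real code re-runs the acquire path for each of them and also restores the previous chain key.

struct held_lock { const void *instance; };
struct task {
	unsigned int lockdep_depth;
	struct held_lock held_locks[48];
};

/* Locate the held_locks entry for @lock, scanning from the top down. */
static int find_held_lock_sketch(struct task *curr, const void *lock)
{
	int i;

	for (i = (int)curr->lockdep_depth - 1; i >= 0; i--)
		if (curr->held_locks[i].instance == lock)
			return i;
	return -1;	/* would lead to print_unlock_imbalance_bug() */
}

/* Release @lock even if it is not the top of the stack. */
static int lock_release_sketch(struct task *curr, const void *lock)
{
	int i = find_held_lock_sketch(curr, lock);
	unsigned int depth = curr->lockdep_depth, j;

	if (i < 0)
		return 0;

	/* Slide the locks held above @lock down by one slot. */
	for (j = i + 1; j < depth; j++)
		curr->held_locks[j - 1] = curr->held_locks[j];

	curr->lockdep_depth = depth - 1;
	return 1;
}
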
3714 struct task_struct *curr = current; in __lock_is_held() local
3717 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_is_held()
3718 struct held_lock *hlock = curr->held_locks + i; in __lock_is_held()
3734 struct task_struct *curr = current; in __lock_pin_lock() local
3740 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_pin_lock()
3741 struct held_lock *hlock = curr->held_locks + i; in __lock_pin_lock()
3761 struct task_struct *curr = current; in __lock_repin_lock() local
3767 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_repin_lock()
3768 struct held_lock *hlock = curr->held_locks + i; in __lock_repin_lock()
3781 struct task_struct *curr = current; in __lock_unpin_lock() local
3787 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_unpin_lock()
3788 struct held_lock *hlock = curr->held_locks + i; in __lock_unpin_lock()
4002 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, in print_lock_contention_bug() argument
4016 curr->comm, task_pid_nr(curr)); in print_lock_contention_bug()
4022 lockdep_print_held_locks(curr); in print_lock_contention_bug()
4033 struct task_struct *curr = current; in __lock_contended() local
4039 depth = curr->lockdep_depth; in __lock_contended()
4047 hlock = find_held_lock(curr, lock, depth, &i); in __lock_contended()
4049 print_lock_contention_bug(curr, lock, ip); in __lock_contended()
4074 struct task_struct *curr = current; in __lock_acquired() local
4081 depth = curr->lockdep_depth; in __lock_acquired()
4089 hlock = find_held_lock(curr, lock, depth, &i); in __lock_acquired()
4091 print_lock_contention_bug(curr, lock, _RET_IP_); in __lock_acquired()
4344 print_freed_lock_bug(struct task_struct *curr, const void *mem_from, in print_freed_lock_bug() argument
4358 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); in print_freed_lock_bug()
4360 lockdep_print_held_locks(curr); in print_freed_lock_bug()
4380 struct task_struct *curr = current; in debug_check_no_locks_freed() local
4389 for (i = 0; i < curr->lockdep_depth; i++) { in debug_check_no_locks_freed()
4390 hlock = curr->held_locks + i; in debug_check_no_locks_freed()
4396 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); in debug_check_no_locks_freed()
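
debug_check_no_locks_freed() walks the held-lock stack and complains when a held lock's memory lies inside a region about to be freed. A sketch of that overlap test; the range check, field names and message wording are simplified stand-ins, not the kernel's exact output.

#include <stdint.h>
#include <stdio.h>

struct held_lock { const void *instance; };
struct task {
	unsigned int lockdep_depth;
	struct held_lock held_locks[48];
	const char *comm;
	int pid;
};

static int addr_in_range(const void *addr, uintptr_t from, uintptr_t to)
{
	uintptr_t a = (uintptr_t)addr;

	return a >= from && a < to;
}

/* Warn if freeing [mem_from, mem_from + mem_len) would free a held lock. */
static void check_no_locks_freed_sketch(struct task *curr,
					const void *mem_from, unsigned long mem_len)
{
	uintptr_t from = (uintptr_t)mem_from;
	uintptr_t to = from + mem_len;
	unsigned int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (!addr_in_range(hlock->instance, from, to))
			continue;

		fprintf(stderr,
			"%s/%d is freeing memory %p-%p, with a lock still held there!\n",
			curr->comm, curr->pid, (void *)from, (void *)(to - 1));
		break;
	}
}
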
4471 struct task_struct *curr = current; in lockdep_sys_exit() local
4473 if (unlikely(curr->lockdep_depth)) { in lockdep_sys_exit()
4482 curr->comm, curr->pid); in lockdep_sys_exit()
4483 lockdep_print_held_locks(curr); in lockdep_sys_exit()
4495 struct task_struct *curr = current; in lockdep_rcu_suspicious() local
4534 lockdep_print_held_locks(curr); in lockdep_rcu_suspicious()