Lines Matching refs:sk_lock

563 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); in __sk_receive_skb()
567 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in __sk_receive_skb()
2873 __releases(&sk->sk_lock.slock) in __lock_sock()
2874 __acquires(&sk->sk_lock.slock) in __lock_sock()
2879 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, in __lock_sock()
2881 spin_unlock_bh(&sk->sk_lock.slock); in __lock_sock()
2883 spin_lock_bh(&sk->sk_lock.slock); in __lock_sock()
2887 finish_wait(&sk->sk_lock.wq, &wait); in __lock_sock()
2891 __releases(&sk->sk_lock.slock) in __release_sock()
2892 __acquires(&sk->sk_lock.slock) in __release_sock()
2899 spin_unlock_bh(&sk->sk_lock.slock); in __release_sock()
2913 spin_lock_bh(&sk->sk_lock.slock); in __release_sock()
2925 spin_lock_bh(&sk->sk_lock.slock); in __sk_flush_backlog()
2927 spin_unlock_bh(&sk->sk_lock.slock); in __sk_flush_backlog()
3447 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); in lock_sock_nested()
3450 spin_lock_bh(&sk->sk_lock.slock); in lock_sock_nested()
3453 sk->sk_lock.owned = 1; in lock_sock_nested()
3454 spin_unlock_bh(&sk->sk_lock.slock); in lock_sock_nested()
3460 spin_lock_bh(&sk->sk_lock.slock); in release_sock()
3471 if (waitqueue_active(&sk->sk_lock.wq)) in release_sock()
3472 wake_up(&sk->sk_lock.wq); in release_sock()
3473 spin_unlock_bh(&sk->sk_lock.slock); in release_sock()
3477 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) in __lock_sock_fast()
3480 spin_lock_bh(&sk->sk_lock.slock); in __lock_sock_fast()
3502 sk->sk_lock.owned = 1; in __lock_sock_fast()
3503 __acquire(&sk->sk_lock.slock); in __lock_sock_fast()
3504 spin_unlock_bh(&sk->sk_lock.slock); in __lock_sock_fast()