Lines matching refs:sk — net/smc/smc_close.c

41 struct sock *sk; in smc_close_cleanup_listen() local
44 while ((sk = smc_accept_dequeue(parent, NULL))) in smc_close_cleanup_listen()
45 smc_close_non_accepted(sk); in smc_close_cleanup_listen()
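
Lines 41-45 are essentially the whole listen-socket cleanup: drain the parent's accept queue and finish closing every child connection that was never accepted. A minimal reconstruction of that shape, assuming the net/smc internal helpers smc_accept_dequeue() and smc_close_non_accepted() behave as declared in the SMC headers; a sketch, not the verbatim kernel function:

    /* kernel context; needs <net/sock.h> plus the net/smc "smc.h" internals */
    static void close_cleanup_listen_sketch(struct sock *parent)
    {
        struct sock *sk;

        /* pull each never-accepted child off the accept queue and close it */
        while ((sk = smc_accept_dequeue(parent, NULL)))
            smc_close_non_accepted(sk);
    }
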
52 struct sock *sk = &smc->sk; in smc_close_stream_wait() local
61 add_wait_queue(sk_sleep(sk), &wait); in smc_close_stream_wait()
65 rc = sk_wait_event(sk, &timeout, in smc_close_stream_wait()
67 sk->sk_err == ECONNABORTED || in smc_close_stream_wait()
68 sk->sk_err == ECONNRESET || in smc_close_stream_wait()
74 remove_wait_queue(sk_sleep(sk), &wait); in smc_close_stream_wait()
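
Lines 52-74 show the classic socket wait loop: register on the socket's wait queue, sleep in sk_wait_event() (which drops and retakes the sock lock) until the prepared sends are flushed, the socket errors out, or the linger timeout expires. A simplified sketch of that loop, assuming smc_tx_prepared_sends() from the SMC tx code and the usual DEFINE_WAIT_FUNC/woken_wake_function pattern; not the verbatim function:

    /* kernel context; needs <net/sock.h>, <linux/wait.h>, <linux/sched/signal.h> */
    static void close_stream_wait_sketch(struct smc_sock *smc, long timeout)
    {
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = &smc->sk;
        int rc;

        if (!timeout)
            return;

        add_wait_queue(sk_sleep(sk), &wait);
        while (!signal_pending(current) && timeout) {
            /* sk_wait_event() releases the sock lock while sleeping and
             * updates &timeout with the time remaining */
            rc = sk_wait_event(sk, &timeout,
                               !smc_tx_prepared_sends(&smc->conn) ||
                               sk->sk_err == ECONNABORTED ||
                               sk->sk_err == ECONNRESET,
                               &wait);
            if (rc)
                break;
        }
        remove_wait_queue(sk_sleep(sk), &wait);
    }
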
82 smc->sk.sk_state_change(&smc->sk); in smc_close_wake_tx_prepared()
113 struct sock *sk = &smc->sk; in smc_close_cancel_work() local
115 release_sock(sk); in smc_close_cancel_work()
118 lock_sock(sk); in smc_close_cancel_work()
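
Lines 113-118 only show the lock choreography of smc_close_cancel_work(): the sock lock is dropped before waiting on the connection's work items and retaken afterwards, because those workers take the sock lock themselves. The calls in between are not in the listing and are assumed here from the conn.close_work / conn.tx_work members of the SMC connection; treat the body as illustrative only:

    /* kernel context; needs <net/sock.h> and <linux/workqueue.h> */
    static void close_cancel_work_sketch(struct smc_sock *smc)
    {
        struct sock *sk = &smc->sk;

        release_sock(sk);               /* the workers themselves take the sock lock */
        if (cancel_work_sync(&smc->conn.close_work))
            sock_put(sk);               /* assumed: drop the ref the work scheduler held */
        cancel_delayed_work_sync(&smc->conn.tx_work);   /* assumed member name */
        lock_sock(sk);                  /* caller expects the lock held on return */
    }
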
126 struct sock *sk = &smc->sk; in smc_close_active_abort() local
129 if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) { in smc_close_active_abort()
130 sk->sk_err = ECONNABORTED; in smc_close_active_abort()
131 if (smc->clcsock && smc->clcsock->sk) in smc_close_active_abort()
132 tcp_abort(smc->clcsock->sk, ECONNABORTED); in smc_close_active_abort()
134 switch (sk->sk_state) { in smc_close_active_abort()
138 sk->sk_state = SMC_PEERABORTWAIT; in smc_close_active_abort()
140 if (sk->sk_state != SMC_PEERABORTWAIT) in smc_close_active_abort()
142 sk->sk_state = SMC_CLOSED; in smc_close_active_abort()
143 sock_put(sk); /* (postponed) passive closing */ in smc_close_active_abort()
148 sk->sk_state = SMC_PEERABORTWAIT; in smc_close_active_abort()
150 if (sk->sk_state != SMC_PEERABORTWAIT) in smc_close_active_abort()
152 sk->sk_state = SMC_CLOSED; in smc_close_active_abort()
155 sock_put(sk); /* passive closing */ in smc_close_active_abort()
159 sk->sk_state = SMC_PEERABORTWAIT; in smc_close_active_abort()
161 if (sk->sk_state != SMC_PEERABORTWAIT) in smc_close_active_abort()
163 sk->sk_state = SMC_CLOSED; in smc_close_active_abort()
173 sock_set_flag(sk, SOCK_DEAD); in smc_close_active_abort()
174 sk->sk_state_change(sk); in smc_close_active_abort()
177 release_sock(sk); in smc_close_active_abort()
179 lock_sock(sk); in smc_close_active_abort()
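
Lines 126-179 sketch the active-abort sequence: surface ECONNABORTED on the SMC socket, reset the internal TCP socket with tcp_abort(), park the state in SMC_PEERABORTWAIT while pending work is cancelled (which drops the sock lock), then move to SMC_CLOSED, mark the sock dead, and wake any sleepers. A heavily simplified sketch covering only the first listed state group (lines 134-143); the SMC_* states and struct smc_sock are assumed from the SMC headers:

    /* kernel context; needs <net/sock.h>, <net/tcp.h> and the SMC internals */
    static void close_active_abort_sketch(struct smc_sock *smc)
    {
        struct sock *sk = &smc->sk;

        if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
            sk->sk_err = ECONNABORTED;                  /* recv()/send() now fail */
            tcp_abort(smc->clcsock->sk, ECONNABORTED);  /* reset the internal clcsock */
        }

        switch (sk->sk_state) {
        case SMC_ACTIVE:
            sk->sk_state = SMC_PEERABORTWAIT;   /* park the state */
            /* cancel pending close/tx work here; that drops and retakes the
             * sock lock, so the state may have moved underneath us */
            if (sk->sk_state != SMC_PEERABORTWAIT)
                break;
            sk->sk_state = SMC_CLOSED;
            sock_put(sk);       /* (postponed) passive-closing reference */
            break;
        default:
            break;
        }

        sock_set_flag(sk, SOCK_DEAD);
        sk->sk_state_change(sk);        /* wake poll()/recv() sleepers */
        /* lines 177/179 additionally drop and retake the sock lock around
         * further teardown (releasing the clcsock in mainline) */
    }
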
194 struct sock *sk = &smc->sk; in smc_close_active() local
200 0 : sock_flag(sk, SOCK_LINGER) ? in smc_close_active()
201 sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT; in smc_close_active()
203 old_state = sk->sk_state; in smc_close_active()
205 switch (sk->sk_state) { in smc_close_active()
207 sk->sk_state = SMC_CLOSED; in smc_close_active()
210 sk->sk_state = SMC_CLOSED; in smc_close_active()
211 sk->sk_state_change(sk); /* wake up accept */ in smc_close_active()
212 if (smc->clcsock && smc->clcsock->sk) { in smc_close_active()
213 smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready; in smc_close_active()
214 smc->clcsock->sk->sk_user_data = NULL; in smc_close_active()
217 smc_close_cleanup_listen(sk); in smc_close_active()
218 release_sock(sk); in smc_close_active()
220 lock_sock(sk); in smc_close_active()
224 release_sock(sk); in smc_close_active()
226 lock_sock(sk); in smc_close_active()
227 if (sk->sk_state == SMC_ACTIVE) { in smc_close_active()
230 sk->sk_state = SMC_PEERCLOSEWAIT1; in smc_close_active()
243 sk->sk_state = SMC_CLOSED; in smc_close_active()
249 release_sock(sk); in smc_close_active()
251 lock_sock(sk); in smc_close_active()
252 if (sk->sk_state != SMC_APPCLOSEWAIT1 && in smc_close_active()
253 sk->sk_state != SMC_APPCLOSEWAIT2) in smc_close_active()
259 sk->sk_state = SMC_CLOSED; in smc_close_active()
260 sock_put(sk); /* postponed passive closing */ in smc_close_active()
263 sk->sk_state = SMC_PEERFINCLOSEWAIT; in smc_close_active()
280 sk->sk_state = SMC_CLOSED; in smc_close_active()
283 sk->sk_state = SMC_CLOSED; in smc_close_active()
290 if (old_state != sk->sk_state) in smc_close_active()
291 sk->sk_state_change(sk); in smc_close_active()
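
Lines 194-291 are the active-close state machine. The pattern: compute a linger timeout (0, sk_lingertime if SO_LINGER is set, otherwise SMC_MAX_STREAM_WAIT_TIMEOUT; only the tail of that ternary appears at lines 200-201, the exiting-task test is assumed), remember the old state, switch on sk->sk_state, and call sk_state_change() only if the state actually moved. A sketch of the SMC_INIT and SMC_ACTIVE arms, with smc_close_final() assumed to send the close indication to the peer; the remaining arms from the listing are omitted:

    /* kernel context; needs <net/sock.h>, <linux/sched.h> and the SMC internals */
    static int close_active_sketch(struct smc_sock *smc)
    {
        struct sock *sk = &smc->sk;
        int old_state, rc = 0;
        long timeout;

        timeout = current->flags & PF_EXITING ?
                  0 : sock_flag(sk, SOCK_LINGER) ?
                      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

        old_state = sk->sk_state;
        switch (sk->sk_state) {
        case SMC_INIT:
            sk->sk_state = SMC_CLOSED;          /* nothing to tear down yet */
            break;
        case SMC_ACTIVE:
            smc_close_stream_wait(smc, timeout);        /* drain prepared sends (lines 52-74) */
            release_sock(sk);
            cancel_delayed_work_sync(&smc->conn.tx_work);       /* assumed member name */
            lock_sock(sk);
            if (sk->sk_state == SMC_ACTIVE) {   /* state may have raced while unlocked */
                rc = smc_close_final(&smc->conn);       /* assumed: tell the peer */
                sk->sk_state = SMC_PEERCLOSEWAIT1;
            }
            break;
        default:
            break;
        }

        if (old_state != sk->sk_state)
            sk->sk_state_change(sk);    /* wake poll()/accept() sleepers */
        return rc;
    }
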
299 struct sock *sk = &smc->sk; in smc_close_passive_abort_received() local
301 switch (sk->sk_state) { in smc_close_passive_abort_received()
305 sk->sk_state = SMC_PROCESSABORT; in smc_close_passive_abort_received()
306 sock_put(sk); /* passive closing */ in smc_close_passive_abort_received()
309 sk->sk_state = SMC_PROCESSABORT; in smc_close_passive_abort_received()
316 sk->sk_state = SMC_PROCESSABORT; in smc_close_passive_abort_received()
318 sk->sk_state = SMC_CLOSED; in smc_close_passive_abort_received()
319 sock_put(sk); /* passive closing */ in smc_close_passive_abort_received()
323 sk->sk_state = SMC_CLOSED; in smc_close_passive_abort_received()
324 sock_put(sk); /* passive closing */ in smc_close_passive_abort_received()
327 sk->sk_state = SMC_CLOSED; in smc_close_passive_abort_received()
347 struct sock *sk = &smc->sk; in smc_close_passive_work() local
350 lock_sock(sk); in smc_close_passive_work()
351 old_state = sk->sk_state; in smc_close_passive_work()
357 release_sock(&smc->sk); in smc_close_passive_work()
359 lock_sock(&smc->sk); in smc_close_passive_work()
363 switch (sk->sk_state) { in smc_close_passive_work()
365 sk->sk_state = SMC_APPCLOSEWAIT1; in smc_close_passive_work()
368 sk->sk_state = SMC_APPCLOSEWAIT1; in smc_close_passive_work()
375 sk->sk_state = SMC_PEERCLOSEWAIT2; in smc_close_passive_work()
381 if (sock_flag(sk, SOCK_DEAD) && in smc_close_passive_work()
384 sk->sk_state = SMC_CLOSED; in smc_close_passive_work()
387 sk->sk_state = SMC_APPFINCLOSEWAIT; in smc_close_passive_work()
389 sock_put(sk); /* passive closing */ in smc_close_passive_work()
393 sk->sk_state = SMC_CLOSED; in smc_close_passive_work()
394 sock_put(sk); /* passive closing */ in smc_close_passive_work()
412 sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */ in smc_close_passive_work()
413 sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */ in smc_close_passive_work()
415 if (old_state != sk->sk_state) { in smc_close_passive_work()
416 sk->sk_state_change(sk); in smc_close_passive_work()
417 if ((sk->sk_state == SMC_CLOSED) && in smc_close_passive_work()
418 (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) { in smc_close_passive_work()
424 release_sock(sk); in smc_close_passive_work()
427 sock_put(sk); /* sock_hold done by schedulers of close_work */ in smc_close_passive_work()
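
Lines 347-427 show the passive-close worker. Its skeleton: take the sock lock, remember the old state, advance the state machine on the peer's close/abort indication, wake blocked readers and writers, fire sk_state_change() if anything moved, then release the lock and drop the reference that whoever scheduled close_work took with sock_hold() (the comment at line 427). A reduced sketch showing only the SMC_ACTIVE arm (lines 365/368); the container_of() names assume close_work lives in struct smc_connection, which is embedded in struct smc_sock as "conn":

    /* kernel context; needs <net/sock.h>, <linux/workqueue.h>, <linux/container_of.h> */
    static void close_passive_work_sketch(struct work_struct *work)
    {
        struct smc_connection *conn = container_of(work, struct smc_connection,
                                                   close_work);
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        struct sock *sk = &smc->sk;
        int old_state;

        lock_sock(sk);
        old_state = sk->sk_state;

        if (sk->sk_state == SMC_ACTIVE)
            sk->sk_state = SMC_APPCLOSEWAIT1;   /* peer announced it is done writing */

        sk->sk_data_ready(sk);          /* wake blocked rcvbuf consumers */
        sk->sk_write_space(sk);         /* wake blocked sndbuf producers */

        if (old_state != sk->sk_state)
            sk->sk_state_change(sk);

        release_sock(sk);
        sock_put(sk);   /* pairs with the sock_hold() of whoever queued close_work */
    }
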
433 struct sock *sk = &smc->sk; in smc_close_shutdown_write() local
439 0 : sock_flag(sk, SOCK_LINGER) ? in smc_close_shutdown_write()
440 sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT; in smc_close_shutdown_write()
442 old_state = sk->sk_state; in smc_close_shutdown_write()
444 switch (sk->sk_state) { in smc_close_shutdown_write()
447 release_sock(sk); in smc_close_shutdown_write()
449 lock_sock(sk); in smc_close_shutdown_write()
450 if (sk->sk_state != SMC_ACTIVE) in smc_close_shutdown_write()
454 sk->sk_state = SMC_PEERCLOSEWAIT1; in smc_close_shutdown_write()
460 release_sock(sk); in smc_close_shutdown_write()
462 lock_sock(sk); in smc_close_shutdown_write()
463 if (sk->sk_state != SMC_APPCLOSEWAIT1) in smc_close_shutdown_write()
467 sk->sk_state = SMC_APPCLOSEWAIT2; in smc_close_shutdown_write()
480 if (old_state != sk->sk_state) in smc_close_shutdown_write()
481 sk->sk_state_change(sk); in smc_close_shutdown_write()
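
Lines 433-481 are the shutdown(SHUT_WR) counterpart of the active close: the same linger-timeout computation (tail at lines 439-440), the same drop-the-lock/cancel/retake dance, but the target state depends on who started closing first. The arm at lines 460-467 is the distinctive one: if the peer already initiated the close (SMC_APPCLOSEWAIT1), finishing the local write side moves the socket to SMC_APPCLOSEWAIT2 rather than SMC_PEERCLOSEWAIT1. A sketch of just that arm, with smc_close_wr() assumed to announce "done writing" to the peer:

    /* kernel context; needs <net/sock.h>, <linux/sched.h> and the SMC internals */
    static int close_shutdown_write_sketch(struct smc_sock *smc)
    {
        struct sock *sk = &smc->sk;
        int old_state, rc = 0;
        long timeout;

        timeout = current->flags & PF_EXITING ?
                  0 : sock_flag(sk, SOCK_LINGER) ?
                      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

        old_state = sk->sk_state;
        switch (sk->sk_state) {
        case SMC_APPCLOSEWAIT1:
            smc_close_stream_wait(smc, timeout);        /* drain prepared sends */
            release_sock(sk);
            cancel_delayed_work_sync(&smc->conn.tx_work);       /* assumed member name */
            lock_sock(sk);
            if (sk->sk_state != SMC_APPCLOSEWAIT1)
                break;                  /* state raced while the lock was dropped */
            rc = smc_close_wr(&smc->conn);      /* assumed: confirm close to the peer */
            sk->sk_state = SMC_APPCLOSEWAIT2;
            break;
        default:
            break;
        }

        if (old_state != sk->sk_state)
            sk->sk_state_change(sk);
        return rc;
    }
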