Lines matching the identifier "rx" (full-token match), grouped by containing function. The results appear to come from the Linux kernel's rxrpc incoming-call acceptance code (net/rxrpc/call_accept.c); the leading number on each line is that file's source line.

rxrpc_service_prealloc_one()  (rx: argument)
    29  static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
    38  struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
    43  max = rx->sk.sk_max_ack_backlog;
    44  tmp = rx->sk.sk_ack_backlog;
    67  struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
    94  call = rxrpc_alloc_call(rx, gfp, debug_id);
   104  write_lock(&rx->call_lock);
   110  pp = &rx->calls.rb_node;
   129  rb_insert_color(&call->sock_node, &rx->calls);
   133  list_add(&call->sock_link, &rx->sock_calls);
   135  write_unlock(&rx->call_lock);
   148  write_unlock(&rx->call_lock);
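
The matches at source lines 104-135 outline the heart of the preallocation step: under the write-locked call_lock, the new call is keyed by its user call ID, linked into the socket's rb-tree of calls, and chained onto sock_calls. A minimal sketch of that pattern, assuming the tree is ordered by user_call_ID (names not visible above, such as xcall, are illustrative):

    /* Sketch: insert a freshly preallocated call into rx->calls,
     * keyed by user_call_ID, then chain it onto rx->sock_calls.
     * Mirrors the matches at lines 104-135; the ordering test and
     * the id_in_use label are assumptions. */
    write_lock(&rx->call_lock);
    pp = &rx->calls.rb_node;
    parent = NULL;
    while (*pp) {
        parent = *pp;
        xcall = rb_entry(parent, struct rxrpc_call, sock_node);
        if (user_call_ID < xcall->user_call_ID)
            pp = &(*pp)->rb_left;
        else if (user_call_ID > xcall->user_call_ID)
            pp = &(*pp)->rb_right;
        else
            goto id_in_use;    /* duplicate ID: undo and fail */
    }
    rb_link_node(&call->sock_node, parent, pp);
    rb_insert_color(&call->sock_node, &rx->calls);
    list_add(&call->sock_link, &rx->sock_calls);
    write_unlock(&rx->call_lock);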

rxrpc_service_prealloc()  (rx: argument)
   161  int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
   163  struct rxrpc_backlog *b = rx->backlog;
   169  rx->backlog = b;
   172  if (rx->discard_new_call)
   175  while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
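
Source lines 161-175 show rxrpc_service_prealloc() allocating the backlog structure lazily and then looping rxrpc_service_prealloc_one() until the ring reports itself full. A sketch of that control flow reconstructed around the matched lines; the debug-id expression is an assumption and error paths are trimmed:

    int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
    {
        struct rxrpc_backlog *b = rx->backlog;

        /* The backlog ring is allocated lazily on first use. */
        if (!b) {
            b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
            if (!b)
                return -ENOMEM;
            rx->backlog = b;
        }

        /* Kernel services charge the ring themselves through
         * rxrpc_kernel_charge_accept(), so skip the generic fill. */
        if (rx->discard_new_call)
            return 0;

        /* Top up until prealloc_one reports the ring full. */
        while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
                                          atomic_inc_return(&rxrpc_debug_id)) == 0)
            ;

        return 0;
    }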

rxrpc_discard_prealloc()  (rx: argument)
   185  void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
   187  struct rxrpc_backlog *b = rx->backlog;
   188  struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
   193  rx->backlog = NULL;
   198  spin_lock_bh(&rx->incoming_lock);
   199  spin_unlock_bh(&rx->incoming_lock);
   227  rcu_assign_pointer(call->socket, rx);
   228  if (rx->discard_new_call) {
   230  rx->discard_new_call(call, call->user_call_ID);
   234  rxrpc_release_call(rx, call);
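
In rxrpc_discard_prealloc(), the back-to-back lock/unlock of incoming_lock at source lines 198-199 is not a mistake: it acts as a barrier, letting any softirq already instantiating an incoming call finish before the backlog is torn down. Each leftover preallocated call then gets its socket pointer attached and, on kernel sockets, is handed to discard_new_call so the service can drop its side. A simplified sketch of the drain (ring-index handling is an assumption; reference drops are elided):

    /* Barrier: anyone inside rxrpc_new_incoming_call() holds this
     * lock, so taking and releasing it means they have finished. */
    spin_lock_bh(&rx->incoming_lock);
    spin_unlock_bh(&rx->incoming_lock);

    while (CIRC_CNT(head, tail, RXRPC_BACKLOG_MAX) > 0) {
        struct rxrpc_call *call = b->call_backlog[tail];

        rcu_assign_pointer(call->socket, rx);
        if (rx->discard_new_call)
            /* Let the kernel service release its side of the call. */
            rx->discard_new_call(call, call->user_call_ID);
        rxrpc_release_call(rx, call);
        tail = (tail + 1) & (RXRPC_BACKLOG_MAX - 1);
    }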

rxrpc_alloc_incoming_call()  (rx: argument)
   246  static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
   252  struct rxrpc_backlog *b = rx->backlog;
   286  rxrpc_new_incoming_peer(rx, local, peer);
   297  rxrpc_new_incoming_connection(rx, conn, skb);
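
rxrpc_alloc_incoming_call() is the softirq-side consumer of the backlog: it pulls a preallocated peer, connection, and call out of the ring rather than allocating in atomic context. The ring is single-producer/single-consumer, so acquire/release ordering suffices. A sketch of consuming one call slot (field names such as call_backlog_head follow the usual rxrpc_backlog layout, an assumption here):

    /* Sketch: pop one preallocated call from the SPSC backlog ring.
     * smp_load_acquire() pairs with the producer's smp_store_release();
     * RXRPC_BACKLOG_MAX is assumed to be a power of two. */
    unsigned short head = smp_load_acquire(&b->call_backlog_head);
    unsigned short tail = b->call_backlog_tail;
    struct rxrpc_call *call;

    if (CIRC_CNT(head, tail, RXRPC_BACKLOG_MAX) == 0)
        return NULL;    /* nothing charged: caller rejects as busy */

    call = b->call_backlog[tail];
    smp_store_release(&b->call_backlog_tail,
                      (tail + 1) & (RXRPC_BACKLOG_MAX - 1));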

rxrpc_new_incoming_call()  (rx: argument)
   332  struct rxrpc_sock *rx,
   342  spin_lock(&rx->incoming_lock);
   343  if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
   344  rx->sk.sk_state == RXRPC_CLOSE) {
   357  * rx->incoming_lock, so the values should remain stable.
   361  call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
   385  rxrpc_incoming_call(rx, call, skb);
   388  if (rx->notify_new_call)
   389  rx->notify_new_call(&rx->sk, call, call->user_call_ID);
   391  sk_acceptq_added(&rx->sk);
   404  if (rx->discard_new_call)
   437  spin_unlock(&rx->incoming_lock);
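
Everything in rxrpc_new_incoming_call() runs under rx->incoming_lock, which is what makes the lock/unlock barrier in rxrpc_discard_prealloc() above work and why the comment at source line 357 can promise stable values. A condensed sketch of the flow visible in the matches (rejection paths are elided):

    spin_lock(&rx->incoming_lock);
    if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
        rx->sk.sk_state == RXRPC_CLOSE)
        goto reject;    /* socket no longer accepting service calls */

    call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
    if (!call)
        goto reject;    /* backlog empty: peer gets a busy response */

    rxrpc_incoming_call(rx, call, skb);

    /* Kernel services are notified directly; userspace sockets get
     * normal accept-queue accounting instead. */
    if (rx->notify_new_call)
        rx->notify_new_call(&rx->sk, call, call->user_call_ID);
    else
        sk_acceptq_added(&rx->sk);

    spin_unlock(&rx->incoming_lock);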

rxrpc_accept_call()  (rx: argument)
   446  struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
   449  __releases(&rx->sk.sk_lock.slock)
   460  write_lock(&rx->call_lock);
   462  if (list_empty(&rx->to_be_accepted)) {
   463  write_unlock(&rx->call_lock);
   464  release_sock(&rx->sk);
   470  pp = &rx->calls.rb_node;
   487  call = list_entry(rx->to_be_accepted.next,
   489  write_unlock(&rx->call_lock);
   497  release_sock(&rx->sk);
   502  write_lock(&rx->call_lock);
   504  sk_acceptq_removed(&rx->sk);
   508  pp = &rx->calls.rb_node;
   539  rb_insert_color(&call->sock_node, &rx->calls);
   544  write_unlock(&rx->call_lock);
   546  rxrpc_service_prealloc(rx, GFP_KERNEL);
   547  release_sock(&rx->sk);
   554  write_unlock(&rx->call_lock);
   555  rxrpc_release_call(rx, call);
   561  write_unlock(&rx->call_lock);
   563  rxrpc_service_prealloc(rx, GFP_KERNEL);
   564  release_sock(&rx->sk);
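
rxrpc_accept_call() is the userspace acceptance path: under call_lock it checks that the caller's user_call_ID is not already bound in rx->calls, pops the longest-waiting call off rx->to_be_accepted, and re-inserts it into the rb-tree under the new ID before topping the preallocation ring back up. A much-simplified sketch; the user_mutex handshake and the ID-collision walk of the real function are elided:

    write_lock(&rx->call_lock);
    if (list_empty(&rx->to_be_accepted)) {
        write_unlock(&rx->call_lock);
        release_sock(&rx->sk);
        return ERR_PTR(-ENODATA);    /* nothing waiting */
    }

    /* Dequeue the oldest pending call and take it off the socket's
     * accept-queue accounting. */
    call = list_entry(rx->to_be_accepted.next,
                      struct rxrpc_call, accept_link);
    list_del_init(&call->accept_link);
    sk_acceptq_removed(&rx->sk);

    /* ... bind call->user_call_ID and rb-insert into rx->calls,
     * as in the preallocation sketch earlier ... */
    write_unlock(&rx->call_lock);

    rxrpc_service_prealloc(rx, GFP_KERNEL);    /* refill the ring */
    release_sock(&rx->sk);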

rxrpc_reject_call()  (rx: argument)
   573  int rxrpc_reject_call(struct rxrpc_sock *rx)
   583  write_lock(&rx->call_lock);
   585  if (list_empty(&rx->to_be_accepted)) {
   586  write_unlock(&rx->call_lock);
   593  call = list_entry(rx->to_be_accepted.next,
   596  sk_acceptq_removed(&rx->sk);
   614  write_unlock(&rx->call_lock);
   617  rxrpc_release_call(rx, call);
   620  rxrpc_service_prealloc(rx, GFP_KERNEL);

rxrpc_kernel_charge_accept()  (rx: local)
   646  struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
   647  struct rxrpc_backlog *b = rx->backlog;
   652  return rxrpc_service_prealloc_one(rx, b, notify_rx,
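
The final matches sit in rxrpc_kernel_charge_accept(), the exported hook an in-kernel service such as AFS uses to charge one call onto the backlog with its own callbacks, instead of relying on the generic fill in rxrpc_service_prealloc(). A usage sketch, assuming this era's six-argument signature; my_charge_backlog, my_notify_rx, my_attach_call, and my_next_call_id are hypothetical service-side names:

    /* Sketch: keep a kernel service's accept backlog charged.
     * Loops until the ring is full or allocation fails.  Only
     * rxrpc_kernel_charge_accept() is a real API here. */
    static atomic_t my_debug_ids;

    static void my_charge_backlog(struct socket *srv_sock)
    {
        for (;;) {
            unsigned long id = my_next_call_id();    /* hypothetical */

            if (rxrpc_kernel_charge_accept(srv_sock,
                                           my_notify_rx,
                                           my_attach_call,
                                           id, GFP_KERNEL,
                                           atomic_inc_return(&my_debug_ids)) < 0)
                break;    /* ring full, or out of memory */
        }
    }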