Lines matching full:call (net/rxrpc/call_accept.c)
2 /* incoming call handling
26 * Preallocate a single service call, connection and peer and, if possible,
37 struct rxrpc_call *call; in rxrpc_service_prealloc_one() local
94 call = rxrpc_alloc_call(rx, gfp, debug_id); in rxrpc_service_prealloc_one()
95 if (!call) in rxrpc_service_prealloc_one()
97 call->flags |= (1 << RXRPC_CALL_IS_SERVICE); in rxrpc_service_prealloc_one()
98 call->state = RXRPC_CALL_SERVER_PREALLOC; in rxrpc_service_prealloc_one()
100 trace_rxrpc_call(call->debug_id, rxrpc_call_new_service, in rxrpc_service_prealloc_one()
101 atomic_read(&call->usage), in rxrpc_service_prealloc_one()
123 call->user_call_ID = user_call_ID; in rxrpc_service_prealloc_one()
124 call->notify_rx = notify_rx; in rxrpc_service_prealloc_one()
125 rxrpc_get_call(call, rxrpc_call_got_kernel); in rxrpc_service_prealloc_one()
126 user_attach_call(call, user_call_ID); in rxrpc_service_prealloc_one()
127 rxrpc_get_call(call, rxrpc_call_got_userid); in rxrpc_service_prealloc_one()
128 rb_link_node(&call->sock_node, parent, pp); in rxrpc_service_prealloc_one()
129 rb_insert_color(&call->sock_node, &rx->calls); in rxrpc_service_prealloc_one()
130 set_bit(RXRPC_CALL_HAS_USERID, &call->flags); in rxrpc_service_prealloc_one()
133 list_add(&call->sock_link, &rx->sock_calls); in rxrpc_service_prealloc_one()
137 rxnet = call->rxnet; in rxrpc_service_prealloc_one()
139 list_add_tail(&call->link, &rxnet->calls); in rxrpc_service_prealloc_one()
142 b->call_backlog[call_head] = call; in rxrpc_service_prealloc_one()
144 _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID); in rxrpc_service_prealloc_one()
149 rxrpc_cleanup_call(call); in rxrpc_service_prealloc_one()
156 * entire backlog of a socket. When a new call comes in, if we don't have
157 * sufficient of each available, the call gets rejected as busy or ignored.
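
A minimal userspace sketch of the backlog ring the lines above describe: rxrpc_service_prealloc_one() parks each preallocated call at b->call_backlog[call_head], and a full ring means further charging fails. All names below (struct backlog, prealloc_one, fake_call) are invented stand-ins, and the real kernel publishes the head index with release semantics rather than the plain store used here.

    /* Hypothetical analogue of the rxrpc call backlog ring. */
    #include <stdio.h>
    #include <stdlib.h>

    #define BACKLOG_SIZE 16                /* fixed power-of-two ring */

    struct fake_call { unsigned long user_call_id; };

    struct backlog {
        struct fake_call *call_backlog[BACKLOG_SIZE];
        unsigned int call_head;            /* producer index */
        unsigned int call_tail;            /* consumer index */
    };

    /* Park one preallocated call, refusing to overfill the ring. */
    static int prealloc_one(struct backlog *b, unsigned long id)
    {
        unsigned int head = b->call_head;
        struct fake_call *call;

        if (head - b->call_tail >= BACKLOG_SIZE)
            return -1;                     /* backlog already full */

        call = calloc(1, sizeof(*call));
        if (!call)
            return -1;
        call->user_call_id = id;

        b->call_backlog[head % BACKLOG_SIZE] = call;
        b->call_head = head + 1;           /* publish the slot last */
        return 0;
    }

    int main(void)
    {
        struct backlog b = { { 0 } };
        unsigned long id;

        for (id = 1; id <= 3; id++)
            if (prealloc_one(&b, id))
                fprintf(stderr, "prealloc %lu failed\n", id);
        printf("%u calls charged\n", b.call_head - b.call_tail);
        return 0;
    }
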
226 struct rxrpc_call *call = b->call_backlog[tail]; in rxrpc_discard_prealloc() local
227 rcu_assign_pointer(call->socket, rx); in rxrpc_discard_prealloc()
229 _debug("discard %lx", call->user_call_ID); in rxrpc_discard_prealloc()
230 rx->discard_new_call(call, call->user_call_ID); in rxrpc_discard_prealloc()
231 rxrpc_put_call(call, rxrpc_call_put_kernel); in rxrpc_discard_prealloc()
233 rxrpc_call_completed(call); in rxrpc_discard_prealloc()
234 rxrpc_release_call(rx, call); in rxrpc_discard_prealloc()
235 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_discard_prealloc()
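
rxrpc_discard_prealloc() drains that same ring from the tail when the socket goes away: calls charged by a kernel service are handed back through discard_new_call() so the service can detach its state, then the final references are dropped. A hedged, self-contained sketch of the drain loop, where free() stands in for the kernel's completion, release and refcount puts:

    #include <stdio.h>
    #include <stdlib.h>

    #define BACKLOG_SIZE 16

    struct fake_call { unsigned long user_call_id; };

    /* Stand-in for rx->discard_new_call(): lets the service detach
     * whatever it attached to this user call ID at prealloc time. */
    static void discard_new_call(struct fake_call *call)
    {
        printf("discard %#lx\n", call->user_call_id);
    }

    static void discard_prealloc(struct fake_call **ring,
                                 unsigned int *tail, unsigned int head)
    {
        while (*tail != head) {
            struct fake_call *call = ring[*tail % BACKLOG_SIZE];

            discard_new_call(call);
            free(call);        /* kernel: complete, release, put refs */
            (*tail)++;
        }
    }

    int main(void)
    {
        struct fake_call *ring[BACKLOG_SIZE];
        unsigned int head = 0, tail = 0;
        int i;

        for (i = 0; i < 3; i++) {
            ring[head % BACKLOG_SIZE] = calloc(1, sizeof(**ring));
            if (!ring[head % BACKLOG_SIZE])
                return 1;
            ring[head % BACKLOG_SIZE]->user_call_id = 0x100 + i;
            head++;
        }
        discard_prealloc(ring, &tail, head);
        return 0;
    }
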
243 * Allocate a new incoming call from the prealloc pool, along with a connection
253 struct rxrpc_call *call; in rxrpc_alloc_incoming_call() local
302 /* And now we can allocate and set up a new call */ in rxrpc_alloc_incoming_call()
303 call = b->call_backlog[call_tail]; in rxrpc_alloc_incoming_call()
308 rxrpc_see_call(call); in rxrpc_alloc_incoming_call()
309 call->conn = conn; in rxrpc_alloc_incoming_call()
310 call->security = conn->security; in rxrpc_alloc_incoming_call()
311 call->peer = rxrpc_get_peer(conn->params.peer); in rxrpc_alloc_incoming_call()
312 call->cong_cwnd = call->peer->cong_cwnd; in rxrpc_alloc_incoming_call()
313 return call; in rxrpc_alloc_incoming_call()
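
The consumer side, rxrpc_alloc_incoming_call(), pops the call parked at call_tail and binds it to the connection, security and peer matched for the incoming packet; an empty ring is the point at which the kernel instead treats the caller as busy. A sketch of that step, again with invented types:

    #include <stdio.h>

    #define BACKLOG_SIZE 16

    struct fake_conn { int id; };
    struct fake_call { struct fake_conn *conn; };

    /* Take one preallocated call off the tail, or NULL if none left. */
    static struct fake_call *alloc_incoming(struct fake_call **ring,
                                            unsigned int *tail,
                                            unsigned int head,
                                            struct fake_conn *conn)
    {
        struct fake_call *call;

        if (*tail == head)
            return NULL;               /* no prealloc: reject as busy */

        call = ring[*tail % BACKLOG_SIZE];
        (*tail)++;

        call->conn = conn;  /* like the conn/security/peer binding above */
        return call;
    }

    int main(void)
    {
        struct fake_conn conn = { 1 };
        struct fake_call slot = { 0 }, *ring[BACKLOG_SIZE] = { &slot };
        unsigned int head = 1, tail = 0;

        printf("got call: %s\n",
               alloc_incoming(ring, &tail, head, &conn) ? "yes" : "no");
        printf("again:    %s\n",
               alloc_incoming(ring, &tail, head, &conn) ? "yes" : "no");
        return 0;
    }
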
317 * Set up a new incoming call. Called in BH context with the RCU read lock
320 * If this is for a kernel service, when we allocate the call, it will have
329 * The call is returned with the user access mutex held.
338 struct rxrpc_call *call; in rxrpc_new_incoming_call() local
350 call = NULL; in rxrpc_new_incoming_call()
354 /* The peer, connection and call may all have sprung into existence due in rxrpc_new_incoming_call()
361 call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); in rxrpc_new_incoming_call()
362 if (!call) { in rxrpc_new_incoming_call()
365 call = NULL; in rxrpc_new_incoming_call()
369 trace_rxrpc_receive(call, rxrpc_receive_incoming, in rxrpc_new_incoming_call()
372 /* Lock the call to prevent rxrpc_kernel_send/recv_data() and in rxrpc_new_incoming_call()
377 * behaved enough not to access the call before the first notification in rxrpc_new_incoming_call()
381 if (!mutex_trylock(&call->user_mutex)) in rxrpc_new_incoming_call()
384 /* Make the call live. */ in rxrpc_new_incoming_call()
385 rxrpc_incoming_call(rx, call, skb); in rxrpc_new_incoming_call()
386 conn = call->conn; in rxrpc_new_incoming_call()
389 rx->notify_new_call(&rx->sk, call, call->user_call_ID); in rxrpc_new_incoming_call()
397 set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events); in rxrpc_new_incoming_call()
398 rxrpc_queue_conn(call->conn); in rxrpc_new_incoming_call()
402 write_lock(&call->state_lock); in rxrpc_new_incoming_call()
403 if (call->state < RXRPC_CALL_COMPLETE) { in rxrpc_new_incoming_call()
405 call->state = RXRPC_CALL_SERVER_RECV_REQUEST; in rxrpc_new_incoming_call()
407 call->state = RXRPC_CALL_SERVER_ACCEPTING; in rxrpc_new_incoming_call()
409 write_unlock(&call->state_lock); in rxrpc_new_incoming_call()
413 rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, in rxrpc_new_incoming_call()
417 rxrpc_abort_call("CON", call, sp->hdr.seq, in rxrpc_new_incoming_call()
425 if (call->state == RXRPC_CALL_SERVER_ACCEPTING) in rxrpc_new_incoming_call()
426 rxrpc_notify_socket(call); in rxrpc_new_incoming_call()
431 * service to prevent the call from being deallocated too early. in rxrpc_new_incoming_call()
433 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_new_incoming_call()
435 _leave(" = %p{%d}", call, call->debug_id); in rxrpc_new_incoming_call()
438 return call; in rxrpc_new_incoming_call()
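
rxrpc_new_incoming_call() runs in BH context, so it cannot sleep on the call's user_mutex; it uses mutex_trylock() and abandons the attempt if the lock is contended, and on success it deliberately returns with the mutex still held so the first notification lands before send/recv can race in. A pthreads sketch of that trylock-or-bail shape (build with -pthread); function names here are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t user_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Non-sleeping path: either take the lock immediately or give up,
     * mirroring mutex_trylock(&call->user_mutex) in the BH handler. */
    static int make_call_live(int call_id)
    {
        if (pthread_mutex_trylock(&user_mutex) != 0) {
            fprintf(stderr, "call %d: user mutex contended, bailing\n",
                    call_id);
            return -1;
        }

        printf("call %d set up under the user mutex\n", call_id);

        /* The kernel returns to its caller with the mutex still held;
         * this sketch just releases it again. */
        pthread_mutex_unlock(&user_mutex);
        return 0;
    }

    int main(void)
    {
        return make_call_live(1) ? 1 : 0;
    }
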
442 * handle acceptance of a call by userspace
443 * - assign the user call ID to the call at the front of the queue
450 __acquires(call->user_mutex) in rxrpc_accept_call()
452 struct rxrpc_call *call; in rxrpc_accept_call() local
474 call = rb_entry(parent, struct rxrpc_call, sock_node); in rxrpc_accept_call()
476 if (user_call_ID < call->user_call_ID) in rxrpc_accept_call()
478 else if (user_call_ID > call->user_call_ID) in rxrpc_accept_call()
484 /* Dequeue the first call and check it's still valid. We gain in rxrpc_accept_call()
487 call = list_entry(rx->to_be_accepted.next, in rxrpc_accept_call()
496 if (mutex_lock_interruptible(&call->user_mutex) < 0) { in rxrpc_accept_call()
503 list_del_init(&call->accept_link); in rxrpc_accept_call()
505 rxrpc_see_call(call); in rxrpc_accept_call()
512 call = rb_entry(parent, struct rxrpc_call, sock_node); in rxrpc_accept_call()
514 if (user_call_ID < call->user_call_ID) in rxrpc_accept_call()
516 else if (user_call_ID > call->user_call_ID) in rxrpc_accept_call()
522 write_lock_bh(&call->state_lock); in rxrpc_accept_call()
523 switch (call->state) { in rxrpc_accept_call()
525 call->state = RXRPC_CALL_SERVER_RECV_REQUEST; in rxrpc_accept_call()
528 ret = call->error; in rxrpc_accept_call()
535 call->notify_rx = notify_rx; in rxrpc_accept_call()
536 call->user_call_ID = user_call_ID; in rxrpc_accept_call()
537 rxrpc_get_call(call, rxrpc_call_got_userid); in rxrpc_accept_call()
538 rb_link_node(&call->sock_node, parent, pp); in rxrpc_accept_call()
539 rb_insert_color(&call->sock_node, &rx->calls); in rxrpc_accept_call()
540 if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags)) in rxrpc_accept_call()
543 write_unlock_bh(&call->state_lock); in rxrpc_accept_call()
545 rxrpc_notify_socket(call); in rxrpc_accept_call()
548 _leave(" = %p{%d}", call, call->debug_id); in rxrpc_accept_call()
549 return call; in rxrpc_accept_call()
552 _debug("release %p", call); in rxrpc_accept_call()
553 write_unlock_bh(&call->state_lock); in rxrpc_accept_call()
555 rxrpc_release_call(rx, call); in rxrpc_accept_call()
556 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_accept_call()
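
rxrpc_accept_call() indexes calls by user_call_ID in the socket's red-black tree: walk down with a three-way compare to find the link point, fail if the ID is already present (the kernel errors out with -EBADSLT there), then rb_link_node()/rb_insert_color(). The sketch below keeps that walk but substitutes a plain unbalanced BST to stay short; node and function names are invented:

    #include <stdio.h>
    #include <stdlib.h>

    struct call_node {
        unsigned long user_call_id;
        struct call_node *left, *right;
    };

    /* Insert a call under its user-supplied ID; -1 on duplicate. */
    static int insert_call(struct call_node **root, unsigned long id)
    {
        struct call_node **pp = root, *node;

        while (*pp) {
            if (id < (*pp)->user_call_id)
                pp = &(*pp)->left;
            else if (id > (*pp)->user_call_id)
                pp = &(*pp)->right;
            else
                return -1;             /* user call ID already in use */
        }

        node = calloc(1, sizeof(*node));
        if (!node)
            return -1;
        node->user_call_id = id;
        *pp = node;                    /* rb_link_node() equivalent */
        return 0;
    }

    int main(void)
    {
        struct call_node *calls = NULL;

        printf("insert 42:   %d\n", insert_call(&calls, 42));
        printf("reinsert 42: %d (duplicate rejected)\n",
               insert_call(&calls, 42));
        return 0;
    }
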
570 * Handle rejection of a call by userspace
571 * - reject the call at the front of the queue
575 struct rxrpc_call *call; in rxrpc_reject_call() local
590 /* Dequeue the first call and check it's still valid. We gain in rxrpc_reject_call()
593 call = list_entry(rx->to_be_accepted.next, in rxrpc_reject_call()
595 list_del_init(&call->accept_link); in rxrpc_reject_call()
597 rxrpc_see_call(call); in rxrpc_reject_call()
599 write_lock_bh(&call->state_lock); in rxrpc_reject_call()
600 switch (call->state) { in rxrpc_reject_call()
602 __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED); in rxrpc_reject_call()
606 ret = call->error; in rxrpc_reject_call()
613 write_unlock_bh(&call->state_lock); in rxrpc_reject_call()
616 rxrpc_send_abort_packet(call); in rxrpc_reject_call()
617 rxrpc_release_call(rx, call); in rxrpc_reject_call()
618 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_reject_call()
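
Both the accept and reject paths recheck call->state under the state lock after dequeuing, because the call may have completed in the meantime (the peer can abort it before userspace gets there). A small sketch of that recheck, with an illustrative two-state enum in place of the full rxrpc call state machine:

    #include <errno.h>
    #include <stdio.h>

    enum call_state { SERVER_ACCEPTING, CALL_COMPLETE };

    struct fake_call {
        enum call_state state;
        int error;                 /* set once state == CALL_COMPLETE */
    };

    /* Reject a pending call; if it already completed, just report
     * the stored error, as rxrpc_reject_call() does. */
    static int reject_call(struct fake_call *call)
    {
        switch (call->state) {
        case SERVER_ACCEPTING:
            /* Still pending: abort it back to the peer, like
             * __rxrpc_abort_call("REJ", ..., RX_USER_ABORT). */
            call->state = CALL_COMPLETE;
            call->error = -ECONNABORTED;
            return 0;
        case CALL_COMPLETE:
            return call->error;    /* already dead, say why */
        }
        return -EINVAL;
    }

    int main(void)
    {
        struct fake_call call = { SERVER_ACCEPTING, 0 };

        printf("first reject:  %d\n", reject_call(&call));
        printf("second reject: %d\n", reject_call(&call));
        return 0;
    }
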
628 * @notify_rx: Event notification function for the call
629 * @user_attach_call: Func to attach call to user_call_ID
630 * @user_call_ID: The tag to attach to the preallocated call
636 * The user is given a ref to hold on the call.
638 * Note that the call may become connected before this function returns.
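
These kerneldoc fragments describe the kernel-service preallocation entry point: an in-kernel caller supplies a notification hook, an attach hook and its own tag, gets a ref to hold on the future call, and must cope with the call becoming connected before the function even returns. A hedged userspace sketch of that callback registration, with every name below invented rather than taken from the rxrpc API:

    #include <stdio.h>

    struct fake_call;
    typedef void (*notify_rx_t)(struct fake_call *call);
    typedef void (*attach_call_t)(struct fake_call *call,
                                  unsigned long user_call_id);

    struct fake_call {
        unsigned long user_call_id;
        notify_rx_t notify_rx;     /* event notification hook */
        attach_call_t attach;      /* binds call to the user's tag */
        int refcount;
    };

    /* Charge one accept slot: record the hooks and tag, attach the
     * user's side, and give the caller a ref to hold on the call. */
    static int charge_accept(struct fake_call *slot, notify_rx_t notify,
                             attach_call_t attach, unsigned long id)
    {
        slot->notify_rx = notify;
        slot->attach = attach;
        slot->user_call_id = id;
        slot->attach(slot, id);
        slot->refcount++;
        return 0;
    }

    static void my_attach(struct fake_call *call, unsigned long id)
    {
        (void)call;
        printf("service attached to tag %#lx\n", id);
    }

    static void my_notify(struct fake_call *call)
    {
        printf("activity on call %#lx\n", call->user_call_id);
    }

    int main(void)
    {
        struct fake_call slot = { 0, NULL, NULL, 0 };

        charge_accept(&slot, my_notify, my_attach, 0xbeef);
        slot.notify_rx(&slot);
        return 0;
    }
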