| /Linux-v6.1/net/rxrpc/ |
| D | call_object.c | 2 /* RxRPC individual remote procedure call handling 50 struct rxrpc_call *call = from_timer(call, t, timer); in rxrpc_call_timer_expired() local 52 _enter("%d", call->debug_id); in rxrpc_call_timer_expired() 54 if (call->state < RXRPC_CALL_COMPLETE) { in rxrpc_call_timer_expired() 55 trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies); in rxrpc_call_timer_expired() 56 __rxrpc_queue_call(call); in rxrpc_call_timer_expired() 58 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_call_timer_expired() 62 void rxrpc_reduce_call_timer(struct rxrpc_call *call, in rxrpc_reduce_call_timer() argument 67 if (rxrpc_try_get_call(call, rxrpc_call_got_timer)) { in rxrpc_reduce_call_timer() 68 trace_rxrpc_timer(call, why, now); in rxrpc_reduce_call_timer() [all …]
|
| D | call_event.c | 23 static void rxrpc_propose_ping(struct rxrpc_call *call, in rxrpc_propose_ping() argument 28 !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events)) in rxrpc_propose_ping() 29 rxrpc_queue_call(call); in rxrpc_propose_ping() 34 if (time_before(ping_at, call->ping_at)) { in rxrpc_propose_ping() 35 WRITE_ONCE(call->ping_at, ping_at); in rxrpc_propose_ping() 36 rxrpc_reduce_call_timer(call, ping_at, now, in rxrpc_propose_ping() 45 static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, in __rxrpc_propose_ACK() argument 57 rxrpc_propose_ping(call, immediate, background); in __rxrpc_propose_ACK() 66 call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]); in __rxrpc_propose_ACK() 67 if (ack_reason == call->ackr_reason) { in __rxrpc_propose_ACK() [all …]
|
| D | recvmsg.c | 20 * Post a call for attention by the socket or kernel service. Further 23 void rxrpc_notify_socket(struct rxrpc_call *call) in rxrpc_notify_socket() argument 28 _enter("%d", call->debug_id); in rxrpc_notify_socket() 30 if (!list_empty(&call->recvmsg_link)) in rxrpc_notify_socket() 35 rx = rcu_dereference(call->socket); in rxrpc_notify_socket() 38 if (call->notify_rx) { in rxrpc_notify_socket() 39 spin_lock_bh(&call->notify_lock); in rxrpc_notify_socket() 40 call->notify_rx(sk, call, call->user_call_ID); in rxrpc_notify_socket() 41 spin_unlock_bh(&call->notify_lock); in rxrpc_notify_socket() 44 if (list_empty(&call->recvmsg_link)) { in rxrpc_notify_socket() [all …]
|
| D | sendmsg.c | 23 static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win) in rxrpc_check_tx_space() argument 26 min_t(unsigned int, call->tx_winsize, in rxrpc_check_tx_space() 27 call->cong_cwnd + call->cong_extra); in rxrpc_check_tx_space() 28 rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack); in rxrpc_check_tx_space() 32 return call->tx_top - tx_win < win_size; in rxrpc_check_tx_space() 39 struct rxrpc_call *call, in rxrpc_wait_for_tx_window_intr() argument 44 if (rxrpc_check_tx_space(call, NULL)) in rxrpc_wait_for_tx_window_intr() 47 if (call->state >= RXRPC_CALL_COMPLETE) in rxrpc_wait_for_tx_window_intr() 48 return call->error; in rxrpc_wait_for_tx_window_intr() 53 trace_rxrpc_transmit(call, rxrpc_transmit_wait); in rxrpc_wait_for_tx_window_intr() [all …]
|
| D | input.c | 27 struct rxrpc_call *call, rxrpc_seq_t seq) in rxrpc_proto_abort() argument 29 if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) { in rxrpc_proto_abort() 30 set_bit(RXRPC_CALL_EV_ABORT, &call->events); in rxrpc_proto_abort() 31 rxrpc_queue_call(call); in rxrpc_proto_abort() 38 static void rxrpc_congestion_management(struct rxrpc_call *call, in rxrpc_congestion_management() argument 44 unsigned int cumulative_acks = call->cong_cumul_acks; in rxrpc_congestion_management() 45 unsigned int cwnd = call->cong_cwnd; in rxrpc_congestion_management() 49 (call->tx_top - call->tx_hard_ack) - summary->nr_acks; in rxrpc_congestion_management() 51 if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) { in rxrpc_congestion_management() 53 call->cong_ssthresh = max_t(unsigned int, in rxrpc_congestion_management() [all …]
|
| D | output.c | 36 static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret) in rxrpc_tx_backoff() argument 39 u16 tx_backoff = READ_ONCE(call->tx_backoff); in rxrpc_tx_backoff() 42 WRITE_ONCE(call->tx_backoff, tx_backoff + 1); in rxrpc_tx_backoff() 44 WRITE_ONCE(call->tx_backoff, 0); in rxrpc_tx_backoff() 50 * lets the far side know we're still interested in this call and helps keep 56 static void rxrpc_set_keepalive(struct rxrpc_call *call) in rxrpc_set_keepalive() argument 58 unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6; in rxrpc_set_keepalive() 61 WRITE_ONCE(call->keepalive_at, keepalive_at); in rxrpc_set_keepalive() 62 rxrpc_reduce_call_timer(call, keepalive_at, now, in rxrpc_set_keepalive() 70 struct rxrpc_call *call, in rxrpc_fill_out_ack() argument [all …]
|
| D | call_accept.c | 2 /* incoming call handling 25 static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call, in rxrpc_dummy_notify() argument 31 * Preallocate a single service call, connection and peer and, if possible, 42 struct rxrpc_call *call, *xcall; in rxrpc_service_prealloc_one() local 100 call = rxrpc_alloc_call(rx, gfp, debug_id); in rxrpc_service_prealloc_one() 101 if (!call) in rxrpc_service_prealloc_one() 103 call->flags |= (1 << RXRPC_CALL_IS_SERVICE); in rxrpc_service_prealloc_one() 104 call->state = RXRPC_CALL_SERVER_PREALLOC; in rxrpc_service_prealloc_one() 106 trace_rxrpc_call(call->debug_id, rxrpc_call_new_service, in rxrpc_service_prealloc_one() 107 refcount_read(&call->ref), in rxrpc_service_prealloc_one() [all …]
|
| D | conn_client.c | 8 * call so as to handle retransmitted DATA packets in case the server didn't 15 * or a call ID counter overflows. 351 * Create or find a client bundle to use for a call. 353 * If we return with a connection, the call will be on its waiting list. It's 354 * left to the caller to assign a channel and wake up the call. 357 struct rxrpc_call *call, in rxrpc_prep_call() argument 364 _enter("{%d,%lx},", call->debug_id, call->user_call_ID); in rxrpc_prep_call() 370 call->cong_cwnd = cp->peer->cong_cwnd; in rxrpc_prep_call() 371 if (call->cong_cwnd >= call->cong_ssthresh) in rxrpc_prep_call() 372 call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE; in rxrpc_prep_call() [all …]
|
| /Linux-v6.1/fs/afs/ |
| D | rxrpc.c | 26 /* asynchronous incoming call initial processing */ 132 * Allocate a call. 138 struct afs_call *call; in afs_alloc_call() local 141 call = kzalloc(sizeof(*call), gfp); in afs_alloc_call() 142 if (!call) in afs_alloc_call() 145 call->type = type; in afs_alloc_call() 146 call->net = net; in afs_alloc_call() 147 call->debug_id = atomic_inc_return(&rxrpc_debug_id); in afs_alloc_call() 148 refcount_set(&call->ref, 1); in afs_alloc_call() 149 INIT_WORK(&call->async_work, afs_process_async_call); in afs_alloc_call() [all …]
|
| D | cmservice.c | 103 * route an incoming cache manager call 106 bool afs_cm_incoming_call(struct afs_call *call) in afs_cm_incoming_call() argument 108 _enter("{%u, CB.OP %u}", call->service_id, call->operation_ID); in afs_cm_incoming_call() 110 switch (call->operation_ID) { in afs_cm_incoming_call() 112 call->type = &afs_SRXCBCallBack; in afs_cm_incoming_call() 115 call->type = &afs_SRXCBInitCallBackState; in afs_cm_incoming_call() 118 call->type = &afs_SRXCBInitCallBackState3; in afs_cm_incoming_call() 121 call->type = &afs_SRXCBProbe; in afs_cm_incoming_call() 124 call->type = &afs_SRXCBProbeUuid; in afs_cm_incoming_call() 127 call->type = &afs_SRXCBTellMeAboutYourself; in afs_cm_incoming_call() [all …]
|
| D | vlclient.c | 15 * Deliver reply data to a VL.GetEntryByNameU call. 17 static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call) in afs_deliver_vl_get_entry_by_name_u() argument 27 ret = afs_transfer_reply(call); in afs_deliver_vl_get_entry_by_name_u() 32 uvldb = call->buffer; in afs_deliver_vl_get_entry_by_name_u() 33 entry = call->ret_vldb; in afs_deliver_vl_get_entry_by_name_u() 109 static void afs_destroy_vl_get_entry_by_name_u(struct afs_call *call) in afs_destroy_vl_get_entry_by_name_u() argument 111 kfree(call->ret_vldb); in afs_destroy_vl_get_entry_by_name_u() 112 afs_flat_call_destructor(call); in afs_destroy_vl_get_entry_by_name_u() 134 struct afs_call *call; in afs_vl_get_entry_by_name_u() local 148 call = afs_alloc_flat_call(net, &afs_RXVLGetEntryByNameU, reqsz, in afs_vl_get_entry_by_name_u() [all …]
|
| D | yfsclient.c | 138 static void yfs_check_req(struct afs_call *call, __be32 *bp) in yfs_check_req() argument 140 size_t len = (void *)bp - call->request; in yfs_check_req() 142 if (len > call->request_size) in yfs_check_req() 144 call->type->name, len, call->request_size); in yfs_check_req() 145 else if (len < call->request_size) in yfs_check_req() 147 call->type->name, len, call->request_size); in yfs_check_req() 174 struct afs_call *call, in xdr_decode_YFSFetchStatus() argument 220 afs_protocol_error(call, afs_eproto_bad_status); in xdr_decode_YFSFetchStatus() 228 struct afs_call *call, in xdr_decode_YFSCallBack() argument 235 cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100); in xdr_decode_YFSCallBack() [all …]
|
| D | fsclient.c | 55 struct afs_call *call, in xdr_decode_AFSFetchStatus() argument 60 bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus); in xdr_decode_AFSFetchStatus() 128 afs_protocol_error(call, afs_eproto_bad_status); in xdr_decode_AFSFetchStatus() 132 static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry) in xdr_decode_expiry() argument 134 return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry; in xdr_decode_expiry() 138 struct afs_call *call, in xdr_decode_AFSCallBack() argument 145 cb->expires_at = xdr_decode_expiry(call, ntohl(*bp++)); in xdr_decode_AFSCallBack() 238 static int afs_deliver_fs_fetch_status(struct afs_call *call) in afs_deliver_fs_fetch_status() argument 240 struct afs_operation *op = call->op; in afs_deliver_fs_fetch_status() 245 ret = afs_transfer_reply(call); in afs_deliver_fs_fetch_status() [all …]
|
| /Linux-v6.1/include/trace/ |
| D | trace_events.h | 8 * struct trace_event_raw_<call> { 101 * struct trace_event_data_offsets_<call> { 115 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument 116 struct trace_event_data_offsets_##call { \ 141 * trace_raw_output_<call>(struct trace_iterator *iter, int flags) 144 * struct trace_event_raw_<call> *field; <-- defined in stage 1 153 * if (entry->type != event_<call>->event.type) { 161 * return trace_output_call(iter, <call>, <TP_printk> "\n"); 187 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument 189 trace_raw_output_##call(struct trace_iterator *iter, int flags, \ [all …]
|
| D | trace_custom_events.h | 62 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument 63 struct trace_custom_event_data_offsets_##call { \ 77 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument 79 trace_custom_raw_output_##call(struct trace_iterator *iter, int flags, \ 84 struct trace_custom_event_raw_##call *field; \ 97 static struct trace_event_functions trace_custom_event_type_funcs_##call = { \ 98 .trace = trace_custom_raw_output_##call, \ 108 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print) \ argument 109 static struct trace_event_fields trace_custom_event_fields_##call[] = { \ 120 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument [all …]
|
| D | bpf_probe.h | 80 #define __BPF_DECLARE_TRACE(call, proto, args) \ argument 82 __bpf_trace_##call(void *__data, proto) \ 89 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument 90 __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) 97 #define __DEFINE_EVENT(template, call, proto, args, size) \ argument 98 static inline void bpf_test_probe_##call(void) \ 100 check_trace_callback_type_##call(__bpf_trace_##template); \ 102 typedef void (*btf_trace_##call)(void *__data, proto); \ 105 btf_trace_##call handler; \ 106 } __bpf_trace_tp_map_##call __used \ [all …]
|
| /Linux-v6.1/include/asm-generic/ |
| D | syscall.h | 3 * Access to user system call parameters and results 23 * syscall_get_nr - find what system call a task is executing 27 * If @task is executing a system call or is at system call 28 * tracing about to attempt one, returns the system call number. 29 * If @task is not executing a system call, i.e. it's blocked 33 * system call number can be meaningful. If the actual arch value 36 * It's only valid to call this when @task is known to be blocked. 41 * syscall_rollback - roll back registers after an aborted system call 42 * @task: task of interest, must be in system call exit tracing 45 * It's only valid to call this when @task is stopped for system [all …]
|
| /Linux-v6.1/include/trace/events/ |
| D | rxrpc.h | 427 __field(unsigned int, call ) 435 __entry->call = call_debug_id; 443 __entry->call, 556 TP_PROTO(struct rxrpc_call *call), 558 TP_ARGS(call), 561 __field(unsigned int, call ) 568 __entry->call = call->debug_id; 569 __entry->compl = call->completion; 570 __entry->error = call->error; 571 __entry->abort_code = call->abort_code; [all …]
|
| D | afs.h | 347 EM(afs_server_trace_put_call, "PUT call ") \ 651 TP_PROTO(struct afs_call *call, struct iov_iter *iter, 654 TP_ARGS(call, iter, want_more, ret), 658 __field(unsigned int, call ) 666 __entry->call = call->debug_id; 667 __entry->state = call->state; 668 __entry->unmarshall = call->unmarshall; 675 __entry->call, 684 TP_PROTO(struct rxrpc_call *rxcall, struct afs_call *call), 686 TP_ARGS(rxcall, call), [all …]
|
| /Linux-v6.1/tools/ |
| D | Makefile | 67 $(call descend,power/$@) 70 $(call descend,power/$@) 73 $(call descend,$@) 76 $(call descend,$@) 79 $(call descend,lib/api) 82 $(call descend,include/nolibc) 85 $(call descend,include/nolibc,$(patsubst nolibc_%,%,$@)) 96 $(call descend,testing/$@) 99 $(call descend,lib/$@) 102 $(call descend,power/x86/$@) [all …]
|
| /Linux-v6.1/Documentation/networking/ |
| D | rxrpc.rst | 64 (3) Retention of the reusable bits of the transport system set up for one call 122 (#) Each RxRPC operation is a "call". A connection may make up to four 147 explicitly sequenced per call. 158 (#) An call is complete when the request has been sent, the reply has been 162 (#) An call may be aborted by either end at any time up to its completion. 182 the last call currently using it has completed in case a new call is made 215 be used in all other sendmsgs or recvmsgs associated with that call. The 220 first sendmsg() of a call (struct msghdr::msg_name). 226 first sendmsg() of the call must specify the target address. The server's 229 (#) Once the application has received the last message associated with a call, [all …]
|
| /Linux-v6.1/include/linux/firmware/intel/ |
| D | stratix10-smc.h | 13 * This file defines the Secure Monitor Call (SMC) message protocol used for 29 * FAST call executes atomic operations, returns when the requested operation 31 * STD call starts a operation which can be preempted by a non-secure 32 * interrupt. The call can return before the requested operation has 51 * Return values in INTEL_SIP_SMC_* call 81 * Sync call used by service driver at EL1 to request the FPGA in EL3 to 84 * Call register usage: 101 * Async call used by service driver at EL1 to provide FPGA configuration data 104 * Call register usage: 127 * Sync call used by service driver at EL1 to track the completed write [all …]
|
| /Linux-v6.1/tools/perf/util/ |
| D | thread-stack.h | 3 * thread-stack.h: Synthesize a thread's stack using call / return events 24 * Call/Return flags. 26 * CALL_RETURN_NO_CALL: 'return' but no matching 'call' 27 * CALL_RETURN_NO_RETURN: 'call' but no matching 'return' 28 * CALL_RETURN_NON_CALL: a branch but not a 'call' to the start of a different 38 * struct call_return - paired call/return information. 39 * @thread: thread in which call/return occurred 40 * @comm: comm in which call/return occurred 41 * @cp: call path 42 * @call_time: timestamp of call (if known) [all …]
|
| /Linux-v6.1/arch/x86/ |
| D | Makefile_32.cpu | 5 tune = $(call cc-option,-mtune=$(1),$(2)) 8 align := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0) 19 cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call tune,pentium2) 20 cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call tune,pentium3) 21 cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call tune,pentium3) 22 cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call tune,pentium4) 27 cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon) 29 cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align) 30 cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586) 31 cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586) [all …]
|
| /Linux-v6.1/arch/powerpc/ |
| D | Makefile | 13 HAS_BIARCH := $(call cc-option-yn, -m32) 49 ifeq ($(call ld-ifversion, -ge, 22500, y),y) 64 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect) 66 KBUILD_CFLAGS += $(call cc-option,-mbig-endian) 75 cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1) 76 cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mcall-aixdesc) 77 aflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1) 86 cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) 88 aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) 110 ifeq ($(call cc-option-yn,-mcmodel=medium),y) [all …]
|