/Linux-v5.4/drivers/greybus/

D | operation.c |
      33  static int gb_operation_response_send(struct gb_operation *operation,
      42  static int gb_operation_get_active(struct gb_operation *operation)   in gb_operation_get_active() argument
      44  struct gb_connection *connection = operation->connection;   in gb_operation_get_active()
      52  if (gb_operation_is_incoming(operation))   in gb_operation_get_active()
      56  if (!gb_operation_is_core(operation))   in gb_operation_get_active()
      63  if (operation->active++ == 0)   in gb_operation_get_active()
      64  list_add_tail(&operation->links, &connection->operations);   in gb_operation_get_active()
      66  trace_gb_operation_get_active(operation);   in gb_operation_get_active()
      79  static void gb_operation_put_active(struct gb_operation *operation)   in gb_operation_put_active() argument
      81  struct gb_connection *connection = operation->connection;   in gb_operation_put_active()
      [all …]
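The fragments above come from the operation core's activation tracking: the first gb_operation_get_active() links the operation onto the connection's operations list, and gb_operation_put_active() unlinks it when the count drops back to zero. A generic sketch of that "first get links, last put unlinks" idiom, using hypothetical names rather than the real greybus types:

    /*
     * Illustrative sketch only -- not the greybus implementation.
     * The struct and function names here are hypothetical.
     */
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_conn {
        spinlock_t lock;
        struct list_head operations;        /* in-flight operations */
    };

    struct demo_op {
        struct demo_conn *conn;
        struct list_head links;             /* entry on conn->operations */
        int active;                         /* activation count */
    };

    static void demo_op_get_active(struct demo_op *op)
    {
        unsigned long flags;

        spin_lock_irqsave(&op->conn->lock, flags);
        if (op->active++ == 0)              /* first activation: start tracking */
            list_add_tail(&op->links, &op->conn->operations);
        spin_unlock_irqrestore(&op->conn->lock, flags);
    }

    static void demo_op_put_active(struct demo_op *op)
    {
        unsigned long flags;

        spin_lock_irqsave(&op->conn->lock, flags);
        if (--op->active == 0)              /* last deactivation: stop tracking */
            list_del(&op->links);
        spin_unlock_irqrestore(&op->conn->lock, flags);
    }

Keeping the list under the connection lock is what later lets code such as gb_connection_cancel_operations() (see connection.c below) walk every in-flight operation safely.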
D | greybus_trace.h |
      92  TP_PROTO(struct gb_operation *operation),
      94  TP_ARGS(operation),
     107  __entry->cport_id = operation->connection->hd_cport_id;
     108  __entry->id = operation->id;
     109  __entry->type = operation->type;
     110  __entry->flags = operation->flags;
     111  __entry->active = operation->active;
     112  __entry->waiters = atomic_read(&operation->waiters);
     113  __entry->errno = operation->errno;
     123  TP_PROTO(struct gb_operation *operation), \
      [all …]
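The TP_PROTO/TP_ARGS/__entry fragments above belong to TRACE_EVENT/DECLARE_EVENT_CLASS definitions that snapshot an operation's bookkeeping fields when a greybus tracepoint fires. A stripped-down sketch of such a trace header follows; the event name and the field C types are assumptions for the sketch, only the field names come from the excerpt:

    /* demo_trace.h -- hypothetical trace header in the usual TRACE_EVENT shape. */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM demo

    #if !defined(_DEMO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _DEMO_TRACE_H

    #include <linux/tracepoint.h>
    #include <linux/greybus.h>          /* assumed location of struct gb_operation */

    TRACE_EVENT(demo_gb_operation,
        TP_PROTO(struct gb_operation *operation),
        TP_ARGS(operation),
        TP_STRUCT__entry(
            __field(u16, id)            /* C types assumed for this sketch */
            __field(u8, type)
            __field(int, errno)
        ),
        TP_fast_assign(
            __entry->id = operation->id;
            __entry->type = operation->type;
            __entry->errno = operation->errno;
        ),
        TP_printk("id=%u type=%u errno=%d",
                  __entry->id, __entry->type, __entry->errno)
    );

    #endif /* _DEMO_TRACE_H */

    /* Must stay outside the include guard. */
    #undef TRACE_INCLUDE_PATH
    #define TRACE_INCLUDE_PATH .
    #define TRACE_INCLUDE_FILE demo_trace
    #include <trace/define_trace.h>

As usual for tracepoints, exactly one .c file in the module would define CREATE_TRACE_POINTS before including this header so the event bodies are emitted once.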
D | svc.c |
      19  struct gb_operation *operation;   member
      22  static int gb_svc_queue_deferred_request(struct gb_operation *operation);
     911  static void gb_svc_process_hello_deferred(struct gb_operation *operation)   in gb_svc_process_hello_deferred() argument
     913  struct gb_connection *connection = operation->connection;   in gb_svc_process_hello_deferred()
     943  static void gb_svc_process_module_inserted(struct gb_operation *operation)   in gb_svc_process_module_inserted() argument
     946  struct gb_connection *connection = operation->connection;   in gb_svc_process_module_inserted()
     956  request = operation->request->payload;   in gb_svc_process_module_inserted()
     991  static void gb_svc_process_module_removed(struct gb_operation *operation)   in gb_svc_process_module_removed() argument
     994  struct gb_connection *connection = operation->connection;   in gb_svc_process_module_removed()
    1000  request = operation->request->payload;   in gb_svc_process_module_removed()
      [all …]
D | connection.c |
     537  struct gb_operation *operation;   in gb_connection_shutdown_operation() local
     540  operation = gb_operation_create_core(connection,   in gb_connection_shutdown_operation()
     544  if (!operation)   in gb_connection_shutdown_operation()
     547  req = operation->request->payload;   in gb_connection_shutdown_operation()
     550  ret = gb_operation_request_send_sync(operation);   in gb_connection_shutdown_operation()
     552  gb_operation_put(operation);   in gb_connection_shutdown_operation()
     608  struct gb_operation *operation;   in gb_connection_cancel_operations() local
     611  operation = list_last_entry(&connection->operations,   in gb_connection_cancel_operations()
     613  gb_operation_get(operation);   in gb_connection_cancel_operations()
     616  if (gb_operation_is_incoming(operation))   in gb_connection_cancel_operations()
      [all …]
D | control.c |
     154  struct gb_operation *operation;   in gb_control_disconnecting_operation() local
     157  operation = gb_operation_create_core(control->connection,   in gb_control_disconnecting_operation()
     161  if (!operation)   in gb_control_disconnecting_operation()
     164  request = operation->request->payload;   in gb_control_disconnecting_operation()
     167  ret = gb_operation_request_send_sync(operation);   in gb_control_disconnecting_operation()
     173  gb_operation_put(operation);   in gb_control_disconnecting_operation()
     180  struct gb_operation *operation;   in gb_control_mode_switch_operation() local
     183  operation = gb_operation_create_core(control->connection,   in gb_control_mode_switch_operation()
     188  if (!operation)   in gb_control_mode_switch_operation()
     191  ret = gb_operation_request_send_sync(operation);   in gb_control_mode_switch_operation()
      [all …]
/Linux-v5.4/include/linux/greybus/

D | operation.h |
      53  struct gb_operation *operation;   member
     117  gb_operation_is_incoming(struct gb_operation *operation)   in gb_operation_is_incoming() argument
     119  return operation->flags & GB_OPERATION_FLAG_INCOMING;   in gb_operation_is_incoming()
     123  gb_operation_is_unidirectional(struct gb_operation *operation)   in gb_operation_is_unidirectional() argument
     125  return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL;   in gb_operation_is_unidirectional()
     129  gb_operation_short_response_allowed(struct gb_operation *operation)   in gb_operation_short_response_allowed() argument
     131  return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE;   in gb_operation_short_response_allowed()
     134  static inline bool gb_operation_is_core(struct gb_operation *operation)   in gb_operation_is_core() argument
     136  return operation->flags & GB_OPERATION_FLAG_CORE;   in gb_operation_is_core()
     142  int gb_operation_result(struct gb_operation *operation);
      [all …]
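The header's inline predicates are plain tests against operation->flags; drivers use them to tell apart incoming, unidirectional and core-internal operations. A small hypothetical consumer, built only on the helpers declared above:

    #include <linux/greybus.h>      /* assumed umbrella header for the declarations above */

    /*
     * Hypothetical helper: classify an operation using the operation.h
     * predicates.  Incoming operations were initiated by the remote end;
     * unidirectional ones carry no response message at all.
     */
    static const char *demo_operation_kind(struct gb_operation *operation)
    {
        if (gb_operation_is_core(operation))
            return "core (connection-internal)";
        if (gb_operation_is_incoming(operation))
            return gb_operation_is_unidirectional(operation) ?
                   "incoming, no response expected" : "incoming request";
        return "outgoing request";
    }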
/Linux-v5.4/Documentation/filesystems/caching/

D | operations.txt |
      40  An operation is recorded in an fscache_operation struct:
      52  Someone wanting to issue an operation should allocate something with this
      58  with the operation to be initialised and the release function to use.
      67  operation and waited for afterwards.
      74  There are a number of parameters that can be set in the operation record's flag
      78  (1) The operation may be done synchronously (FSCACHE_OP_MYTHREAD). A thread
      79  may decide it wants to handle an operation itself without deferring it to
      89  before submitting the operation, and the operating thread must wait for it
      96  (2) The operation may be fast asynchronous (FSCACHE_OP_FAST), in which case it
      97  will be given to keventd to process. Such an operation is not permitted
      [all …]
/Linux-v5.4/net/netfilter/

D | xt_ecn.c |
      40  if (einfo->operation & XT_ECN_OP_MATCH_ECE) {   in match_tcp()
      50  if (einfo->operation & XT_ECN_OP_MATCH_CWR) {   in match_tcp()
      74  if (info->operation & XT_ECN_OP_MATCH_IP && !match_ip(skb, info))   in ecn_mt4()
      77  if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&   in ecn_mt4()
      89  if (info->operation & XT_ECN_OP_MATCH_MASK)   in ecn_mt_check4()
      95  if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&   in ecn_mt_check4()
     116  if (info->operation & XT_ECN_OP_MATCH_IP && !match_ipv6(skb, info))   in ecn_mt6()
     119  if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&   in ecn_mt6()
     131  if (info->operation & XT_ECN_OP_MATCH_MASK)   in ecn_mt_check6()
     137  if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&   in ecn_mt_check6()
/Linux-v5.4/security/tomoyo/

D | file.c |
     169  [r->param.path.operation],   in tomoyo_audit_path_log()
     183  [tomoyo_pp2mac[r->param.path2.operation]],   in tomoyo_audit_path2_log()
     199  [tomoyo_pnnn2mac[r->param.mkdev.operation]],   in tomoyo_audit_mkdev_log()
     214  const u8 type = r->param.path_number.operation;   in tomoyo_audit_path_number_log()
     258  if (acl->perm & (1 << r->param.path.operation)) {   in tomoyo_check_path_acl()
     281  return (acl->perm & (1 << r->param.path_number.operation)) &&   in tomoyo_check_path_number_acl()
     302  return (acl->perm & (1 << r->param.path2.operation)) &&   in tomoyo_check_path2_acl()
     322  return (acl->perm & (1 << r->param.mkdev.operation)) &&   in tomoyo_check_mkdev_acl()
     573  static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,   in tomoyo_path_permission() argument
     578  r->type = tomoyo_p2mac[operation];   in tomoyo_path_permission()
      [all …]
D | network.c |
      27  u8 operation;   member
     288  const char *operation = tomoyo_read_token(param);   in tomoyo_write_inet_network() local
     294  if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))   in tomoyo_write_inet_network()
     333  const char *operation = tomoyo_read_token(param);   in tomoyo_write_unix_network() local
     339  if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))   in tomoyo_write_unix_network()
     365  const u8 operation, const char *address)   in tomoyo_audit_net_log() argument
     369  tomoyo_socket_keyword[operation], address);   in tomoyo_audit_net_log()
     394  r->param.inet_network.operation, buf);   in tomoyo_audit_inet_log()
     407  r->param.unix_network.operation,   in tomoyo_audit_unix_log()
     426  if (!(acl->perm & (1 << r->param.inet_network.operation)) ||   in tomoyo_check_inet_acl()
      [all …]
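Both TOMOYO files follow one convention: an operation is a small integer index, an ACL entry's perm word carries one bit per operation, and a permission test is a single shift-and-mask, as in the tomoyo_check_*_acl() lines above. A generic sketch of that idiom (the names are hypothetical; only the perm & (1 << operation) shape comes from the excerpts):

    #include <linux/types.h>

    /* Hypothetical ACL entry in the style of the tomoyo checks above. */
    struct demo_acl {
        u16 perm;                       /* one bit per operation index */
    };

    /* True when the ACL grants the given operation index (0..15). */
    static bool demo_acl_allows(const struct demo_acl *acl, u8 operation)
    {
        return acl->perm & (1 << operation);
    }

    /* Granting an operation is the corresponding bit-set. */
    static void demo_acl_grant(struct demo_acl *acl, u8 operation)
    {
        acl->perm |= 1 << operation;
    }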
/Linux-v5.4/net/ipv4/netfilter/

D | ipt_ECN.c |
      55  if ((!(einfo->operation & IPT_ECN_OP_SET_ECE) ||   in set_ect_tcp()
      57  (!(einfo->operation & IPT_ECN_OP_SET_CWR) ||   in set_ect_tcp()
      66  if (einfo->operation & IPT_ECN_OP_SET_ECE)   in set_ect_tcp()
      68  if (einfo->operation & IPT_ECN_OP_SET_CWR)   in set_ect_tcp()
      81  if (einfo->operation & IPT_ECN_OP_SET_IP)   in ecn_tg()
      85  if (einfo->operation & (IPT_ECN_OP_SET_ECE | IPT_ECN_OP_SET_CWR) &&   in ecn_tg()
      98  if (einfo->operation & IPT_ECN_OP_MASK)   in ecn_tg_check()
     104  if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) &&   in ecn_tg_check()
/Linux-v5.4/drivers/staging/greybus/

D | loopback.c |
      54  struct gb_operation *operation;   member
     374  struct gb_operation *operation;   in gb_loopback_operation_sync() local
     379  operation = gb_operation_create(gb->connection, type, request_size,   in gb_loopback_operation_sync()
     381  if (!operation)   in gb_loopback_operation_sync()
     385  memcpy(operation->request->payload, request, request_size);   in gb_loopback_operation_sync()
     387  ret = gb_operation_request_send_sync(operation);   in gb_loopback_operation_sync()
     393  if (response_size == operation->response->payload_size) {   in gb_loopback_operation_sync()
     394  memcpy(response, operation->response->payload,   in gb_loopback_operation_sync()
     399  operation->response->payload_size,   in gb_loopback_operation_sync()
     412  gb_operation_put(operation);   in gb_loopback_operation_sync()
      [all …]
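gb_loopback_operation_sync() shows the canonical Greybus request/response round trip that the i2c.c, usb.c and spilib.c entries below repeat: create the operation with request and response sizes, fill the request payload, send synchronously, copy out the response payload, then drop the reference. A hedged sketch of that pattern (the operation type, payload structs and wrapper function are hypothetical; the gb_operation_* calls are the ones quoted above, with the five-argument gb_operation_create() signature assumed from the excerpts):

    #include <linux/greybus.h>          /* assumed umbrella header for gb_operation_* */

    #define DEMO_TYPE_ECHO  0x42        /* hypothetical operation type */

    struct demo_echo_request  { __le32 value; } __packed;   /* hypothetical wire format */
    struct demo_echo_response { __le32 value; } __packed;

    static int demo_echo(struct gb_connection *connection, u32 value, u32 *result)
    {
        struct gb_operation *operation;
        struct demo_echo_request *request;
        struct demo_echo_response *response;
        int ret;

        /* Allocate the request and response messages in one go. */
        operation = gb_operation_create(connection, DEMO_TYPE_ECHO,
                                        sizeof(*request), sizeof(*response),
                                        GFP_KERNEL);
        if (!operation)
            return -ENOMEM;

        request = operation->request->payload;
        request->value = cpu_to_le32(value);

        /* Blocks until the response (or an error) arrives. */
        ret = gb_operation_request_send_sync(operation);
        if (!ret) {
            response = operation->response->payload;
            *result = le32_to_cpu(response->value);
        }

        gb_operation_put(operation);
        return ret;
    }

spilib.c below uses the _timeout variant of the synchronous send, but the create/fill/send/put shape is the same.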
D | i2c.c |
      83  struct gb_operation *operation;   in gb_i2c_operation_create() local
     116  operation = gb_operation_create(connection, GB_I2C_TYPE_TRANSFER,   in gb_i2c_operation_create()
     118  if (!operation)   in gb_i2c_operation_create()
     121  request = operation->request->payload;   in gb_i2c_operation_create()
     130  return operation;   in gb_i2c_operation_create()
     143  return operation;   in gb_i2c_operation_create()
     178  struct gb_operation *operation;   in gb_i2c_transfer_operation() local
     181  operation = gb_i2c_operation_create(connection, msgs, msg_count);   in gb_i2c_transfer_operation()
     182  if (!operation)   in gb_i2c_transfer_operation()
     189  ret = gb_operation_request_send_sync(operation);   in gb_i2c_transfer_operation()
      [all …]
D | usb.c |
     102  struct gb_operation *operation;   in hub_control() local
     111  operation = gb_operation_create(dev->connection,   in hub_control()
     116  if (!operation)   in hub_control()
     119  request = operation->request->payload;   in hub_control()
     125  ret = gb_operation_request_send_sync(operation);   in hub_control()
     131  response = operation->response->payload;   in hub_control()
     135  gb_operation_put(operation);   in hub_control()
D | spilib.c |
     169  struct gb_operation *operation;   in gb_spi_operation_create() local
     232  operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,   in gb_spi_operation_create()
     234  if (!operation)   in gb_spi_operation_create()
     237  request = operation->request->payload;   in gb_spi_operation_create()
     290  return operation;   in gb_spi_operation_create()
     329  struct gb_operation *operation;   in gb_spi_transfer_one_message() local
     344  operation = gb_spi_operation_create(spi, connection, msg);   in gb_spi_transfer_one_message()
     345  if (!operation) {   in gb_spi_transfer_one_message()
     351  ret = gb_operation_request_send_sync_timeout(operation,   in gb_spi_transfer_one_message()
     354  response = operation->response->payload;   in gb_spi_transfer_one_message()
      [all …]
/Linux-v5.4/drivers/net/ethernet/apm/xgene/

D | xgene_enet_cle.c |
      96  SET_VAL(CLE_BR_OP, br->operation) |   in xgene_cle_dn_to_hw()
     214  .operation = EQT,
     225  .operation = EQT,
     250  .operation = EQT,
     262  .operation = EQT,
     273  .operation = EQT,
     298  .operation = EQT,
     310  .operation = EQT,
     322  .operation = EQT,
     334  .operation = EQT,
      [all …]
/Linux-v5.4/arch/arm/mm/

D | cache-uniphier.c |
     115  u32 operation)   in __uniphier_cache_maint_common() argument
     149  writel_relaxed(UNIPHIER_SSCOQM_CE | operation,   in __uniphier_cache_maint_common()
     153  if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {   in __uniphier_cache_maint_common()
     169  u32 operation)   in __uniphier_cache_maint_all() argument
     172  UNIPHIER_SSCOQM_S_ALL | operation);   in __uniphier_cache_maint_all()
     179  u32 operation)   in __uniphier_cache_maint_range() argument
     193  __uniphier_cache_maint_all(data, operation);   in __uniphier_cache_maint_range()
     208  UNIPHIER_SSCOQM_S_RANGE | operation);   in __uniphier_cache_maint_range()
     237  u32 operation)   in uniphier_cache_maint_range() argument
     242  __uniphier_cache_maint_range(data, start, end, operation);   in uniphier_cache_maint_range()
      [all …]
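The UniPhier outer-cache code builds a command for the controller's operation queue by OR-ing an operation code with a size qualifier (whole cache vs. address range) and the command-enable bit before writing it out. A minimal sketch of that composition, reusing only the bit names visible above (which are local macros inside cache-uniphier.c); the function and the register pointer are hypothetical:

    #include <linux/io.h>

    /* Hypothetical: base of the cache controller's operation-queue register. */
    static void __iomem *demo_sscoqm_reg;

    static void demo_issue_maint(u32 operation, bool whole_cache)
    {
        /*
         * Qualify the operation (whole cache or address range), then set
         * the command-enable bit, in the spirit of the quoted
         * __uniphier_cache_maint_* helpers.
         */
        u32 cmd = operation |
                  (whole_cache ? UNIPHIER_SSCOQM_S_ALL : UNIPHIER_SSCOQM_S_RANGE);

        writel_relaxed(UNIPHIER_SSCOQM_CE | cmd, demo_sscoqm_reg);
    }

For a ranged operation the real driver also programs the start address and size registers before enabling the command, which is why it distinguishes UNIPHIER_SSCOQM_S_IS_RANGE() in the common path.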
/Linux-v5.4/Documentation/crypto/

D | async-tx-api.txt |
      11  3.4 When does the operation execute?
      12  3.5 When does the operation complete?
      30  that is written to the API can optimize for asynchronous operation and
      44  operation will be offloaded when an engine is available and carried out
      48  API automatically handles cases where the transition from one operation
      50  3/ dmaengine extensions to support multiple clients and operation types
      57  async_<operation>(<op specific parameters>, struct async_submit ctl *submit)
      76  The return value is non-NULL and points to a 'descriptor' when the operation
      90  3.4 When does the operation execute?
      92  async_<operation> call. Offload engine drivers batch operations to
      [all …]
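The API shape quoted at line 57 means every offloadable operation takes the same submit descriptor carrying flags, a dependency and a completion callback, and returns a descriptor only when the work was actually offloaded. A hedged sketch of a single asynchronous copy with a completion callback; the signatures are recalled from include/linux/async_tx.h and should be double-checked there rather than taken as authoritative:

    #include <linux/async_tx.h>
    #include <linux/completion.h>

    static void demo_copy_done(void *param)
    {
        struct completion *done = param;

        complete(done);                 /* runs once the copy has finished */
    }

    static void demo_async_copy(struct page *dst, struct page *src, size_t len)
    {
        DECLARE_COMPLETION_ONSTACK(done);
        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;

        /* No dependency; run the callback when the operation completes. */
        init_async_submit(&submit, ASYNC_TX_ACK, NULL,
                          demo_copy_done, &done, NULL);

        tx = async_memcpy(dst, src, 0, 0, len, &submit);
        if (tx)
            async_tx_issue_pending_all();   /* offloaded: kick the channels */
        /* A NULL return means the copy was carried out synchronously. */

        wait_for_completion(&done);
    }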
D | userspace-if.rst |
      61  user space application. User space invokes the cipher operation with the
      62  send()/write() system call family. The result of the cipher operation is
      83  In-place Cipher operation
      86  Just like the in-kernel operation of the kernel crypto API, the user
      87  space interface allows the cipher operation in-place. That means that
      96  operation.
     101  The message digest type to be used for the cipher operation is selected
     135  operation is performed without the initial HMAC state change caused by
     141  The operation is very similar to the message digest discussion. During
     168  - specification of the cipher operation type with one of these flags:
      [all …]
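The excerpt describes the AF_ALG interface: the application binds a socket to an algorithm name, pushes the data with the send()/write() family and reads back the result of the operation. A small user-space sketch computing a SHA-256 digest that way (error handling trimmed; the algorithm name is just an example):

    /* Minimal AF_ALG hash example: send the data, read back the digest. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "hash",
            .salg_name   = "sha256",
        };
        unsigned char digest[32];
        const char msg[] = "hello";
        int tfm, op;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        op = accept(tfm, NULL, 0);              /* one operation instance */

        send(op, msg, strlen(msg), 0);          /* input of the operation */
        read(op, digest, sizeof(digest));       /* result of the operation */

        for (size_t i = 0; i < sizeof(digest); i++)
            printf("%02x", digest[i]);
        printf("\n");

        close(op);
        close(tfm);
        return 0;
    }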
/Linux-v5.4/drivers/hv/

D | hv_snapshot.c |
     110  switch (vss_msg->vss_hdr.operation) {   in vss_handle_handshake()
     138  if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||   in vss_on_msg()
     139  vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {   in vss_on_msg()
     153  if (vss_msg->vss_hdr.operation == VSS_OP_HOT_BACKUP)   in vss_on_msg()
     173  int op = vss_transaction.msg->vss_hdr.operation;   in vss_send_op()
     187  vss_msg->vss_hdr.operation = op;   in vss_send_op()
     208  switch (vss_transaction.msg->vss_hdr.operation) {   in vss_handle_request()
     227  vss_transaction.msg->vss_hdr.operation);   in vss_handle_request()
/Linux-v5.4/tools/testing/selftests/tc-testing/

D | tdc_multibatch.py |
      52  file_prefix = args.file_prefix + args.operation + "_"
      55  operation = args.operation   variable
      63  num_filters, handle, operation, i + mac_prefix, device, file))
/Linux-v5.4/drivers/block/xen-blkback/

D | blkback.c |
     506  int operation)   in xen_vbd_translate() argument
     511  if ((operation != REQ_OP_READ) && vbd->readonly)   in xen_vbd_translate()
     732  pending_req->operation, pending_req->status);   in xen_blkbk_unmap_and_respond_callback()
     948  (pending_req->operation != BLKIF_OP_READ));   in xen_blkbk_map_seg()
    1044  make_response(ring, req->u.discard.id, req->operation, status);   in dispatch_discard_io()
    1054  make_response(ring, req->u.other.id, req->operation,   in dispatch_other_io()
    1080  if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&   in __end_block_io_op()
    1085  } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&   in __end_block_io_op()
    1175  switch (req.operation) {   in __do_block_io_op()
    1232  int operation;   in dispatch_rw_block_io() local
      [all …]
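dispatch_rw_block_io() has to translate the operation code carried in the ring request (BLKIF_OP_*) into the block layer's REQ_OP_* values before submitting bios, which is why it keeps a local int operation. A hedged sketch of that mapping (the function is illustrative and not the real xen-blkback code; the constant names either appear in the excerpt or are the standard blkif/block-layer ones):

    #include <linux/blk_types.h>
    #include <xen/interface/io/blkif.h>

    /* Illustrative mapping in the spirit of dispatch_rw_block_io(). */
    static int demo_blkif_to_req_op(unsigned int blkif_op)
    {
        switch (blkif_op) {
        case BLKIF_OP_READ:
            return REQ_OP_READ;
        case BLKIF_OP_WRITE:
            return REQ_OP_WRITE;
        case BLKIF_OP_FLUSH_DISKCACHE:
            /* The real driver submits this as a write with a flush
             * attached; treated as a plain write here for simplicity. */
            return REQ_OP_WRITE;
        default:
            return -EINVAL;             /* unknown operation code */
        }
    }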
/Linux-v5.4/drivers/net/wireless/intersil/prism54/

D | islpci_mgt.c |
      58  pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h)   in pimfor_encode_header() argument
      61  h->operation = operation;   in pimfor_encode_header()
     151  islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,   in islpci_mgt_transmit() argument
     184  pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);   in islpci_mgt_transmit()
     197  h->operation, oid, h->device_id, h->flags, length);   in islpci_mgt_transmit()
     324  header->operation, header->oid, header->device_id,   in islpci_mgt_receive()
     359  if (header->operation == PIMFOR_OP_TRAP) {   in islpci_mgt_receive()
     431  int operation, unsigned long oid,   in islpci_mgt_transaction() argument
     447  err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);   in islpci_mgt_transaction()
/Linux-v5.4/Documentation/media/uapi/dvb/

D | fe-set-frontend.rst |
      38  Points to parameters for tuning operation.
      44  This ioctl call starts a tuning operation using specified parameters.
      46  and the tuning could be initiated. The result of the tuning operation in
      50  operation is initiated before the previous one was completed, the
      51  previous operation will be aborted in favor of the new one. This command
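FE_SET_FRONTEND is the legacy DVBv3 ioctl that starts a tuning operation with the parameters pointed to by the argument; completion has to be observed separately (frontend events/status). A user-space sketch for a DVB-T style tune; the device path, frequency and transmission parameters are placeholders:

    /* Legacy DVBv3 sketch: start a tuning operation and return. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/frontend.h>

    int main(void)
    {
        struct dvb_frontend_parameters p;
        int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);   /* placeholder path */

        if (fd < 0)
            return 1;

        memset(&p, 0, sizeof(p));
        p.frequency = 474000000;                /* placeholder: 474 MHz */
        p.inversion = INVERSION_AUTO;
        p.u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
        p.u.ofdm.constellation = QAM_64;
        p.u.ofdm.code_rate_HP = FEC_AUTO;
        p.u.ofdm.code_rate_LP = FEC_AUTO;
        p.u.ofdm.guard_interval = GUARD_INTERVAL_AUTO;
        p.u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO;
        p.u.ofdm.hierarchy_information = HIERARCHY_AUTO;

        /* Starts the tuning operation; a newer request aborts an older one. */
        if (ioctl(fd, FE_SET_FRONTEND, &p) < 0)
            perror("FE_SET_FRONTEND");

        close(fd);
        return 0;
    }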
/Linux-v5.4/Documentation/core-api/

D | atomic_ops.rst |
      64  been set with this operation or set with another operation. A proper implicit
      65  or explicit memory barrier is needed before the value set with the operation
     186  Now, we move onto the atomic operation interfaces typically implemented with
     209  atomic_t and return the new counter value after the operation is
     214  the operation. It must be done such that all memory operations before
     215  and after the atomic operation calls are strongly ordered with respect
     216  to the atomic operation itself.
     219  before and after the atomic operation.
     245  the atomic operation::
     251  provide explicit memory barrier semantics around the operation::
      [all …]
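The quoted passages stress that value-returning atomic operations (the ones that hand back the counter after the operation) must act as full memory barriers around the operation itself. A short kernel-style sketch of the difference; the reference-count use is illustrative:

    #include <linux/atomic.h>

    static atomic_t demo_users = ATOMIC_INIT(1);

    static void demo_get(void)
    {
        /* Returns nothing, so no ordering guarantee is implied here. */
        atomic_inc(&demo_users);
    }

    static bool demo_put(void)
    {
        /*
         * atomic_dec_return() reports the counter value after the operation
         * and, per the text above, is strongly ordered with respect to the
         * memory accesses on both sides -- which is what makes "last user
         * tears the object down" patterns safe.
         */
        return atomic_dec_return(&demo_users) == 0;
    }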