/Linux-v5.4/drivers/media/mc/ |
D | mc-request.c |
    in media_request_clean() (req: function argument)
         39  static void media_request_clean(struct media_request *req)
         44      WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
         45      WARN_ON(req->updating_count);
         46      WARN_ON(req->access_count);
         48      list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
         53      req->updating_count = 0;
         54      req->access_count = 0;
         55      WARN_ON(req->num_incomplete_objects);
         56      req->num_incomplete_objects = 0;
         57      wake_up_interruptible_all(&req->poll_wait);
    [all …]
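    The excerpt above frees request objects while walking the list, which is why it uses the kernel's _safe list iterator. A minimal userspace sketch of the same "save the link before freeing the node" idea, using an ad-hoc singly linked list rather than list.h (all names here are invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int id;
        struct obj *next;
    };

    /* Free every node: remember 'next' before the current node is freed,
     * the same reason media_request_clean() uses list_for_each_entry_safe()
     * instead of the plain iterator. */
    static void free_all(struct obj **head)
    {
        struct obj *cur = *head, *next;

        while (cur) {
            next = cur->next;          /* save the link first */
            printf("freeing obj %d\n", cur->id);
            free(cur);                 /* now the node may go away */
            cur = next;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct obj *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct obj *o = malloc(sizeof(*o));

            if (!o)
                break;
            o->id = i;
            o->next = head;
            head = o;
        }
        free_all(&head);
        return 0;
    }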
|
/Linux-v5.4/drivers/s390/scsi/ |
D | zfcp_fsf.c |
    in zfcp_fsf_class_not_supp() (req: function argument)
         78  static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
         80      dev_err(&req->adapter->ccw_device->dev, "FCP device not "
         82      zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
         83      req->status |= ZFCP_STATUS_FSFREQ_ERROR;
    in zfcp_fsf_req_free() (req: function argument)
         90  void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
         92      if (likely(req->pool)) {
         93          if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
         94              mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
         95          mempool_free(req, req->pool);
         99      if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
    [all …]
|
/Linux-v5.4/drivers/staging/greybus/ |
D | audio_apbridgea.c |
    in gb_audio_apbridgea_set_config() (req: local variable)
         16  struct audio_apbridgea_set_config_request req;
         18  req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
         19  req.hdr.i2s_port = cpu_to_le16(i2s_port);
         20  req.format = cpu_to_le32(format);
         21  req.rate = cpu_to_le32(rate);
         22  req.mclk_freq = cpu_to_le32(mclk_freq);
         24  return gb_hd_output(connection->hd, &req, sizeof(req),
    in gb_audio_apbridgea_register_cport() (req: local variable)
         33  struct audio_apbridgea_register_cport_request req;
         36  req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;
         37  req.hdr.i2s_port = cpu_to_le16(i2s_port);
    [all …]
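    The request above is filled with cpu_to_le16()/cpu_to_le32() so the struct that goes out on the wire is little-endian regardless of host byte order. A hedged userspace sketch of the same idea using glibc's htole16()/htole32(); the struct layout and field values below are invented for illustration and are not the real greybus format:

    #define _DEFAULT_SOURCE
    #include <endian.h>     /* htole16, htole32 (glibc) */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative wire format only. */
    struct demo_set_config_req {
        uint8_t  type;
        uint16_t i2s_port;   /* little-endian on the wire */
        uint32_t format;     /* little-endian on the wire */
        uint32_t rate;       /* little-endian on the wire */
    } __attribute__((packed));

    int main(void)
    {
        struct demo_set_config_req req;
        const uint8_t *p = (const uint8_t *)&req;

        memset(&req, 0, sizeof(req));
        req.type = 0x01;
        req.i2s_port = htole16(3);
        req.format = htole32(0x2000);
        req.rate = htole32(48000);

        /* Dump the bytes exactly as they would be sent. */
        for (size_t i = 0; i < sizeof(req); i++)
            printf("%02x ", p[i]);
        printf("\n");
        return 0;
    }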
|
D | audio_gb.c |
    in gb_audio_gb_get_control() (req: local variable)
         50  struct gb_audio_get_control_request req;
         54  req.control_id = control_id;
         55  req.index = index;
         58      &req, sizeof(req), &resp, sizeof(resp));
    in gb_audio_gb_set_control() (req: local variable)
         72  struct gb_audio_set_control_request req;
         74  req.control_id = control_id;
         75  req.index = index;
         76  memcpy(&req.value, value, sizeof(req.value));
         79      &req, sizeof(req), NULL, 0);
    in gb_audio_gb_enable_widget() (req: local variable)
         86  struct gb_audio_enable_widget_request req;
    [all …]
|
/Linux-v5.4/drivers/nvme/target/ |
D | io-cmd-file.c |
    in nvmet_file_submit_bvec() (req: function argument)
         90  static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
         93      struct kiocb *iocb = &req->f.iocb;
         98      if (req->cmd->rw.opcode == nvme_cmd_write) {
         99          if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
        101          call_iter = req->ns->file->f_op->write_iter;
        104          call_iter = req->ns->file->f_op->read_iter;
        108      iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
        111      iocb->ki_filp = req->ns->file;
        112      iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
    in nvmet_file_io_done() (req: local variable)
        119  struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
    [all …]
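    nvmet_file_submit_bvec() packages the request's bio_vec array into an iov_iter and hands it to the file's ->read_iter/->write_iter. A loose userspace analog of "one call, many buffers" is vectored I/O with pwritev(); this is only a sketch of the idea, not the in-kernel kiocb path, and the file name is made up:

    #define _DEFAULT_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        char a[] = "hello ";
        char b[] = "vectored ";
        char c[] = "world\n";
        struct iovec iov[3] = {
            { .iov_base = a, .iov_len = strlen(a) },
            { .iov_base = b, .iov_len = strlen(b) },
            { .iov_base = c, .iov_len = strlen(c) },
        };
        int fd = open("demo.txt", O_CREAT | O_WRONLY | O_TRUNC, 0644);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* Write all three buffers at offset 0 in a single call. */
        if (pwritev(fd, iov, 3, 0) < 0)
            perror("pwritev");
        close(fd);
        return 0;
    }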
|
D | admin-cmd.c |
    in nvmet_execute_get_log_page_noop() (req: function argument)
         32  static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
         34      nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
    in nvmet_execute_get_log_page_error() (req: function argument)
         37  static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
         39      struct nvmet_ctrl *ctrl = req->sq->ctrl;
         49      if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
         60      nvmet_req_complete(req, 0);
    in nvmet_get_smart_log_nsid() (req: function argument)
         63  static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
         69      ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
         72          le32_to_cpu(req->cmd->get_log_page.nsid));
         73      req->error_loc = offsetof(struct nvme_rw_command, nsid);
    [all …]
|
D | io-cmd-bdev.c |
    in blk_to_nvme_status() (req: function argument)
         78  static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
         93      req->error_loc = offsetof(struct nvme_rw_command, length);
         97      req->error_loc = offsetof(struct nvme_rw_command, slba);
        100      req->error_loc = offsetof(struct nvme_common_command, opcode);
        101      switch (req->cmd->common.opcode) {
        112      req->error_loc = offsetof(struct nvme_rw_command, nsid);
        118      req->error_loc = offsetof(struct nvme_common_command, opcode);
        121      switch (req->cmd->common.opcode) {
        124          req->error_slba = le64_to_cpu(req->cmd->rw.slba);
        127          req->error_slba =
    [all …]
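    blk_to_nvme_status() reports where a failure was detected by storing offsetof() of the offending command field into req->error_loc. A tiny standalone illustration of offsetof(); the command layout below is made up and is not the real NVMe structure:

    #include <stddef.h>   /* offsetof */
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout only. */
    struct demo_rw_command {
        uint8_t  opcode;
        uint8_t  flags;
        uint16_t command_id;
        uint32_t nsid;
        uint64_t slba;
        uint16_t length;
    };

    int main(void)
    {
        /* An error-location field can simply record the byte offset of
         * the field that failed validation, as the nvmet code does. */
        printf("opcode at byte %zu\n", offsetof(struct demo_rw_command, opcode));
        printf("nsid   at byte %zu\n", offsetof(struct demo_rw_command, nsid));
        printf("slba   at byte %zu\n", offsetof(struct demo_rw_command, slba));
        printf("length at byte %zu\n", offsetof(struct demo_rw_command, length));
        return 0;
    }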
|
D | fabrics-cmd.c |
    in nvmet_execute_prop_set() (req: function argument)
         10  static void nvmet_execute_prop_set(struct nvmet_req *req)
         12      u64 val = le64_to_cpu(req->cmd->prop_set.value);
         15      if (req->cmd->prop_set.attrib & 1) {
         16          req->error_loc =
         22      switch (le32_to_cpu(req->cmd->prop_set.offset)) {
         24          nvmet_update_cc(req->sq->ctrl, val);
         27          req->error_loc =
         32      nvmet_req_complete(req, status);
    in nvmet_execute_prop_get() (req: function argument)
         35  static void nvmet_execute_prop_get(struct nvmet_req *req)
         37      struct nvmet_ctrl *ctrl = req->sq->ctrl;
    [all …]
|
/Linux-v5.4/drivers/block/drbd/ |
D | drbd_req.c |
    in _drbd_start_io_acct() (req: function argument)
         25  static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
         29      generic_start_io_acct(q, bio_op(req->master_bio),
         30          req->i.size >> 9, &device->vdisk->part0);
    in _drbd_end_io_acct() (req: function argument)
         34  static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
         38      generic_end_io_acct(q, bio_op(req->master_bio),
         39          &device->vdisk->part0, req->start_jif);
    in drbd_req_new() (req: local variable)
         44  struct drbd_request *req;
         46  req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
         47  if (!req)
         49  memset(req, 0, sizeof(*req));
    [all …]
|
/Linux-v5.4/net/sunrpc/ |
D | backchannel_rqst.c |
    in xprt_free_allocation() (req: function argument)
         54  static void xprt_free_allocation(struct rpc_rqst *req)
         58      dprintk("RPC: free allocations for req= %p\n", req);
         59      WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
         60      xbufp = &req->rq_rcv_buf;
         62      xbufp = &req->rq_snd_buf;
         64      kfree(req);
    in xprt_alloc_bc_req() (req: local variable)
         81  struct rpc_rqst *req;
         84  req = kzalloc(sizeof(*req), gfp_flags);
         85  if (req == NULL)
         88  req->rq_xprt = xprt;
    [all …]
|
/Linux-v5.4/drivers/macintosh/ |
D | adb-iop.c |
    in adb_iop_end_req() (req: function argument)
         65  static void adb_iop_end_req(struct adb_request *req, int state)
         67      req->complete = 1;
         68      current_req = req->next;
         69      if (req->done)
         70          (*req->done)(req);
    in adb_iop_complete() (req: local variable)
         82  struct adb_request *req;
         87  req = current_req;
         88  if ((adb_iop_state == sending) && req && req->reply_expected) {
    in adb_iop_listen() (req: local variable)
        105  struct adb_request *req;
        113  req = current_req;
    [all …]
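    adb_iop_end_req() marks the request complete and, if a completion callback was supplied, invokes it through the function pointer req->done. A minimal userspace sketch of that callback pattern; the types and names are invented for illustration:

    #include <stdio.h>

    struct demo_request {
        int complete;
        int reply_len;
        void (*done)(struct demo_request *req);   /* optional callback */
    };

    static void end_req(struct demo_request *req)
    {
        req->complete = 1;
        if (req->done)
            (*req->done)(req);   /* notify the submitter */
    }

    static void my_done(struct demo_request *req)
    {
        printf("request finished, reply_len=%d\n", req->reply_len);
    }

    int main(void)
    {
        struct demo_request req = { .reply_len = 4, .done = my_done };

        end_req(&req);
        /* Callers that pass no callback can instead poll req.complete,
         * much as the via-pmu excerpt below spins on "while (!req.complete)". */
        return 0;
    }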
|
D | via-pmu.c |
    file-scope declarations (req: function argument)
        202  static int pmu_send_request(struct adb_request *req, int sync);
        234  int pmu_polled_request(struct adb_request *req);
    in init_pmu() (req: local variable)
        587  struct adb_request req;
        593  pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
        595  while (!req.complete) {
        620  pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
        621  while (!req.complete)
        626  pmu_request(&req, NULL, 1, PMU_GET_VERSION);
        627  pmu_wait_complete(&req);
        628  if (req.reply_len > 0)
    [all …]
|
D | via-macii.c |
    file-scope declarations (req: function argument)
         87  static int macii_send_request(struct adb_request *req, int sync);
         88  static int macii_write(struct adb_request *req);
    in macii_queue_poll() (req: static local variable)
        192  static struct adb_request req;
        203  adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_READREG(next_device, 0));
        205  req.sent = 0;
        206  req.complete = 0;
        207  req.reply_len = 0;
        208  req.next = current_req;
        211  current_req = &req;
        213  current_req = &req;
    [all …]
|
/Linux-v5.4/crypto/ |
D | chacha20poly1305.c |
    struct members (req: member)
         44  struct ahash_request req;     /* must be last member */
         50  struct skcipher_request req;  /* must be last member */
    in async_done_continue() (req: function argument)
         72  static inline void async_done_continue(struct aead_request *req, int err,
         76      struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
         79          err = cont(req);
         83      aead_request_complete(req, err);
    in chacha_iv() (req: function argument)
         86  static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb)
         88      struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
         93      memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv,
    in poly_verify_tag() (req: function argument)
         97  static int poly_verify_tag(struct aead_request *req)
    [all …]
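    chacha_iv() assembles the per-request IV by copying a little-endian 32-bit block counter, then the salt kept in the transform context, then the caller's nonce, back to back into one buffer. A hedged standalone sketch of that buffer layout; the salt and nonce sizes here are invented for illustration and do not come from the crypto API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SALT_LEN  4
    #define NONCE_LEN 8
    #define IV_LEN    (sizeof(uint32_t) + SALT_LEN + NONCE_LEN)

    int main(void)
    {
        uint8_t iv[IV_LEN];
        uint32_t counter = 1;
        uint8_t salt[SALT_LEN]   = { 0xaa, 0xbb, 0xcc, 0xdd };
        uint8_t nonce[NONCE_LEN] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint8_t le_counter[4];

        /* Store the counter little-endian, byte by byte, so the layout
         * does not depend on host endianness. */
        for (int i = 0; i < 4; i++)
            le_counter[i] = (counter >> (8 * i)) & 0xff;

        memcpy(iv, le_counter, sizeof(le_counter));
        memcpy(iv + sizeof(le_counter), salt, SALT_LEN);
        memcpy(iv + sizeof(le_counter) + SALT_LEN, nonce, NONCE_LEN);

        for (size_t i = 0; i < sizeof(iv); i++)
            printf("%02x", iv[i]);
        printf("\n");
        return 0;
    }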
|
D | seqiv.c |
    in seqiv_aead_encrypt_complete2() (req: function argument)
         23  static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
         25      struct aead_request *subreq = aead_request_ctx(req);
         34      geniv = crypto_aead_reqtfm(req);
         35      memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));
    in seqiv_aead_encrypt_complete() (req: local variable)
         44  struct aead_request *req = base->data;
         46  seqiv_aead_encrypt_complete2(req, err);
         47  aead_request_complete(req, err);
    in seqiv_aead_encrypt() (req: function argument)
         50  static int seqiv_aead_encrypt(struct aead_request *req)
         52  struct crypto_aead *geniv = crypto_aead_reqtfm(req);
         54  struct aead_request *subreq = aead_request_ctx(req);
    [all …]
|
/Linux-v5.4/drivers/infiniband/hw/hfi1/ |
D | user_sdma.c |
    file-scope declarations (req: function argument)
         79  static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
         82  static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
         83  static int pin_vector_pages(struct user_sdma_request *req,
         87  static int check_header_template(struct user_sdma_request *req,
         90  static int set_txreq_header(struct user_sdma_request *req,
         92  static int set_txreq_header_ahg(struct user_sdma_request *req,
    in hfi1_user_sdma_process_request() (req: local variable)
        330  struct user_sdma_request *req;
        337  if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
        342      iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
        392  req = pq->reqs + info.comp_idx;
    [all …]
|
/Linux-v5.4/drivers/clk/sunxi/ |
D | clk-sunxi.c |
    in sun4i_get_pll1_factors() (req: function argument)
         33  static void sun4i_get_pll1_factors(struct factors_request *req)
         38      div = req->rate / 6000000;
         39      req->rate = 6000000 * div;
         42      req->m = 0;
         45      if (req->rate >= 768000000 || req->rate == 42000000 ||
         46          req->rate == 54000000)
         47          req->k = 1;
         49          req->k = 0;
         53          req->p = 3;
         57          req->p = 2;
    [all …]
|
D | clk-sun9i-core.c |
    in sun9i_a80_get_pll4_factors() (req: function argument)
         26  static void sun9i_a80_get_pll4_factors(struct factors_request *req)
         33      n = DIV_ROUND_UP(req->rate, 6000000);
         53      req->rate = ((24000000 * n) >> p) / (m + 1);
         54      req->n = n;
         55      req->m = m;
         56      req->p = p;
    in sun9i_a80_get_gt_factors() (req: function argument)
         99  static void sun9i_a80_get_gt_factors(struct factors_request *req)
        103      if (req->parent_rate < req->rate)
        104          req->rate = req->parent_rate;
        106      div = DIV_ROUND_UP(req->parent_rate, req->rate);
    [all …]
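    sun9i_a80_get_pll4_factors() picks n with a ceiling division against a 6 MHz step and then recomputes the achievable rate from the chosen factors as (24 MHz * n >> p) / (m + 1). A small arithmetic check of that relationship, assuming m = 1 and p = 1 (which reduces the formula to 6 MHz steps); the target rate is just an example, not taken from the driver:

    #include <stdio.h>

    /* Same rounding-up division as the kernel's DIV_ROUND_UP(). */
    #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

    int main(void)
    {
        unsigned long target = 297000000;                 /* requested rate */
        unsigned long n = DIV_ROUND_UP(target, 6000000);  /* 6 MHz steps   */
        unsigned long m = 1, p = 1;                       /* assumed here  */
        unsigned long rate = ((24000000UL * n) >> p) / (m + 1);

        /* 297 MHz rounds up to n = 50, giving 300 MHz back. */
        printf("n=%lu rate=%lu Hz (target %lu Hz)\n", n, rate, target);
        return 0;
    }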
|
/Linux-v5.4/include/net/ |
D | request_sock.h |
    callback and function declarations (req: function argument)
         33      struct request_sock *req);
         35      struct request_sock *req);
         38  void (*destructor)(struct request_sock *req);
         39  void (*syn_ack_timeout)(const struct request_sock *req);
         42  int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
    in req_to_sk() (req: function argument)
         73  static inline struct sock *req_to_sk(struct request_sock *req)
         75      return (struct sock *)req;
    in reqsk_alloc() (req: local variable)
         82  struct request_sock *req;
         84  req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
         85  if (!req)
    [all …]
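    req_to_sk() simply casts the request pointer to struct sock *, which is safe for the fields both structures share at the start. A minimal sketch of that first-member "inheritance" idiom in C; the types below are invented for illustration and are not the real socket structures:

    #include <stdio.h>

    /* Both "socket" flavours start with the same common header, so a
     * pointer to either can be treated as a pointer to that common part. */
    struct demo_common {
        int family;
        int state;
    };

    struct demo_sock {
        struct demo_common common;   /* must be first */
        int rcvbuf;
    };

    struct demo_request_sock {
        struct demo_common common;   /* must be first */
        int num_retrans;
    };

    static struct demo_sock *req_to_sk(struct demo_request_sock *req)
    {
        /* Valid only for the shared initial members. */
        return (struct demo_sock *)req;
    }

    int main(void)
    {
        struct demo_request_sock req = { .common = { .family = 2, .state = 1 } };

        printf("family=%d state=%d\n",
               req_to_sk(&req)->common.family,
               req_to_sk(&req)->common.state);
        return 0;
    }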
|
/Linux-v5.4/drivers/s390/cio/ |
D | ccwreq.c |
    in ccwreq_next_path() (req: local variable)
         43  struct ccw_request *req = &cdev->private->req;
         45  if (!req->singlepath) {
         46      req->mask = 0;
         49  req->retries = req->maxretries;
         50  req->mask = lpm_adjust(req->mask >> 1, req->lpm);
         52  return req->mask;
    in ccwreq_stop() (req: local variable)
         60  struct ccw_request *req = &cdev->private->req;
         62  if (req->done)
         64  req->done = 1;
         67  if (rc && rc != -ENODEV && req->drc)
    [all …]
|
/Linux-v5.4/arch/powerpc/platforms/52xx/ |
D | mpc52xx_lpbfifo.c |
    struct member (req: member)
         52  struct mpc52xx_lpbfifo_request *req;
    in mpc52xx_lpbfifo_kick() (req: function argument)
         62  static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
         64      size_t transfer_size = req->size - req->pos;
         70      int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
         71      int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
         72      int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
         95      data = req->data + req->pos;
        149      bd->data[0] = req->data_phys + req->pos;
        163          req->offset + req->pos);
        166      bit_fields = req->cs << 24 | 0x000008;
    [all …]
|
/Linux-v5.4/include/media/ |
D | media-request.h |
    in media_request_lock_for_access() (req: function argument)
         87  media_request_lock_for_access(struct media_request *req)
         92      spin_lock_irqsave(&req->lock, flags);
         93      if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
         94          req->access_count++;
         97      spin_unlock_irqrestore(&req->lock, flags);
    in media_request_unlock_for_access() (req: function argument)
        111  static inline void media_request_unlock_for_access(struct media_request *req)
        115      spin_lock_irqsave(&req->lock, flags);
        116      if (!WARN_ON(!req->access_count))
        117          req->access_count--;
        118      spin_unlock_irqrestore(&req->lock, flags);
    [all …]
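    media_request_lock_for_access() takes the request's spinlock, checks that the request is in the COMPLETE state, and only then bumps access_count; the unlock helper decrements it under the same lock. A hedged userspace analog of that state-checked refcount pattern, with a pthread mutex standing in for the kernel spinlock (compile with -pthread; names invented):

    #include <pthread.h>
    #include <stdio.h>

    enum demo_state { DEMO_IDLE, DEMO_COMPLETE };

    struct demo_request {
        pthread_mutex_t lock;
        enum demo_state state;
        unsigned int access_count;
    };

    /* Returns 0 on success, -1 if the request is not in a readable state. */
    static int lock_for_access(struct demo_request *req)
    {
        int ret = -1;

        pthread_mutex_lock(&req->lock);
        if (req->state == DEMO_COMPLETE) {
            req->access_count++;
            ret = 0;
        }
        pthread_mutex_unlock(&req->lock);
        return ret;
    }

    static void unlock_for_access(struct demo_request *req)
    {
        pthread_mutex_lock(&req->lock);
        if (req->access_count)
            req->access_count--;
        pthread_mutex_unlock(&req->lock);
    }

    int main(void)
    {
        struct demo_request req = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .state = DEMO_COMPLETE,
        };

        if (lock_for_access(&req) == 0) {
            printf("access granted, count=%u\n", req.access_count);
            unlock_for_access(&req);
        }
        return 0;
    }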
|
/Linux-v5.4/net/9p/ |
D | client.c |
    in p9_tag_alloc() (req: local variable)
        270  struct p9_req_t *req = kmem_cache_alloc(p9_req_cache, GFP_NOFS);
        274  if (!req)
        277  if (p9_fcall_init(c, &req->tc, alloc_msize))
        279  if (p9_fcall_init(c, &req->rc, alloc_msize))
        282  p9pdu_reset(&req->tc);
        283  p9pdu_reset(&req->rc);
        284  req->t_err = 0;
        285  req->status = REQ_STATUS_ALLOC;
        286  init_waitqueue_head(&req->wq);
        287  INIT_LIST_HEAD(&req->req_list);
    [all …]
|
/Linux-v5.4/drivers/crypto/inside-secure/ |
D | safexcel_hash.c |
    in safexcel_queued_len() (req: function argument)
         54  static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
         56      return req->len - req->processed;
    in safexcel_context_control() (req: function argument)
         79      struct safexcel_ahash_req *req,
         92      if (!req->processed) {
         94          if (req->finish) {
        112      memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
        114      if (req->finish) {
        116          if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
        117              req->hmac_zlen || (req->processed != req->block_sz)) {
        118              count = req->processed / EIP197_COUNTER_BLOCK_SIZE;
    [all …]
|
/Linux-v5.4/kernel/power/ |
D | qos.c |
    in pm_qos_debug_show() (req: local variable)
        144  struct pm_qos_request *req;
        181  plist_for_each_entry(req, &c->list, node) {
        184      if ((req->node).prio != c->default_value) {
        190          (req->node).prio, state);
    in pm_qos_flags_remove_req() (req: function argument)
        273      struct pm_qos_flags_request *req)
        277  list_del(&req->node);
        278  list_for_each_entry(req, &pqf->list, node)
        279      val |= req->flags;
    in pm_qos_update_flags() (req: function argument)
        296      struct pm_qos_flags_request *req,
        308  pm_qos_flags_remove_req(pqf, req);
    [all …]
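    pm_qos_flags_remove_req() drops one request from the list and then rebuilds the effective value by OR-ing the flags of every request still queued. A small userspace sketch of recomputing an aggregate after removal, using an array instead of a kernel list; the names and flag values are invented for illustration:

    #include <stdio.h>

    #define NREQ 4

    /* Recompute the OR of all still-active flag requests, the same way
     * pm_qos_flags_remove_req() rebuilds the effective value after a
     * request is deleted from the list. */
    static unsigned int effective_flags(const unsigned int *flags,
                                        const int *active, int n)
    {
        unsigned int val = 0;

        for (int i = 0; i < n; i++)
            if (active[i])
                val |= flags[i];
        return val;
    }

    int main(void)
    {
        unsigned int flags[NREQ] = { 0x1, 0x2, 0x4, 0x2 };
        int active[NREQ] = { 1, 1, 1, 1 };

        printf("before removal: 0x%x\n", effective_flags(flags, active, NREQ));
        active[2] = 0;   /* drop the request asking for 0x4 */
        printf("after removal:  0x%x\n", effective_flags(flags, active, NREQ));
        return 0;
    }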
|