/Linux-v4.19/drivers/hid/intel-ish-hid/ishtp/

client.c
    31  static void ishtp_read_list_flush(struct ishtp_cl *cl)  in ishtp_read_list_flush() argument
    37  spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);  in ishtp_read_list_flush()
    38  list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)  in ishtp_read_list_flush()
    39  if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {  in ishtp_read_list_flush()
    43  spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);  in ishtp_read_list_flush()
    55  int ishtp_cl_flush_queues(struct ishtp_cl *cl)  in ishtp_cl_flush_queues() argument
    57  if (WARN_ON(!cl || !cl->dev))  in ishtp_cl_flush_queues()
    60  ishtp_read_list_flush(cl);  in ishtp_cl_flush_queues()
    74  static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)  in ishtp_cl_init() argument
    76  memset(cl, 0, sizeof(struct ishtp_cl));  in ishtp_cl_init()
    [all …]

client-buffers.c
    28  int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)  in ishtp_cl_alloc_rx_ring() argument
    30  size_t len = cl->device->fw_client->props.max_msg_length;  in ishtp_cl_alloc_rx_ring()
    36  for (j = 0; j < cl->rx_ring_size; ++j) {  in ishtp_cl_alloc_rx_ring()
    37  rb = ishtp_io_rb_init(cl);  in ishtp_cl_alloc_rx_ring()
    45  spin_lock_irqsave(&cl->free_list_spinlock, flags);  in ishtp_cl_alloc_rx_ring()
    46  list_add_tail(&rb->list, &cl->free_rb_list.list);  in ishtp_cl_alloc_rx_ring()
    47  spin_unlock_irqrestore(&cl->free_list_spinlock, flags);  in ishtp_cl_alloc_rx_ring()
    53  dev_err(&cl->device->dev, "error in allocating Rx buffers\n");  in ishtp_cl_alloc_rx_ring()
    54  ishtp_cl_free_rx_ring(cl);  in ishtp_cl_alloc_rx_ring()
    66  int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)  in ishtp_cl_alloc_tx_ring() argument
    [all …]

hbm.c
    64  static inline void ishtp_hbm_cl_hdr(struct ishtp_cl *cl, uint8_t hbm_cmd,  in ishtp_hbm_cl_hdr() argument
    72  cmd->host_addr = cl->host_client_id;  in ishtp_hbm_cl_hdr()
    73  cmd->fw_addr = cl->fw_client_id;  in ishtp_hbm_cl_hdr()
    85  static inline bool ishtp_hbm_cl_addr_equal(struct ishtp_cl *cl, void *buf)  in ishtp_hbm_cl_addr_equal() argument
    89  return cl->host_client_id == cmd->host_addr &&  in ishtp_hbm_cl_addr_equal()
    90  cl->fw_client_id == cmd->fw_addr;  in ishtp_hbm_cl_addr_equal()
   294  struct ishtp_cl *cl)  in ishtp_hbm_cl_flow_control_req() argument
   303  spin_lock_irqsave(&cl->fc_spinlock, flags);  in ishtp_hbm_cl_flow_control_req()
   305  ishtp_hbm_cl_hdr(cl, ISHTP_FLOW_CONTROL_CMD, data, len);  in ishtp_hbm_cl_flow_control_req()
   311  if (cl->out_flow_ctrl_creds) {  in ishtp_hbm_cl_flow_control_req()
    [all …]

/Linux-v4.19/net/sched/

sch_hfsc.c
   186  eltree_insert(struct hfsc_class *cl)  in eltree_insert() argument
   188  struct rb_node **p = &cl->sched->eligible.rb_node;  in eltree_insert()
   195  if (cl->cl_e >= cl1->cl_e)  in eltree_insert()
   200  rb_link_node(&cl->el_node, parent, p);  in eltree_insert()
   201  rb_insert_color(&cl->el_node, &cl->sched->eligible);  in eltree_insert()
   205  eltree_remove(struct hfsc_class *cl)  in eltree_remove() argument
   207  rb_erase(&cl->el_node, &cl->sched->eligible);  in eltree_remove()
   211  eltree_update(struct hfsc_class *cl)  in eltree_update() argument
   213  eltree_remove(cl);  in eltree_update()
   214  eltree_insert(cl);  in eltree_update()
   [all …]

sch_cbq.c
   168  #define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)  argument
   186  struct cbq_class *cl;  in cbq_reclassify() local
   188  for (cl = this->tparent; cl; cl = cl->tparent) {  in cbq_reclassify()
   189  struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];  in cbq_reclassify()
   215  struct cbq_class *cl = NULL;  in cbq_classify() local
   224  (cl = cbq_class_lookup(q, prio)) != NULL)  in cbq_classify()
   225  return cl;  in cbq_classify()
   240  cl = (void *)res.class;  in cbq_classify()
   241  if (!cl) {  in cbq_classify()
   243  cl = cbq_class_lookup(q, res.classid);  in cbq_classify()
   [all …]

sch_htb.c
   216  struct htb_class *cl;  in htb_classify() local
   227  cl = htb_find(skb->priority, sch);  in htb_classify()
   228  if (cl) {  in htb_classify()
   229  if (cl->level == 0)  in htb_classify()
   230  return cl;  in htb_classify()
   232  tcf = rcu_dereference_bh(cl->filter_list);  in htb_classify()
   250  cl = (void *)res.class;  in htb_classify()
   251  if (!cl) {  in htb_classify()
   254  cl = htb_find(res.classid, sch);  in htb_classify()
   255  if (!cl)  in htb_classify()
   [all …]

sch_drr.c
    53  static void drr_purge_queue(struct drr_class *cl)  in drr_purge_queue() argument
    55  unsigned int len = cl->qdisc->q.qlen;  in drr_purge_queue()
    56  unsigned int backlog = cl->qdisc->qstats.backlog;  in drr_purge_queue()
    58  qdisc_reset(cl->qdisc);  in drr_purge_queue()
    59  qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);  in drr_purge_queue()
    71  struct drr_class *cl = (struct drr_class *)*arg;  in drr_change_class() local
    95  if (cl != NULL) {  in drr_change_class()
    97  err = gen_replace_estimator(&cl->bstats, NULL,  in drr_change_class()
    98  &cl->rate_est,  in drr_change_class()
   110  cl->quantum = quantum;  in drr_change_class()
   [all …]

sch_qfq.c
   220  static void qfq_purge_queue(struct qfq_class *cl)  in qfq_purge_queue() argument
   222  unsigned int len = cl->qdisc->q.qlen;  in qfq_purge_queue()
   223  unsigned int backlog = cl->qdisc->qstats.backlog;  in qfq_purge_queue()
   225  qdisc_reset(cl->qdisc);  in qfq_purge_queue()
   226  qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);  in qfq_purge_queue()
   325  struct qfq_class *cl)  in qfq_add_to_agg() argument
   327  cl->agg = agg;  in qfq_add_to_agg()
   330  if (cl->qdisc->q.qlen > 0) { /* adding an active class */  in qfq_add_to_agg()
   331  list_add_tail(&cl->alist, &agg->active);  in qfq_add_to_agg()
   333  cl && q->in_serv_agg != agg) /* agg was inactive */  in qfq_add_to_agg()
   [all …]

/Linux-v4.19/drivers/misc/mei/

client.c
   364  cb->cl->tx_cb_queued++;  in mei_tx_cb_enqueue()
   376  if (!WARN_ON(cb->cl->tx_cb_queued == 0))  in mei_tx_cb_dequeue()
   377  cb->cl->tx_cb_queued--;  in mei_tx_cb_dequeue()
   391  static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,  in mei_io_cb_init() argument
   403  cb->cl = cl;  in mei_io_cb_init()
   416  const struct mei_cl *cl)  in mei_io_list_flush_cl() argument
   421  if (mei_cl_cmp_id(cl, cb->cl))  in mei_io_list_flush_cl()
   433  const struct mei_cl *cl)  in mei_io_tx_list_free_cl() argument
   438  if (mei_cl_cmp_id(cl, cb->cl))  in mei_io_tx_list_free_cl()
   468  struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,  in mei_cl_alloc_cb() argument
   [all …]

client.h
    93  int mei_cl_link(struct mei_cl *cl);
    94  int mei_cl_unlink(struct mei_cl *cl);
    98  struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
   100  struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
   103  struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
   106  int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
   119  static inline bool mei_cl_is_connected(struct mei_cl *cl)  in mei_cl_is_connected() argument
   121  return cl->state == MEI_FILE_CONNECTED;  in mei_cl_is_connected()
   131  static inline u8 mei_cl_me_id(const struct mei_cl *cl)  in mei_cl_me_id() argument
   133  return cl->me_cl ? cl->me_cl->client_id : 0;  in mei_cl_me_id()
   [all …]

interrupt.c
    43  struct mei_cl *cl;  in mei_irq_compl_handler() local
    46  cl = cb->cl;  in mei_irq_compl_handler()
    50  mei_cl_complete(cl, cb);  in mei_irq_compl_handler()
    63  static inline int mei_cl_hbm_equal(struct mei_cl *cl,  in mei_cl_hbm_equal() argument
    66  return mei_cl_host_addr(cl) == mei_hdr->host_addr &&  in mei_cl_hbm_equal()
    67  mei_cl_me_id(cl) == mei_hdr->me_addr;  in mei_cl_hbm_equal()
    96  static int mei_cl_irq_read_msg(struct mei_cl *cl,  in mei_cl_irq_read_msg() argument
   100  struct mei_device *dev = cl->dev;  in mei_cl_irq_read_msg()
   104  cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);  in mei_cl_irq_read_msg()
   106  if (!mei_cl_is_fixed_address(cl)) {  in mei_cl_irq_read_msg()
   [all …]

main.c
    51  struct mei_cl *cl;  in mei_open() local
    68  cl = mei_cl_alloc_linked(dev);  in mei_open()
    69  if (IS_ERR(cl)) {  in mei_open()
    70  err = PTR_ERR(cl);  in mei_open()
    74  cl->fp = file;  in mei_open()
    75  file->private_data = cl;  in mei_open()
    96  struct mei_cl *cl = file->private_data;  in mei_release() local
   100  if (WARN_ON(!cl || !cl->dev))  in mei_release()
   103  dev = cl->dev;  in mei_release()
   107  rets = mei_cl_disconnect(cl);  in mei_release()
   [all …]

bus.c
    43  ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,  in __mei_cl_send() argument
    50  if (WARN_ON(!cl || !cl->dev))  in __mei_cl_send()
    53  bus = cl->dev;  in __mei_cl_send()
    61  if (!mei_cl_is_connected(cl)) {  in __mei_cl_send()
    67  if (!mei_me_cl_is_active(cl->me_cl)) {  in __mei_cl_send()
    72  if (length > mei_cl_mtu(cl)) {  in __mei_cl_send()
    77  while (cl->tx_cb_queued >= bus->tx_queue_limit) {  in __mei_cl_send()
    79  rets = wait_event_interruptible(cl->tx_wait,  in __mei_cl_send()
    80  cl->writing_state == MEI_WRITE_COMPLETE ||  in __mei_cl_send()
    81  (!mei_cl_is_connected(cl)));  in __mei_cl_send()
   [all …]

hbm.c
   162  void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)  in mei_hbm_cl_hdr() argument
   169  cmd->host_addr = mei_cl_host_addr(cl);  in mei_hbm_cl_hdr()
   170  cmd->me_addr = mei_cl_me_id(cl);  in mei_hbm_cl_hdr()
   184  static inline int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,  in mei_hbm_cl_write() argument
   190  mei_hbm_cl_hdr(cl, hbm_cmd, buf, len);  in mei_hbm_cl_write()
   205  bool mei_hbm_cl_addr_equal(struct mei_cl *cl, struct mei_hbm_cl_cmd *cmd)  in mei_hbm_cl_addr_equal() argument
   207  return mei_cl_host_addr(cl) == cmd->host_addr &&  in mei_hbm_cl_addr_equal()
   208  mei_cl_me_id(cl) == cmd->me_addr;  in mei_hbm_cl_addr_equal()
   223  struct mei_cl *cl;  in mei_hbm_cl_find_by_cmd() local
   225  list_for_each_entry(cl, &dev->file_list, link)  in mei_hbm_cl_find_by_cmd()
   [all …]

/Linux-v4.19/drivers/md/bcache/

closure.h
   169  void closure_sub(struct closure *cl, int v);
   170  void closure_put(struct closure *cl);
   172  bool closure_wait(struct closure_waitlist *list, struct closure *cl);
   173  void __closure_sync(struct closure *cl);
   181  static inline void closure_sync(struct closure *cl)  in closure_sync() argument
   183  if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)  in closure_sync()
   184  __closure_sync(cl);  in closure_sync()
   190  void closure_debug_create(struct closure *cl);
   191  void closure_debug_destroy(struct closure *cl);
   196  static inline void closure_debug_create(struct closure *cl) {}  in closure_debug_create() argument
   [all …]

closure.c
    16  static inline void closure_put_after_sub(struct closure *cl, int flags)  in closure_put_after_sub() argument
    24  if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {  in closure_put_after_sub()
    25  atomic_set(&cl->remaining,  in closure_put_after_sub()
    27  closure_queue(cl);  in closure_put_after_sub()
    29  struct closure *parent = cl->parent;  in closure_put_after_sub()
    30  closure_fn *destructor = cl->fn;  in closure_put_after_sub()
    32  closure_debug_destroy(cl);  in closure_put_after_sub()
    35  destructor(cl);  in closure_put_after_sub()
    44  void closure_sub(struct closure *cl, int v)  in closure_sub() argument
    46  closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));  in closure_sub()
   [all …]

request.c
    28  static void bch_data_insert_start(struct closure *cl);
    58  static void bch_data_insert_keys(struct closure *cl)  in bch_data_insert_keys() argument
    60  struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);  in bch_data_insert_keys()
    73  while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)  in bch_data_insert_keys()
    74  closure_sync(&s->cl);  in bch_data_insert_keys()
    79  op->flush_journal ? cl : NULL);  in bch_data_insert_keys()
    94  continue_at(cl, bch_data_insert_start, op->wq);  in bch_data_insert_keys()
    99  closure_return(cl);  in bch_data_insert_keys()
   120  static void bch_data_invalidate(struct closure *cl)  in bch_data_invalidate() argument
   122  struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);  in bch_data_invalidate()
   [all …]

/Linux-v4.19/drivers/hsi/

hsi_core.c
    73  struct hsi_client *cl = to_hsi_client(dev);  in hsi_client_release() local
    75  kfree(cl->tx_cfg.channels);  in hsi_client_release()
    76  kfree(cl->rx_cfg.channels);  in hsi_client_release()
    77  kfree(cl);  in hsi_client_release()
    83  struct hsi_client *cl;  in hsi_new_client() local
    86  cl = kzalloc(sizeof(*cl), GFP_KERNEL);  in hsi_new_client()
    87  if (!cl)  in hsi_new_client()
    90  cl->tx_cfg = info->tx_cfg;  in hsi_new_client()
    91  if (cl->tx_cfg.channels) {  in hsi_new_client()
    92  size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels);  in hsi_new_client()
   [all …]

/Linux-v4.19/drivers/clk/

clkdev.c
   134  struct clk_lookup *p, *cl = NULL;  in clk_find() local
   156  cl = p;  in clk_find()
   163  return cl;  in clk_find()
   168  struct clk_lookup *cl;  in clk_get_sys() local
   173  cl = clk_find(dev_id, con_id);  in clk_get_sys()
   174  if (!cl)  in clk_get_sys()
   177  clk = __clk_create_clk(cl->clk_hw, dev_id, con_id);  in clk_get_sys()
   183  cl = NULL;  in clk_get_sys()
   190  return cl ? clk : ERR_PTR(-ENOENT);  in clk_get_sys()
   215  static void __clkdev_add(struct clk_lookup *cl)  in __clkdev_add() argument
   [all …]

/Linux-v4.19/include/linux/hsi/

hsi.h
   151  static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data)  in hsi_client_set_drvdata() argument
   153  dev_set_drvdata(&cl->device, data);  in hsi_client_set_drvdata()
   156  static inline void *hsi_client_drvdata(struct hsi_client *cl)  in hsi_client_drvdata() argument
   158  return dev_get_drvdata(&cl->device);  in hsi_client_drvdata()
   161  int hsi_register_port_event(struct hsi_client *cl,
   163  int hsi_unregister_port_event(struct hsi_client *cl);
   200  struct hsi_client *cl;  member
   243  int (*setup)(struct hsi_client *cl);
   244  int (*flush)(struct hsi_client *cl);
   245  int (*start_tx)(struct hsi_client *cl);
   [all …]

/Linux-v4.19/drivers/hsi/clients/

ssi_protocol.c
    48  void ssi_waketest(struct hsi_client *cl, unsigned int enable);
   154  struct hsi_client *cl;  member
   231  struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);  in ssip_release_cmd()
   233  dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));  in ssip_release_cmd()
   345  if (slave->device.parent == ssi->cl->device.parent) {  in ssip_slave_get_master()
   346  master = ssi->cl;  in ssip_slave_get_master()
   399  static void ssip_reset(struct hsi_client *cl)  in ssip_reset() argument
   401  struct ssi_protocol *ssi = hsi_client_drvdata(cl);  in ssip_reset()
   407  hsi_flush(cl);  in ssip_reset()
   410  hsi_stop_tx(cl);  in ssip_reset()
   [all …]

hsi_char.c
    98  struct hsi_client *cl;  member
   118  struct hsi_client *cl;  member
   240  struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);  in hsc_rx_completed()
   260  struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);  in hsc_tx_completed()
   280  struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);  in hsc_break_req_destructor()
   288  struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);  in hsc_break_received()
   307  hsi_flush(msg->cl);  in hsc_break_received()
   308  ret = hsi_async_read(msg->cl, msg);  in hsc_break_received()
   313  static int hsc_break_request(struct hsi_client *cl)  in hsc_break_request() argument
   315  struct hsc_client_data *cl_data = hsi_client_drvdata(cl);  in hsc_break_request()
   [all …]

/Linux-v4.19/arch/sh/kernel/

process_64.c
    43  unsigned long long ah, al, bh, bl, ch, cl;  in show_regs() local
    53  cl = (regs->regs[15]) & 0xffffffff;  in show_regs()
    55  ah, al, bh, bl, ch, cl);  in show_regs()
    64  asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));  in show_regs()
    66  cl = (cl) & 0xffffffff;  in show_regs()
    68  ah, al, bh, bl, ch, cl);  in show_regs()
    75  cl = (regs->regs[2]) & 0xffffffff;  in show_regs()
    77  ah, al, bh, bl, ch, cl);  in show_regs()
    84  cl = (regs->regs[5]) & 0xffffffff;  in show_regs()
    86  ah, al, bh, bl, ch, cl);  in show_regs()
   [all …]

/Linux-v4.19/drivers/gpu/drm/bridge/

parade-ps8622.c
   101  struct i2c_client *cl = ps8622->client;  in ps8622_send_config() local
   105  err = ps8622_set(cl, 0x02, 0xa1, 0x01);  in ps8622_send_config()
   110  err = ps8622_set(cl, 0x04, 0x14, 0x01);  in ps8622_send_config()
   115  err = ps8622_set(cl, 0x04, 0xe3, 0x20);  in ps8622_send_config()
   120  err = ps8622_set(cl, 0x04, 0xe2, 0x80);  in ps8622_send_config()
   128  err = ps8622_set(cl, 0x04, 0x8a, 0x0c);  in ps8622_send_config()
   133  err = ps8622_set(cl, 0x04, 0x89, 0x08);  in ps8622_send_config()
   138  err = ps8622_set(cl, 0x04, 0x71, 0x2d);  in ps8622_send_config()
   143  err = ps8622_set(cl, 0x04, 0x7d, 0x07);  in ps8622_send_config()
   148  err = ps8622_set(cl, 0x04, 0x7b, 0x00);  in ps8622_send_config()
   [all …]

/Linux-v4.19/arch/openrisc/kernel/

dma.c
    32  unsigned long cl;  in page_set_nocache() local
    44  for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)  in page_set_nocache()
    45  mtspr(SPR_DCBFR, cl);  in page_set_nocache()
   136  unsigned long cl;  in arch_sync_dma_for_device() local
   142  for (cl = addr; cl < addr + size;  in arch_sync_dma_for_device()
   143  cl += cpuinfo->dcache_block_size)  in arch_sync_dma_for_device()
   144  mtspr(SPR_DCBFR, cl);  in arch_sync_dma_for_device()
   148  for (cl = addr; cl < addr + size;  in arch_sync_dma_for_device()
   149  cl += cpuinfo->dcache_block_size)  in arch_sync_dma_for_device()
   150  mtspr(SPR_DCBIR, cl);  in arch_sync_dma_for_device()