
Searched refs:q (Results 1 – 25 of 1174) sorted by relevance


/Linux-v4.19/crypto/
gf128mul.c:56 #define gf128mul_dat(q) { \ argument
57 q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
58 q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
59 q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
60 q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
61 q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
62 q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
63 q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
64 q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
65 q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
[all …]
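For context on the gf128mul.c hit above: gf128mul_dat() is an "x-macro" table generator, where the caller passes a macro q that is applied to every byte value 0x00–0xff, so one definition can emit several differently computed lookup tables. A minimal standalone sketch of the pattern (not kernel code; the names below are made up):

#include <stdio.h>

/* one table definition, parameterised by the per-entry macro q */
#define SMALL_DAT(q) { q(0), q(1), q(2), q(3), q(4), q(5), q(6), q(7) }

#define xx_double(i) ((i) * 2)        /* one expansion: doubled values  */
#define xx_square(i) ((i) * (i))      /* another expansion: squared values */

static const int tab_double[8] = SMALL_DAT(xx_double);
static const int tab_square[8] = SMALL_DAT(xx_square);

int main(void)
{
	int i;
	for (i = 0; i < 8; i++)
		printf("%d: double=%d square=%d\n", i, tab_double[i], tab_square[i]);
	return 0;
}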
/Linux-v4.19/net/xdp/
xsk_queue.h:47 static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q) in xskq_nb_invalid_descs() argument
49 return q ? q->invalid_descs : 0; in xskq_nb_invalid_descs()
52 static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt) in xskq_nb_avail() argument
54 u32 entries = q->prod_tail - q->cons_tail; in xskq_nb_avail()
58 q->prod_tail = READ_ONCE(q->ring->producer); in xskq_nb_avail()
59 entries = q->prod_tail - q->cons_tail; in xskq_nb_avail()
65 static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt) in xskq_nb_free() argument
67 u32 free_entries = q->nentries - (producer - q->cons_tail); in xskq_nb_free()
73 q->cons_tail = READ_ONCE(q->ring->consumer); in xskq_nb_free()
74 return q->nentries - (producer - q->cons_tail); in xskq_nb_free()
[all …]
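The xsk_queue.h hit shows the usual single-producer/single-consumer ring arithmetic: producer and consumer are free-running u32 counters, so their unsigned difference gives the fill level even across wraparound. A minimal user-space sketch of the same idea (field names are assumptions, not the kernel API):

#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t nentries;	/* ring size */
	uint32_t producer;	/* free-running, only ever incremented */
	uint32_t consumer;	/* free-running, only ever incremented */
};

static uint32_t ring_nb_avail(const struct ring *q)
{
	return q->producer - q->consumer;			/* entries ready to consume */
}

static uint32_t ring_nb_free(const struct ring *q)
{
	return q->nentries - (q->producer - q->consumer);	/* slots still writable */
}

int main(void)
{
	/* counters close to the 32-bit wraparound still give avail=2, free=6 */
	struct ring q = { 8, UINT32_MAX - 1, UINT32_MAX - 3 };
	printf("avail=%u free=%u\n", ring_nb_avail(&q), ring_nb_free(&q));
	return 0;
}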
/Linux-v4.19/drivers/media/v4l2-core/
videobuf-core.c:52 #define CALL(q, f, arg...) \ argument
53 ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
54 #define CALLPTR(q, f, arg...) \ argument
55 ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
57 struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q) in videobuf_alloc_vb() argument
61 BUG_ON(q->msize < sizeof(*vb)); in videobuf_alloc_vb()
63 if (!q->int_ops || !q->int_ops->alloc_vb) { in videobuf_alloc_vb()
68 vb = q->int_ops->alloc_vb(q->msize); in videobuf_alloc_vb()
78 static int state_neither_active_nor_queued(struct videobuf_queue *q, in state_neither_active_nor_queued() argument
84 spin_lock_irqsave(q->irqlock, flags); in state_neither_active_nor_queued()
[all …]
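The videobuf-core.c CALL()/CALLPTR() macros are the optional-callback idiom: invoke a driver-supplied hook if it exists, otherwise fall back to a neutral default. A short sketch of the same idiom using standard variadic macros (struct and field names here are illustrative only):

#include <stddef.h>

struct toy_ops {
	int   (*prepare)(int arg);
	void *(*alloc)(size_t size);
};

/* call the hook if the driver set it, else return a default value */
#define CALL(o, f, ...)    ((o)->f ? (o)->f(__VA_ARGS__) : 0)
#define CALLPTR(o, f, ...) ((o)->f ? (o)->f(__VA_ARGS__) : NULL)

/* usage:  err = CALL(ops, prepare, 42);  buf = CALLPTR(ops, alloc, 4096); */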
/Linux-v4.19/sound/core/seq/
seq_queue.c:63 static int queue_list_add(struct snd_seq_queue *q) in queue_list_add() argument
71 queue_list[i] = q; in queue_list_add()
72 q->queue = i; in queue_list_add()
84 struct snd_seq_queue *q; in queue_list_remove() local
88 q = queue_list[id]; in queue_list_remove()
89 if (q) { in queue_list_remove()
90 spin_lock(&q->owner_lock); in queue_list_remove()
91 if (q->owner == client) { in queue_list_remove()
93 q->klocked = 1; in queue_list_remove()
94 spin_unlock(&q->owner_lock); in queue_list_remove()
[all …]
/Linux-v4.19/drivers/isdn/hardware/eicon/
dqueue.c:17 diva_data_q_init(diva_um_idi_data_queue_t *q, in diva_data_q_init() argument
22 q->max_length = max_length; in diva_data_q_init()
23 q->segments = max_segments; in diva_data_q_init()
25 for (i = 0; i < q->segments; i++) { in diva_data_q_init()
26 q->data[i] = NULL; in diva_data_q_init()
27 q->length[i] = 0; in diva_data_q_init()
29 q->read = q->write = q->count = q->segment_pending = 0; in diva_data_q_init()
31 for (i = 0; i < q->segments; i++) { in diva_data_q_init()
32 if (!(q->data[i] = diva_os_malloc(0, q->max_length))) { in diva_data_q_init()
33 diva_data_q_finit(q); in diva_data_q_init()
[all …]
/Linux-v4.19/drivers/s390/cio/
qdio_main.c:112 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, in qdio_do_eqbs() argument
115 int tmp_count = count, tmp_start = start, nr = q->nr; in qdio_do_eqbs()
118 qperf_inc(q, eqbs); in qdio_do_eqbs()
120 if (!q->is_input_q) in qdio_do_eqbs()
121 nr += q->irq_ptr->nr_input_qs; in qdio_do_eqbs()
123 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, in qdio_do_eqbs()
133 qperf_inc(q, eqbs_partial); in qdio_do_eqbs()
134 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", in qdio_do_eqbs()
139 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); in qdio_do_eqbs()
142 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); in qdio_do_eqbs()
[all …]
/Linux-v4.19/sound/core/seq/oss/
seq_oss_readq.c:48 struct seq_oss_readq *q; in snd_seq_oss_readq_new() local
50 q = kzalloc(sizeof(*q), GFP_KERNEL); in snd_seq_oss_readq_new()
51 if (!q) in snd_seq_oss_readq_new()
54 q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL); in snd_seq_oss_readq_new()
55 if (!q->q) { in snd_seq_oss_readq_new()
56 kfree(q); in snd_seq_oss_readq_new()
60 q->maxlen = maxlen; in snd_seq_oss_readq_new()
61 q->qlen = 0; in snd_seq_oss_readq_new()
62 q->head = q->tail = 0; in snd_seq_oss_readq_new()
63 init_waitqueue_head(&q->midi_sleep); in snd_seq_oss_readq_new()
[all …]
seq_oss_writeq.c:40 struct seq_oss_writeq *q; in snd_seq_oss_writeq_new() local
43 if ((q = kzalloc(sizeof(*q), GFP_KERNEL)) == NULL) in snd_seq_oss_writeq_new()
45 q->dp = dp; in snd_seq_oss_writeq_new()
46 q->maxlen = maxlen; in snd_seq_oss_writeq_new()
47 spin_lock_init(&q->sync_lock); in snd_seq_oss_writeq_new()
48 q->sync_event_put = 0; in snd_seq_oss_writeq_new()
49 q->sync_time = 0; in snd_seq_oss_writeq_new()
50 init_waitqueue_head(&q->sync_sleep); in snd_seq_oss_writeq_new()
59 return q; in snd_seq_oss_writeq_new()
66 snd_seq_oss_writeq_delete(struct seq_oss_writeq *q) in snd_seq_oss_writeq_delete() argument
[all …]
/Linux-v4.19/net/sched/
Dsch_choke.c80 static unsigned int choke_len(const struct choke_sched_data *q) in choke_len() argument
82 return (q->tail - q->head) & q->tab_mask; in choke_len()
86 static int use_ecn(const struct choke_sched_data *q) in use_ecn() argument
88 return q->flags & TC_RED_ECN; in use_ecn()
92 static int use_harddrop(const struct choke_sched_data *q) in use_harddrop() argument
94 return q->flags & TC_RED_HARDDROP; in use_harddrop()
98 static void choke_zap_head_holes(struct choke_sched_data *q) in choke_zap_head_holes() argument
101 q->head = (q->head + 1) & q->tab_mask; in choke_zap_head_holes()
102 if (q->head == q->tail) in choke_zap_head_holes()
104 } while (q->tab[q->head] == NULL); in choke_zap_head_holes()
[all …]
sch_sfq.c:154 static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val) in sfq_dep_head() argument
157 return &q->slots[val].dep; in sfq_dep_head()
158 return &q->dep[val - SFQ_MAX_FLOWS]; in sfq_dep_head()
161 static unsigned int sfq_hash(const struct sfq_sched_data *q, in sfq_hash() argument
164 return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1); in sfq_hash()
170 struct sfq_sched_data *q = qdisc_priv(sch); in sfq_classify() local
177 TC_H_MIN(skb->priority) <= q->divisor) in sfq_classify()
180 fl = rcu_dereference_bh(q->filter_list); in sfq_classify()
182 return sfq_hash(q, skb) + 1; in sfq_classify()
198 if (TC_H_MIN(res.classid) <= q->divisor) in sfq_classify()
[all …]
sch_netem.c:200 static bool loss_4state(struct netem_sched_data *q) in loss_4state() argument
202 struct clgstate *clg = &q->clg; in loss_4state()
265 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
267 struct clgstate *clg = &q->clg; in loss_gilb_ell()
286 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
288 switch (q->loss_model) { in loss_event()
291 return q->loss && q->loss >= get_crandom(&q->loss_cor); in loss_event()
299 return loss_4state(q); in loss_event()
307 return loss_gilb_ell(q); in loss_event()
345 static u64 packet_time_ns(u64 len, const struct netem_sched_data *q) in packet_time_ns() argument
[all …]
sch_sfb.c:127 static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) in increment_one_qlen() argument
130 struct sfb_bucket *b = &q->bins[slot].bins[0][0]; in increment_one_qlen()
142 static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) in increment_qlen() argument
148 increment_one_qlen(sfbhash, 0, q); in increment_qlen()
152 increment_one_qlen(sfbhash, 1, q); in increment_qlen()
156 struct sfb_sched_data *q) in decrement_one_qlen() argument
159 struct sfb_bucket *b = &q->bins[slot].bins[0][0]; in decrement_one_qlen()
171 static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) in decrement_qlen() argument
177 decrement_one_qlen(sfbhash, 0, q); in decrement_qlen()
181 decrement_one_qlen(sfbhash, 1, q); in decrement_qlen()
[all …]
sch_pie.c:101 struct pie_sched_data *q = qdisc_priv(sch); in drop_early() local
103 u32 local_prob = q->vars.prob; in drop_early()
107 if (q->vars.burst_time > 0) in drop_early()
113 if ((q->vars.qdelay < q->params.target / 2) in drop_early()
114 && (q->vars.prob < MAX_PROB / 5)) in drop_early()
126 if (q->params.bytemode && packet_size <= mtu) in drop_early()
129 local_prob = q->vars.prob; in drop_early()
141 struct pie_sched_data *q = qdisc_priv(sch); in pie_qdisc_enqueue() local
145 q->stats.overlimit++; in pie_qdisc_enqueue()
151 } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) && in pie_qdisc_enqueue()
[all …]
sch_fq_codel.c:76 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, in fq_codel_hash() argument
79 return reciprocal_scale(skb_get_hash(skb), q->flows_cnt); in fq_codel_hash()
85 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_classify() local
92 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_codel_classify()
95 filter = rcu_dereference_bh(q->filter_list); in fq_codel_classify()
97 return fq_codel_hash(q, skb) + 1; in fq_codel_classify()
113 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_codel_classify()
146 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_drop() local
160 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_drop()
161 if (q->backlogs[i] > maxbacklog) { in fq_codel_drop()
[all …]
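The fq_codel_hash() hit maps a 32-bit skb hash onto flows_cnt buckets with reciprocal_scale(), i.e. a multiply-and-take-high-bits step instead of a modulo. A standalone reimplementation for illustration (an assumption-level sketch, not the kernel helper itself):

#include <stdint.h>

/* map a 32-bit hash uniformly onto [0, n_buckets) without a division */
static uint32_t scale_to_buckets(uint32_t hash, uint32_t n_buckets)
{
	return (uint32_t)(((uint64_t)hash * n_buckets) >> 32);
}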
sch_skbprio.c:44 static u16 calc_new_high_prio(const struct skbprio_sched_data *q) in calc_new_high_prio() argument
48 for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) { in calc_new_high_prio()
49 if (!skb_queue_empty(&q->qdiscs[prio])) in calc_new_high_prio()
57 static u16 calc_new_low_prio(const struct skbprio_sched_data *q) in calc_new_low_prio() argument
61 for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) { in calc_new_low_prio()
62 if (!skb_queue_empty(&q->qdiscs[prio])) in calc_new_low_prio()
76 struct skbprio_sched_data *q = qdisc_priv(sch); in skbprio_enqueue() local
85 qdisc = &q->qdiscs[prio]; in skbprio_enqueue()
86 if (sch->q.qlen < sch->limit) { in skbprio_enqueue()
89 q->qstats[prio].backlog += qdisc_pkt_len(skb); in skbprio_enqueue()
[all …]
/Linux-v4.19/drivers/mtd/spi-nor/
fsl-quadspi.c:302 static inline int needs_swap_endian(struct fsl_qspi *q) in needs_swap_endian() argument
304 return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN; in needs_swap_endian()
307 static inline int needs_4x_clock(struct fsl_qspi *q) in needs_4x_clock() argument
309 return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK; in needs_4x_clock()
312 static inline int needs_fill_txfifo(struct fsl_qspi *q) in needs_fill_txfifo() argument
314 return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890; in needs_fill_txfifo()
317 static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) in needs_wakeup_wait_mode() argument
319 return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618; in needs_wakeup_wait_mode()
328 static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr) in qspi_writel() argument
330 if (q->big_endian) in qspi_writel()
[all …]
/Linux-v4.19/block/
blk-core.c:79 void blk_queue_flag_set(unsigned int flag, struct request_queue *q) in blk_queue_flag_set() argument
83 spin_lock_irqsave(q->queue_lock, flags); in blk_queue_flag_set()
84 queue_flag_set(flag, q); in blk_queue_flag_set()
85 spin_unlock_irqrestore(q->queue_lock, flags); in blk_queue_flag_set()
94 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q) in blk_queue_flag_clear() argument
98 spin_lock_irqsave(q->queue_lock, flags); in blk_queue_flag_clear()
99 queue_flag_clear(flag, q); in blk_queue_flag_clear()
100 spin_unlock_irqrestore(q->queue_lock, flags); in blk_queue_flag_clear()
112 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q) in blk_queue_flag_test_and_set() argument
117 spin_lock_irqsave(q->queue_lock, flags); in blk_queue_flag_test_and_set()
[all …]
blk-sysfs.c:60 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument
62 return queue_var_show(q->nr_requests, (page)); in queue_requests_show()
66 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
71 if (!q->request_fn && !q->mq_ops) in queue_requests_store()
81 if (q->request_fn) in queue_requests_store()
82 err = blk_update_nr_requests(q, nr); in queue_requests_store()
84 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
92 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
94 unsigned long ra_kb = q->backing_dev_info->ra_pages << in queue_ra_show()
101 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
[all …]
blk.h:51 static inline void queue_lockdep_assert_held(struct request_queue *q) in queue_lockdep_assert_held() argument
53 if (q->queue_lock) in queue_lockdep_assert_held()
54 lockdep_assert_held(q->queue_lock); in queue_lockdep_assert_held()
58 struct request_queue *q) in queue_flag_set_unlocked() argument
60 if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) && in queue_flag_set_unlocked()
61 kref_read(&q->kobj.kref)) in queue_flag_set_unlocked()
62 lockdep_assert_held(q->queue_lock); in queue_flag_set_unlocked()
63 __set_bit(flag, &q->queue_flags); in queue_flag_set_unlocked()
67 struct request_queue *q) in queue_flag_clear_unlocked() argument
69 if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) && in queue_flag_clear_unlocked()
[all …]
elevator.c:60 struct request_queue *q = rq->q; in elv_iosched_allow_bio_merge() local
61 struct elevator_queue *e = q->elevator; in elv_iosched_allow_bio_merge()
64 return e->type->ops.mq.allow_merge(q, rq, bio); in elv_iosched_allow_bio_merge()
66 return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio); in elv_iosched_allow_bio_merge()
116 static struct elevator_type *elevator_get(struct request_queue *q, in elevator_get() argument
123 e = elevator_find(name, q->mq_ops != NULL); in elevator_get()
128 e = elevator_find(name, q->mq_ops != NULL); in elevator_get()
174 struct elevator_queue *elevator_alloc(struct request_queue *q, in elevator_alloc() argument
179 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); in elevator_alloc()
207 int elevator_init(struct request_queue *q) in elevator_init() argument
[all …]
/Linux-v4.19/drivers/media/common/videobuf2/
videobuf2-core.c:92 #define log_qop(q, op) \ argument
93 dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
94 (q)->ops->op ? "" : " (nop)")
96 #define call_qop(q, op, args...) \ argument
100 log_qop(q, op); \
101 err = (q)->ops->op ? (q)->ops->op(args) : 0; \
103 (q)->cnt_ ## op++; \
107 #define call_void_qop(q, op, args...) \ argument
109 log_qop(q, op); \
110 if ((q)->ops->op) \
[all …]
/Linux-v4.19/drivers/infiniband/sw/rxe/
rxe_queue.h:86 void rxe_queue_reset(struct rxe_queue *q);
92 int rxe_queue_resize(struct rxe_queue *q,
104 static inline int next_index(struct rxe_queue *q, int index) in next_index() argument
106 return (index + 1) & q->buf->index_mask; in next_index()
109 static inline int queue_empty(struct rxe_queue *q) in queue_empty() argument
111 return ((q->buf->producer_index - q->buf->consumer_index) in queue_empty()
112 & q->index_mask) == 0; in queue_empty()
115 static inline int queue_full(struct rxe_queue *q) in queue_full() argument
117 return ((q->buf->producer_index + 1 - q->buf->consumer_index) in queue_full()
118 & q->index_mask) == 0; in queue_full()
[all …]
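The rxe_queue.h hit is the classic power-of-two ring with masked indices: empty when producer and consumer coincide, full when advancing the producer once would land on the consumer (one slot is deliberately left unused). A minimal sketch with assumed field names:

#include <stdint.h>
#include <stdbool.h>

struct toy_ring {
	uint32_t index_mask;		/* nentries - 1, nentries a power of two */
	uint32_t producer_index;
	uint32_t consumer_index;
};

static bool toy_queue_empty(const struct toy_ring *q)
{
	return ((q->producer_index - q->consumer_index) & q->index_mask) == 0;
}

static bool toy_queue_full(const struct toy_ring *q)
{
	return ((q->producer_index + 1 - q->consumer_index) & q->index_mask) == 0;
}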
/Linux-v4.19/drivers/net/wireless/broadcom/b43/
pio.c:37 static u16 generate_cookie(struct b43_pio_txqueue *q, in generate_cookie() argument
50 cookie = (((u16)q->index + 1) << 12); in generate_cookie()
62 struct b43_pio_txqueue *q = NULL; in parse_cookie() local
67 q = pio->tx_queue_AC_BK; in parse_cookie()
70 q = pio->tx_queue_AC_BE; in parse_cookie()
73 q = pio->tx_queue_AC_VI; in parse_cookie()
76 q = pio->tx_queue_AC_VO; in parse_cookie()
79 q = pio->tx_queue_mcast; in parse_cookie()
82 if (B43_WARN_ON(!q)) in parse_cookie()
85 if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets))) in parse_cookie()
[all …]
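The b43 pio.c hit packs a queue identifier into the upper bits of a 16-bit cookie handed to hardware, so the completion path can recover both the queue and the packet slot. A sketch of that pack/unpack idea (the 12/4 bit split and the names are assumptions, not the exact b43 layout):

#include <stdint.h>

static uint16_t make_cookie(uint16_t queue_index, uint16_t pkt_index)
{
	return (uint16_t)(((queue_index + 1) << 12) | (pkt_index & 0x0fff));
}

static void split_cookie(uint16_t cookie, uint16_t *queue_index, uint16_t *pkt_index)
{
	*queue_index = (uint16_t)((cookie >> 12) - 1);
	*pkt_index   = cookie & 0x0fff;
}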
/Linux-v4.19/drivers/gpu/drm/amd/amdkfd/
kfd_queue.c:27 void print_queue_properties(struct queue_properties *q) in print_queue_properties() argument
29 if (!q) in print_queue_properties()
33 pr_debug("Queue Type: %u\n", q->type); in print_queue_properties()
34 pr_debug("Queue Size: %llu\n", q->queue_size); in print_queue_properties()
35 pr_debug("Queue percent: %u\n", q->queue_percent); in print_queue_properties()
36 pr_debug("Queue Address: 0x%llX\n", q->queue_address); in print_queue_properties()
37 pr_debug("Queue Id: %u\n", q->queue_id); in print_queue_properties()
38 pr_debug("Queue Process Vmid: %u\n", q->vmid); in print_queue_properties()
39 pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr); in print_queue_properties()
40 pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr); in print_queue_properties()
[all …]
/Linux-v4.19/drivers/net/wireless/mediatek/mt76/
dma.c:24 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_alloc_queue() argument
29 spin_lock_init(&q->lock); in mt76_dma_alloc_queue()
30 INIT_LIST_HEAD(&q->swq); in mt76_dma_alloc_queue()
32 size = q->ndesc * sizeof(struct mt76_desc); in mt76_dma_alloc_queue()
33 q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
34 if (!q->desc) in mt76_dma_alloc_queue()
37 size = q->ndesc * sizeof(*q->entry); in mt76_dma_alloc_queue()
38 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
39 if (!q->entry) in mt76_dma_alloc_queue()
43 for (i = 0; i < q->ndesc; i++) in mt76_dma_alloc_queue()
[all …]
