/Linux-v5.15/drivers/media/v4l2-core/ |
D | videobuf-core.c |
    50  #define CALL(q, f, arg...) \
    51          ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
    52  #define CALLPTR(q, f, arg...) \
    53          ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
    55  struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
    59          BUG_ON(q->msize < sizeof(*vb));
    61          if (!q->int_ops || !q->int_ops->alloc_vb) {
    66          vb = q->int_ops->alloc_vb(q->msize);
    76  static int state_neither_active_nor_queued(struct videobuf_queue *q,
    82          spin_lock_irqsave(q->irqlock, flags);
        [all …]
|
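The CALL() and CALLPTR() macros (lines 50-53) implement videobuf's "optional operation" convention: invoke the int_ops callback only if the backend supplies it, otherwise fall back to a default (0 or NULL). Below is a minimal stand-alone sketch of the same pattern; the struct and operation names are invented for the example, not videobuf's real ops table.

    #include <stdio.h>
    #include <stddef.h>

    struct buf_ops {
        int (*sync)(int handle);        /* optional callback */
        void *(*vaddr)(int handle);     /* optional callback */
    };

    /* Call the op if the backend provides it, otherwise return a default. */
    #define CALL_OP(ops, f, def, ...) \
        ((ops)->f ? (ops)->f(__VA_ARGS__) : (def))

    static int my_sync(int handle)
    {
        printf("sync %d\n", handle);
        return 0;
    }

    int main(void)
    {
        struct buf_ops ops = { .sync = my_sync, .vaddr = NULL };

        int err = CALL_OP(&ops, sync, 0, 42);       /* calls my_sync() */
        void *p  = CALL_OP(&ops, vaddr, NULL, 42);  /* falls back to NULL */

        printf("err=%d p=%p\n", err, p);
        return 0;
    }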
/Linux-v5.15/crypto/ |
D | gf128mul.c |
    25    1. distributions of this source code include the above copyright
    56  #define gf128mul_dat(q) { \
    57          q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
    58          q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
    59          q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
    60          q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
    61          q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
    62          q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
    63          q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
    64          q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
        [all …]
|
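gf128mul_dat() expands its macro argument q over every byte value 0x00-0xff, so one table layout can be instantiated with different per-byte expressions. Here is a stand-alone sketch of that X-macro-style table generation, shortened to 16 entries and using a placeholder formula rather than the GF(2^128) arithmetic of gf128mul.c.

    #include <stdio.h>

    /* Apply q() to a run of byte values; gf128mul.c does this for all 256. */
    #define gen_dat(q) { \
        q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
        q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f) \
    }

    #define xx(i) ((i) * 3 + 1)     /* placeholder per-byte formula */

    static const unsigned int table[16] = gen_dat(xx);

    int main(void)
    {
        for (int i = 0; i < 16; i++)
            printf("table[0x%02x] = %u\n", i, table[i]);
        return 0;
    }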
/Linux-v5.15/net/sched/ |
D | sch_choke.c |
    46  #define CHOKE_MAX_QUEUE (128*1024 - 1)
    70          unsigned int    tab_mask;       /* size - 1 */
    76  static unsigned int choke_len(const struct choke_sched_data *q)
    78          return (q->tail - q->head) & q->tab_mask;
    82  static int use_ecn(const struct choke_sched_data *q)
    84          return q->flags & TC_RED_ECN;
    88  static int use_harddrop(const struct choke_sched_data *q)
    90          return q->flags & TC_RED_HARDDROP;
    94  static void choke_zap_head_holes(struct choke_sched_data *q)
    97                  q->head = (q->head + 1) & q->tab_mask;
        [all …]
|
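choke_len() (line 78) works because the table size is a power of two: with tab_mask = size - 1, (tail - head) & tab_mask gives the number of occupied slots, and head/tail advance under the same mask (line 97). A minimal stand-alone sketch of that masked ring arithmetic, not the qdisc code itself:

    #include <stdio.h>

    #define RING_SIZE 8u                    /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)       /* the "size - 1" mask, like tab_mask */

    struct ring {
        unsigned int head;      /* oldest occupied slot */
        unsigned int tail;      /* next free slot */
    };

    /* Occupancy of the ring; valid because both indices stay masked. */
    static unsigned int ring_len(const struct ring *r)
    {
        return (r->tail - r->head) & RING_MASK;
    }

    static void ring_advance_head(struct ring *r)
    {
        r->head = (r->head + 1) & RING_MASK;    /* same masking as line 97 */
    }

    int main(void)
    {
        struct ring r = { .head = 6, .tail = 2 };   /* tail has wrapped past head */

        printf("len = %u\n", ring_len(&r));     /* prints 4 */
        ring_advance_head(&r);
        printf("len = %u\n", ring_len(&r));     /* prints 3 */
        return 0;
    }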
D | sch_sfb.c |
    34  #define SFB_NUMBUCKETS  (1 << SFB_BUCKET_SHIFT) /* N bins per Level */
    35  #define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
    69          u8              slot;           /* current active bins (0 or 1) */
   123  static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
   126          struct sfb_bucket *b = &q->bins[slot].bins[0][0];
   138  static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
   144                  increment_one_qlen(sfbhash, 0, q);
   146          sfbhash = sfb_hash(skb, 1);
   148                  increment_one_qlen(sfbhash, 1, q);
   152                                 struct sfb_sched_data *q)
        [all …]
|
D | sch_sfq.c |
    68     - max mtu to 2^18-1;
    76  #define SFQ_MAX_FLOWS   (0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
    81   * Scale allot by 8 (1<<3) so that no overflow occurs.
    84  #define SFQ_ALLOT_SIZE(X)       DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
    86  /* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
    91   * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
   133          struct sfq_head dep[SFQ_MAX_DEPTH + 1];
   136           * dep[1] : list of flows with 1 packet
   150  static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
   153                  return &q->slots[val].dep;
        [all …]
|
D | sch_netem.c |
    33          Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
    58           [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
    61           in the Netem module in the Linux kernel", available in [1]
   116          TX_IN_GAP_PERIOD = 1,
   123          GOOD_STATE = 1,
   136          u32 a4; /* p14 for 4-states or 1-k for GE */
   190          rho = (u64)state->rho + 1;
   191          answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
   200  static bool loss_4state(struct netem_sched_data *q)
   202          struct clgstate *clg = &q->clg;
        [all …]
|
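Lines 190-191 are the core of netem's correlated random generator: each output is a fixed-point weighted average of a fresh 32-bit random value and the previous output, with rho acting as the correlation coefficient scaled to 2^32. A rough user-space sketch of the same blend; rand() stands in for the kernel's PRNG and the surrounding netem state is omitted.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Correlated values in [0, 2^32): blend a fresh sample with the previous
     * output, weighted by rho (also scaled to 2^32). */
    struct crandom_state {
        uint64_t rho;   /* correlation, 0 .. 2^32-1 */
        uint64_t last;  /* previous output */
    };

    static uint32_t get_crandom(struct crandom_state *s)
    {
        uint64_t value = ((uint64_t)rand() << 16) ^ (uint64_t)rand(); /* crude sample */
        uint64_t rho = s->rho + 1;
        uint64_t answer;

        value &= 0xffffffffull;
        answer = (value * ((1ull << 32) - rho) + s->last * rho) >> 32;
        s->last = answer;
        return (uint32_t)answer;
    }

    int main(void)
    {
        /* rho ~ 0.9: successive outputs stay close to each other. */
        struct crandom_state s = { .rho = (uint64_t)(0.9 * 4294967296.0), .last = 0 };

        for (int i = 0; i < 5; i++)
            printf("%u\n", (unsigned)get_crandom(&s));
        return 0;
    }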
D | sch_skbprio.c |
    40  static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
    44          for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) {
    45                  if (!skb_queue_empty(&q->qdiscs[prio]))
    53  static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
    57          for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) {
    58                  if (!skb_queue_empty(&q->qdiscs[prio]))
    62          /* SKB queue is empty, return SKBPRIO_MAX_PRIORITY - 1
    65          return SKBPRIO_MAX_PRIORITY - 1;
    71          const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1;
    72          struct skbprio_sched_data *q = qdisc_priv(sch);
        [all …]
|
D | sch_cake.c |
   100   * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
   260          CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
   299  static u16 quantum_div[CAKE_QUEUES + 1] = {0};
   305          1, 1, 1, 1, 1, 1, 1, 1,
   315          2, 0, 1, 2, 4, 2, 2, 2,
   316          1, 2, 1, 2, 1, 2, 1, 2,
   326          0, 1, 0, 0, 2, 0, 0, 0,
   327          1, 0, 0, 0, 0, 0, 0, 0,
   337          0, 1, 0, 0, 2, 0, 0, 0,
   338          1, 0, 0, 0, 0, 0, 0, 0,
        [all …]
|
D | sch_multiq.c |
    32          struct multiq_sched_data *q = qdisc_priv(sch);
    35          struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
    53          if (band >= q->bands)
    54                  return q->queues[0];
    56          return q->queues[band];
    79          sch->q.qlen++;
    89          struct multiq_sched_data *q = qdisc_priv(sch);
    94          for (band = 0; band < q->bands; band++) {
    96                  q->curband++;
    97                  if (q->curband >= q->bands)
        [all …]
|
D | sch_cbq.c |
    23          Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
    48          allotment is W*r_i, and r_1+...+r_k = r < 1
   103          struct Qdisc    *q;             /* Elementary queueing discipline */
   111                             level of children + 1 for nodes.
   129          struct cbq_class        *defaults[TC_PRIO_MAX + 1];
   134          int                     nclasses[TC_CBQ_MAXPRIO + 1];
   135          unsigned int            quanta[TC_CBQ_MAXPRIO + 1];
   140          struct cbq_class        *active[TC_CBQ_MAXPRIO + 1];    /* List of all classes
   166  cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
   170          clc = qdisc_class_find(&q->clhash, classid);
        [all …]
|
D | sch_fq.c |
    65   * If packets have monotically increasing time_to_send, they are placed in O(1)
    74          unsigned long   age;            /* (jiffies | 1UL) when flow was emptied, for gc */
    87          struct rb_node  rate_node;      /* anchor in q->delayed tree */
   146          f->age = jiffies | 1UL;
   151          return !!(f->age & 1UL);
   172  static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
   174          rb_erase(&f->rate_node, &q->delayed);
   175          q->throttled_flows--;
   176          fq_flow_add_tail(&q->old_flows, f);
   179  static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
        [all …]
|
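fq marks a detached flow by storing jiffies with the low bit forced to 1 (line 146) and tests that bit to tell "detached since roughly this time" apart from an in-use field (line 151), so one word carries both a flag and a coarse timestamp. A small stand-alone sketch of that low-bit tagging idea, with a fake jiffies counter; it illustrates the trick, not fq's flow state machine.

    #include <stdio.h>
    #include <stdbool.h>

    static unsigned long jiffies;   /* stand-in for the kernel tick counter */

    struct flow {
        unsigned long age;  /* (jiffies | 1UL) while detached, 0 otherwise */
    };

    static void flow_set_detached(struct flow *f)
    {
        /* Odd value == detached; the timestamp only loses its lowest bit. */
        f->age = jiffies | 1UL;
    }

    static bool flow_is_detached(const struct flow *f)
    {
        return f->age & 1UL;
    }

    static void flow_set_active(struct flow *f)
    {
        f->age = 0;     /* even value == not detached */
    }

    int main(void)
    {
        struct flow f = { 0 };

        jiffies = 1000;
        flow_set_detached(&f);
        printf("detached=%d age=%lu\n", flow_is_detached(&f), f.age);  /* 1, 1001 */

        flow_set_active(&f);
        printf("detached=%d\n", flow_is_detached(&f));                 /* 0 */
        return 0;
    }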
D | sch_prio.c |
    25          u8   prio2band[TC_PRIO_MAX+1];
    33          struct prio_sched_data *q = qdisc_priv(sch);
    41          fl = rcu_dereference_bh(q->filter_list);
    57          return q->queues[q->prio2band[band & TC_PRIO_MAX]];
    61          band = TC_H_MIN(band) - 1;
    62          if (band >= q->bands)
    63                  return q->queues[q->prio2band[0]];
    65          return q->queues[band];
    89          sch->q.qlen++;
    99          struct prio_sched_data *q = qdisc_priv(sch);
        [all …]
|
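prio_classify() resolves a band two ways: by indexing prio2band[] with the packet priority masked to TC_PRIO_MAX (line 57), or from a classifier verdict whose 1-based minor id is converted to a 0-based band and redirected to the default mapping when out of range (lines 61-63). Below is a small illustrative sketch of that lookup-with-fallback; the constants and table contents are made up for the example.

    #include <stdio.h>

    #define TC_PRIO_MAX 15              /* table covers priorities 0..15 */

    struct prio_sched {
        int bands;                      /* number of configured bands */
        unsigned char prio2band[TC_PRIO_MAX + 1];
    };

    /* Map a raw packet priority to a band via the table. */
    static int band_from_priority(const struct prio_sched *q, unsigned int priority)
    {
        return q->prio2band[priority & TC_PRIO_MAX];
    }

    /* Map a classifier verdict (1-based band id) to a band, with a fallback. */
    static int band_from_classid(const struct prio_sched *q, unsigned int minor)
    {
        int band = (int)minor - 1;

        if (band < 0 || band >= q->bands)
            return q->prio2band[0];     /* out of range: default mapping */
        return band;
    }

    int main(void)
    {
        struct prio_sched q = { .bands = 3,
                                .prio2band = { 1, 2, 2, 2, 1, 2, 0, 0,
                                               1, 1, 1, 1, 1, 1, 1, 1 } };

        printf("prio 6  -> band %d\n", band_from_priority(&q, 6));  /* 0 */
        printf("class 2 -> band %d\n", band_from_classid(&q, 2));   /* 1 */
        printf("class 9 -> band %d\n", band_from_classid(&q, 9));   /* falls back to 1 */
        return 0;
    }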
/Linux-v5.15/drivers/media/common/videobuf2/ |
D | videobuf2-core.c |
    37  #define dprintk(q, level, fmt, arg...) \
    40                          pr_info("[%s] %s: " fmt, (q)->name, __func__, \
    93  #define log_qop(q, op) \
    94          dprintk(q, 2, "call_qop(%s)%s\n", #op, \
    95                  (q)->ops->op ? "" : " (nop)")
    97  #define call_qop(q, op, args...) \
   101          log_qop(q, op); \
   102          err = (q)->ops->op ? (q)->ops->op(args) : 0; \
   104                  (q)->cnt_ ## op++; \
   108  #define call_void_qop(q, op, args...) \
        [all …]
|
/Linux-v5.15/drivers/net/wireless/mediatek/mt76/ |
D | dma.c |
    83  mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
    85          writel(q->desc_dma, &q->regs->desc_base);
    86          writel(q->ndesc, &q->regs->ring_size);
    87          q->head = readl(&q->regs->dma_idx);
    88          q->tail = q->head;
    92  mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
    96          if (!q)
   100          for (i = 0; i < q->ndesc; i++)
   101                  q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
   103          writel(0, &q->regs->cpu_idx);
        [all …]
|
/Linux-v5.15/drivers/net/wireless/broadcom/b43/ |
D | pio.c |
    24  static u16 generate_cookie(struct b43_pio_txqueue *q,
    37          cookie = (((u16)q->index + 1) << 12);
    49          struct b43_pio_txqueue *q = NULL;
    54                  q = pio->tx_queue_AC_BK;
    57                  q = pio->tx_queue_AC_BE;
    60                  q = pio->tx_queue_AC_VI;
    63                  q = pio->tx_queue_AC_VO;
    66                  q = pio->tx_queue_mcast;
    69          if (B43_WARN_ON(!q))
    72          if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
        [all …]
|
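generate_cookie() stores the queue index (+1, so a valid cookie is never 0) in the top four bits of a u16 and leaves the low 12 bits for the packet slot, which is what parse_cookie() later unpacks to find the queue and the packet. A stand-alone sketch of that bit packing; the field widths follow the snippet, the function names here are invented.

    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    /* Top 4 bits: queue index + 1 (so 0 never looks valid),
     * low 12 bits: packet slot within that queue. */
    static uint16_t make_cookie(unsigned int queue_index, unsigned int pack_index)
    {
        assert(queue_index < 15 && pack_index < 0x1000);
        return (uint16_t)(((queue_index + 1) << 12) | pack_index);
    }

    static void parse_cookie(uint16_t cookie,
                             unsigned int *queue_index, unsigned int *pack_index)
    {
        *queue_index = (cookie >> 12) - 1;
        *pack_index = cookie & 0x0FFF;
    }

    int main(void)
    {
        unsigned int qi, pi;
        uint16_t c = make_cookie(3, 0x2a);

        parse_cookie(c, &qi, &pi);
        printf("cookie=0x%04x queue=%u slot=%u\n", c, qi, pi); /* queue=3 slot=42 */
        return 0;
    }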
/Linux-v5.15/sound/core/seq/oss/ |
D | seq_oss_readq.c |
    20  //#define SNDRV_SEQ_OSS_MAX_TIMEOUT     (unsigned long)(-1)
    35          struct seq_oss_readq *q;
    37          q = kzalloc(sizeof(*q), GFP_KERNEL);
    38          if (!q)
    41          q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL);
    42          if (!q->q) {
    43                  kfree(q);
    47          q->maxlen = maxlen;
    48          q->qlen = 0;
    49          q->head = q->tail = 0;
        [all …]
|
/Linux-v5.15/drivers/net/ethernet/chelsio/cxgb3/ |
D | sge.c |
    66  #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
    94          TXQ_RUNNING     = 1 << 0,       /* fetch engine is running */
    95          TXQ_LAST_PKT_DB = 1 << 1,       /* last packet rang the doorbell */
   139          dma_addr_t addr[MAX_SKB_FRAGS + 1];
   146   *      desc = 1 + (flits - 2) / (WR_FLITS - 1).
   152  #if SGE_NUM_GENBITS == 1
   153          1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   158          1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   163  # error "SGE_NUM_GENBITS must be 1 or 2"
   167  static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
        [all …]
|
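The comment on line 146 states the descriptor-count formula desc = 1 + (flits - 2) / (WR_FLITS - 1), which the (truncated) mapping table around lines 152-158 encodes. The sketch below only evaluates that formula literally; WR_FLITS is passed as a parameter because its value is not part of the excerpt, and the table in sge.c remains the authoritative mapping.

    #include <stdio.h>

    /* Literal evaluation of the comment's formula:
     * desc = 1 + (flits - 2) / (WR_FLITS - 1), integer division. */
    static unsigned int flits_to_desc(unsigned int flits, unsigned int wr_flits)
    {
        if (flits <= 2)
            return 1;
        return 1 + (flits - 2) / (wr_flits - 1);
    }

    int main(void)
    {
        /* Example run with an assumed WR_FLITS of 16. */
        for (unsigned int flits = 1; flits <= 40; flits += 13)
            printf("flits=%2u -> desc=%u\n", flits, flits_to_desc(flits, 16));
        return 0;
    }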
/Linux-v5.15/drivers/s390/cio/ |
D | qdio_main.c |
    40          "       lgr     1,%[schid]\n"
    49          : "cc", "0", "1", "2", "3");
    60          "       lgr     1,%[schid]\n"
    67          : "cc", "0", "1", "2");
    90          "       lgr     1,%[schid]\n"
    99          : "cc", "0", "1", "2", "3");
   106   * @q: queue to manipulate
   115  static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
   118          int tmp_count = count, tmp_start = start, nr = q->nr;
   121          qperf_inc(q, eqbs);
        [all …]
|
/Linux-v5.15/arch/s390/kernel/ |
D | fpu.c |
    26          asm volatile("stfpc %0" : "=Q" (state->fpc));
    31          asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
    32          asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
    33          asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
    34          asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
    35          asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
    36          asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
    37          asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
    38          asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
    39          asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
        [all …]
|
/Linux-v5.15/drivers/infiniband/sw/rxe/ |
D | rxe_queue.h |
    22   *    - Kernel space indices are always masked off to q->index_mask
    29   *    - By passing the type in the parameter list separate from q
    33   *      paths just q->type is passed.
    64  void rxe_queue_reset(struct rxe_queue *q);
    69  int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
    79  static inline int next_index(struct rxe_queue *q, int index)
    81          return (index + 1) & q->buf->index_mask;
    84  static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
    92                  prod = smp_load_acquire(&q->buf->producer_index);
    93                  cons = q->index;
        [all …]
|
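The rxe_queue.h excerpt shows a circular queue whose indices are masked to index_mask (a power-of-two size minus one): next_index() advances with a mask instead of a modulo, and in the path shown queue_empty() compares a producer index read with acquire semantics against a locally owned consumer index. The sketch below reproduces that shape in user space with C11 atomics standing in for smp_load_acquire(); it illustrates the masked-index/acquire-load idea, not the rxe implementation, and the names and sizes are invented.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define QUEUE_SLOTS 8u                  /* power of two */
    #define INDEX_MASK  (QUEUE_SLOTS - 1)

    struct queue {
        _Atomic unsigned int producer_index;    /* written by the producer */
        unsigned int consumer_index;            /* owned by the consumer */
        int slots[QUEUE_SLOTS];
    };

    static unsigned int next_index(unsigned int index)
    {
        return (index + 1) & INDEX_MASK;
    }

    /* Consumer-side emptiness check: acquire pairs with the producer's release. */
    static bool queue_empty(struct queue *q)
    {
        unsigned int prod = atomic_load_explicit(&q->producer_index,
                                                 memory_order_acquire);
        return prod == q->consumer_index;
    }

    static void produce(struct queue *q, int val)
    {
        unsigned int prod = atomic_load_explicit(&q->producer_index,
                                                 memory_order_relaxed);
        q->slots[prod] = val;
        atomic_store_explicit(&q->producer_index, next_index(prod),
                              memory_order_release);
    }

    int main(void)
    {
        struct queue q = { 0 };

        printf("empty=%d\n", queue_empty(&q));  /* 1 */
        produce(&q, 42);
        printf("empty=%d\n", queue_empty(&q));  /* 0 */
        return 0;
    }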
/Linux-v5.15/block/ |
D | blk-sysfs.c |
    61  static ssize_t queue_requests_show(struct request_queue *q, char *page)
    63          return queue_var_show(q->nr_requests, page);
    67  queue_requests_store(struct request_queue *q, const char *page, size_t count)
    72          if (!queue_is_mq(q))
    82          err = blk_mq_update_nr_requests(q, nr);
    89  static ssize_t queue_ra_show(struct request_queue *q, char *page)
    93          if (!q->disk)
    95          ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
   100  queue_ra_store(struct request_queue *q, const char *page, size_t count)
   105          if (!q->disk)
        [all …]
|
/Linux-v5.15/sound/core/seq/ |
D | seq_queue.c |
    50  static int queue_list_add(struct snd_seq_queue *q)
    58                          queue_list[i] = q;
    59                          q->queue = i;
    66          return -1;
    71          struct snd_seq_queue *q;
    75          q = queue_list[id];
    76          if (q) {
    77                  spin_lock(&q->owner_lock);
    78                  if (q->owner == client) {
    80                          q->klocked = 1;
        [all …]
|
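queue_list_add() scans a fixed-size table for a free slot, records the queue there, stores the slot index back into the object and returns it, or -1 when the table is full. A minimal single-threaded sketch of that registration pattern; the kernel version additionally holds a lock around the scan, which is omitted here.

    #include <stdio.h>

    #define MAX_QUEUES 8

    struct queue {
        int id;     /* slot index once registered, -1 otherwise */
    };

    static struct queue *queue_list[MAX_QUEUES];

    static int queue_list_add(struct queue *q)
    {
        for (int i = 0; i < MAX_QUEUES; i++) {
            if (!queue_list[i]) {
                queue_list[i] = q;
                q->id = i;
                return i;
            }
        }
        return -1;      /* table full */
    }

    int main(void)
    {
        struct queue a = { .id = -1 }, b = { .id = -1 };

        printf("a -> slot %d\n", queue_list_add(&a));   /* 0 */
        printf("b -> slot %d\n", queue_list_add(&b));   /* 1 */
        return 0;
    }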
/Linux-v5.15/include/linux/ |
D | blkdev.h |
    49  #define BLK_MQ_POLL_CLASSIC -1
    64  #define RQF_STARTED             ((__force req_flags_t)(1 << 1))
    66  #define RQF_SOFTBARRIER         ((__force req_flags_t)(1 << 3))
    68  #define RQF_FLUSH_SEQ           ((__force req_flags_t)(1 << 4))
    70  #define RQF_MIXED_MERGE         ((__force req_flags_t)(1 << 5))
    72  #define RQF_MQ_INFLIGHT         ((__force req_flags_t)(1 << 6))
    74  #define RQF_DONTPREP            ((__force req_flags_t)(1 << 7))
    76  #define RQF_FAILED              ((__force req_flags_t)(1 << 10))
    78  #define RQF_QUIET               ((__force req_flags_t)(1 << 11))
    80  #define RQF_ELVPRIV             ((__force req_flags_t)(1 << 12))
        [all …]
|
D | fortify-string.h |
     8  extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
     9  extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
    10  extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
    12  extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
    13  extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
    15  extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
    16  extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
    30  __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
    32          size_t p_size = __builtin_object_size(p, 1);
    38          return __underlying_strncpy(p, q, size);
        [all …]
|
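The fortified strncpy() wrapper asks the compiler for the destination's known object size via __builtin_object_size(p, 1) (line 32) before forwarding to the renamed real strncpy (line 38). The sketch below reproduces only that size-check idea in ordinary user-space C: it relies on GCC/Clang's __builtin_object_size, uses plain strncpy as the "underlying" call and abort() in place of the kernel's fortify handling, and leaves out the compile-time diagnostics and read-side checks the real header performs.

    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>

    /* If the compiler knows the destination's size and the requested copy is
     * larger, bail out; (size_t)-1 means "size unknown", so let it through. */
    static inline char *checked_strncpy(char *p, const char *q, size_t size,
                                        size_t p_size)
    {
        if (p_size != (size_t)-1 && size > p_size) {
            fprintf(stderr, "buffer overflow detected\n");
            abort();
        }
        return strncpy(p, q, size);
    }

    #define safe_strncpy(p, q, size) \
        checked_strncpy(p, q, size, __builtin_object_size(p, 1))

    int main(void)
    {
        char buf[8];

        safe_strncpy(buf, "hello", sizeof(buf));    /* fine */
        puts(buf);
        /* safe_strncpy(buf, "too long!", 16); would abort at run time */
        return 0;
    }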
/Linux-v5.15/drivers/spi/ |
D | spi-fsl-qspi.c |
    60  #define QUADSPI_MCR_SWRSTHD_MASK        BIT(1)
   102  #define QUADSPI_SR_IP_ACC_MASK          BIT(1)
   126  #define QUADSPI_LCKER_UNLOCK            BIT(1)
   135  #define LUT_CMD                         1
   159  #define LUT_PAD(x) (fls(x) - 1)
   176  #define QUADSPI_QUIRK_4X_INT_CLK        BIT(1)
   278  static inline int needs_swap_endian(struct fsl_qspi *q)
   280          return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
   283  static inline int needs_4x_clock(struct fsl_qspi *q)
   285          return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
        [all …]
|
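LUT_PAD(x) on line 159 converts a pad count into its log2 via fls(x) - 1, so 1, 2 and 4 map to 0, 1 and 2 — presumably the encoding the LUT field expects for single/dual/quad I/O. A tiny stand-alone check of that mapping, with a portable loop standing in for the kernel's fls() helper:

    #include <stdio.h>

    /* Index of the highest set bit, 1-based; returns 0 for x == 0.
     * Portable stand-in for the kernel's fls(). */
    static int fls(unsigned int x)
    {
        int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    #define LUT_PAD(x) (fls(x) - 1)

    int main(void)
    {
        /* 1, 2 and 4 pads map to field values 0, 1 and 2. */
        for (unsigned int pads = 1; pads <= 4; pads <<= 1)
            printf("LUT_PAD(%u) = %d\n", pads, LUT_PAD(pads));
        return 0;
    }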