/Linux-v5.4/lib/ |
D | list_sort.c |
  22 struct list_head *head, **tail = &head; in merge() local
  27 *tail = a; in merge()
  28 tail = &a->next; in merge()
  31 *tail = b; in merge()
  35 *tail = b; in merge()
  36 tail = &b->next; in merge()
  39 *tail = a; in merge()
  58 struct list_head *tail = head; in merge_final() local
  64 tail->next = a; in merge_final()
  65 a->prev = tail; in merge_final()
  [all …]
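The merge() hits above use the classic pointer-to-pointer tail idiom: *tail is always the link that receives the next node, so the first node needs no special case. A minimal sketch of that idiom, using a hypothetical singly linked struct node rather than the kernel's struct list_head:

#include <stddef.h>

/* Hypothetical node type; the kernel code operates on struct list_head. */
struct node {
	int value;
	struct node *next;
};

/* Merge two sorted lists.  "tail" always points at the link that must
 * receive the next node, so appending to an empty result and to a
 * non-empty result is the same two statements. */
static struct node *merge_sorted(struct node *a, struct node *b)
{
	struct node *head = NULL, **tail = &head;

	while (a && b) {
		if (a->value <= b->value) {
			*tail = a;
			tail = &a->next;
			a = a->next;
		} else {
			*tail = b;
			tail = &b->next;
			b = b->next;
		}
	}
	*tail = a ? a : b;	/* append whatever remains */
	return head;
}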
|
/Linux-v5.4/arch/arm64/kernel/ |
D | perf_callchain.c |
  23 user_backtrace(struct frame_tail __user *tail, in user_backtrace() argument
  31 if (!access_ok(tail, sizeof(buftail))) in user_backtrace()
  35 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); in user_backtrace()
  49 if (tail >= buftail.fp) in user_backtrace()
  71 compat_user_backtrace(struct compat_frame_tail __user *tail, in compat_user_backtrace() argument
  78 if (!access_ok(tail, sizeof(buftail))) in compat_user_backtrace()
  82 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); in compat_user_backtrace()
  94 if (tail + 1 >= (struct compat_frame_tail __user *) in compat_user_backtrace()
  114 struct frame_tail __user *tail; in perf_callchain_user() local
  116 tail = (struct frame_tail __user *)regs->regs[29]; in perf_callchain_user()
  [all …]
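user_backtrace() above walks a chain of frame records saved on the user stack, copying each record with __copy_from_user_inatomic() and refusing to follow a frame pointer that does not move to a strictly higher address (the tail >= buftail.fp check). A simplified in-process sketch of the same walk, with a hypothetical two-word record layout and no user-space copy:

/* Hypothetical frame record layout; the real struct frame_tail is
 * architecture specific and must be copied from user space. */
struct frame_record {
	unsigned long fp;	/* saved frame pointer of the caller */
	unsigned long lr;	/* return address into the caller */
};

/* Walk the frame chain starting at fp, recording return addresses.
 * Stops when the chain ends or fails to move to a higher address,
 * the same loop-prevention rule as the kernel check above. */
static int walk_frames(unsigned long fp, unsigned long *out, int max)
{
	int n = 0;

	while (fp && n < max) {
		const struct frame_record *rec = (const struct frame_record *)fp;

		out[n++] = rec->lr;
		if (rec->fp <= fp)	/* must strictly increase */
			break;
		fp = rec->fp;
	}
	return n;
}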
|
/Linux-v5.4/drivers/rpmsg/ |
D | qcom_glink_smem.c |
  39 __le32 *tail; member
  55 u32 tail; in glink_smem_rx_avail() local
  71 tail = le32_to_cpu(*pipe->tail); in glink_smem_rx_avail()
  73 if (head < tail) in glink_smem_rx_avail()
  74 return pipe->native.length - tail + head; in glink_smem_rx_avail()
  76 return head - tail; in glink_smem_rx_avail()
  84 u32 tail; in glink_smem_rx_peak() local
  86 tail = le32_to_cpu(*pipe->tail); in glink_smem_rx_peak()
  87 tail += offset; in glink_smem_rx_peak()
  88 if (tail >= pipe->native.length) in glink_smem_rx_peak()
  [all …]
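glink_smem_rx_avail() above computes how many bytes are readable in a ring whose indices wrap at an arbitrary length (not necessarily a power of two), and rx_peak() shows the matching index wrap. A small sketch of that arithmetic with illustrative names (ring_used and ring_wrap are not kernel APIs):

/* Bytes available to read in a ring of "length" bytes, where "head" is
 * the write index and "tail" is the read index.  When the writer has
 * wrapped past the end, the readable data is split in two pieces. */
static unsigned int ring_used(unsigned int head, unsigned int tail,
			      unsigned int length)
{
	if (head < tail)
		return length - tail + head;
	return head - tail;
}

/* Advance an index by "offset", wrapping at "length".  Valid as long as
 * the sum stays below 2 * length, which matches the rx_peak() usage. */
static unsigned int ring_wrap(unsigned int index, unsigned int offset,
			      unsigned int length)
{
	index += offset;
	if (index >= length)
		index -= length;
	return index;
}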
|
D | qcom_glink_rpm.c |
  50 void __iomem *tail; member
  60 unsigned int tail; in glink_rpm_rx_avail() local
  63 tail = readl(pipe->tail); in glink_rpm_rx_avail()
  65 if (head < tail) in glink_rpm_rx_avail()
  66 return pipe->native.length - tail + head; in glink_rpm_rx_avail()
  68 return head - tail; in glink_rpm_rx_avail()
  75 unsigned int tail; in glink_rpm_rx_peak() local
  78 tail = readl(pipe->tail); in glink_rpm_rx_peak()
  79 tail += offset; in glink_rpm_rx_peak()
  80 if (tail >= pipe->native.length) in glink_rpm_rx_peak()
  [all …]
|
/Linux-v5.4/drivers/infiniband/sw/rdmavt/ |
D | cq.c |
  76 u32 tail; in rvt_cq_enter() local
  84 tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail); in rvt_cq_enter()
  89 tail = k_wc->tail; in rvt_cq_enter()
  103 if (unlikely(next == tail || cq->cq_full)) { in rvt_cq_enter()
  360 RDMA_READ_UAPI_ATOMIC(cq->queue->tail)) in rvt_req_notify_cq()
  363 if (cq->kqueue->head != cq->kqueue->tail) in rvt_req_notify_cq()
  382 u32 head, tail, n; in rvt_resize_cq() local
  427 tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail); in rvt_resize_cq()
  431 tail = old_k_wc->tail; in rvt_resize_cq()
  436 if (tail > (u32)cq->ibcq.cqe) in rvt_resize_cq()
  [all …]
|
D | rc.c |
  108 u32 tail; in rvt_compute_aeth() local
  115 tail = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->tail); in rvt_compute_aeth()
  118 tail = READ_ONCE(qp->r_rq.kwq->tail); in rvt_compute_aeth()
  122 if (tail >= qp->r_rq.size) in rvt_compute_aeth()
  123 tail = 0; in rvt_compute_aeth()
  130 credits = head - tail; in rvt_compute_aeth()
|
D | srq.c |
  179 u32 sz, size, n, head, tail; in rvt_modify_srq() local
  217 tail = RDMA_READ_UAPI_ATOMIC(owq->tail); in rvt_modify_srq()
  221 tail = okwq->tail; in rvt_modify_srq()
  223 if (head >= srq->rq.size || tail >= srq->rq.size) { in rvt_modify_srq()
  228 if (n < tail) in rvt_modify_srq()
  229 n += srq->rq.size - tail; in rvt_modify_srq()
  231 n -= tail; in rvt_modify_srq()
  238 while (tail != head) { in rvt_modify_srq()
  242 wqe = rvt_get_rwqe_ptr(&srq->rq, tail); in rvt_modify_srq()
  249 if (++tail >= srq->rq.size) in rvt_modify_srq()
  [all …]
|
/Linux-v5.4/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_mbx.c |
  17 fifo->tail = 0; in fm10k_fifo_init()
  28 return fifo->tail - fifo->head; in fm10k_fifo_used()
  39 return fifo->size + fifo->head - fifo->tail; in fm10k_fifo_unused()
  50 return fifo->head == fifo->tail; in fm10k_fifo_empty()
  74 return (fifo->tail + offset) & (fifo->size - 1); in fm10k_fifo_tail_offset()
  120 fifo->head = fifo->tail; in fm10k_fifo_drop_all()
  132 static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) in fm10k_mbx_index_len() argument
  134 u16 len = tail - head; in fm10k_mbx_index_len()
  137 if (len > tail) in fm10k_mbx_index_len()
  153 u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1); in fm10k_mbx_tail_add() local
  [all …]
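fm10k_fifo_used()/fm10k_fifo_unused() above rely on free-running head and tail counters: the indices are never wrapped, only masked when they address the power-of-two sized storage, so tail - head stays correct even after the counters overflow. A sketch of that scheme with illustrative names:

/* Free-running index FIFO; "size" must be a power of two. */
struct fifo {
	unsigned short head;	/* next entry to consume */
	unsigned short tail;	/* next entry to produce */
	unsigned short size;
	unsigned int *buffer;
};

static unsigned short fifo_used(const struct fifo *f)
{
	return f->tail - f->head;	/* unsigned wrap-around is intended */
}

static unsigned short fifo_unused(const struct fifo *f)
{
	return f->size + f->head - f->tail;
}

static unsigned int *fifo_entry(struct fifo *f, unsigned short offset)
{
	/* only here do the counters get masked down to a storage index */
	return &f->buffer[(f->head + offset) & (f->size - 1)];
}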
|
/Linux-v5.4/include/linux/ |
D | circ_buf.h |
  12 int tail; member
  16 #define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1)) argument
  21 #define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size)) argument
  26 #define CIRC_CNT_TO_END(head,tail,size) \ argument
  27 ({int end = (size) - (tail); \
  32 #define CIRC_SPACE_TO_END(head,tail,size) \ argument
  34 int n = (end + (tail)) & ((size)-1); \
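The CIRC_CNT()/CIRC_SPACE() macros above assume a power-of-two size and keep one slot unused so that a full ring can be told apart from an empty one. A small user-space example of how the two macros relate (values chosen for illustration):

#include <stdio.h>

/* Same definitions as include/linux/circ_buf.h above; size must be a
 * power of two for the masking to work. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	int size = 16;			/* power of two */
	int head = 3, tail = 12;	/* head has wrapped past the end */

	printf("used  = %d\n", CIRC_CNT(head, tail, size));	/* prints 7 */
	printf("space = %d\n", CIRC_SPACE(head, tail, size));	/* prints 8 */

	/* used + space == size - 1: one slot stays empty so that a full
	 * ring is never confused with an empty one. */
	return 0;
}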
|
/Linux-v5.4/arch/arm/kernel/ |
D | perf_callchain.c |
  34 user_backtrace(struct frame_tail __user *tail, in user_backtrace() argument
  40 if (!access_ok(tail, sizeof(buftail))) in user_backtrace()
  44 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); in user_backtrace()
  56 if (tail + 1 >= buftail.fp) in user_backtrace()
  65 struct frame_tail __user *tail; in perf_callchain_user() local
  77 tail = (struct frame_tail __user *)regs->ARM_fp - 1; in perf_callchain_user()
  80 tail && !((unsigned long)tail & 0x3)) in perf_callchain_user()
  81 tail = user_backtrace(tail, entry); in perf_callchain_user()
|
/Linux-v5.4/kernel/locking/ |
D | qspinlock.c |
  116 u32 tail; in encode_tail() local
  118 tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET; in encode_tail()
  119 tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */ in encode_tail()
  121 return tail; in encode_tail()
  124 static inline __pure struct mcs_spinlock *decode_tail(u32 tail) in decode_tail() argument
  126 int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1; in decode_tail()
  127 int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; in decode_tail()
  175 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) in xchg_tail() argument
  181 return (u32)xchg_relaxed(&lock->tail, in xchg_tail()
  182 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET; in xchg_tail()
  [all …]
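encode_tail()/decode_tail() above pack a CPU number and a per-CPU MCS node index into one word; the CPU is stored as cpu + 1 so that a tail of zero can mean "no waiter". A sketch of the packing with placeholder shift/mask values (the real _Q_TAIL_* constants are defined elsewhere and differ):

/* Placeholder layout: 2 bits of per-CPU node index, CPU number above it.
 * These are illustrative values, not the kernel's _Q_TAIL_* constants. */
#define TAIL_IDX_OFFSET	0
#define TAIL_IDX_BITS	2
#define TAIL_IDX_MASK	(((1U << TAIL_IDX_BITS) - 1) << TAIL_IDX_OFFSET)
#define TAIL_CPU_OFFSET	(TAIL_IDX_OFFSET + TAIL_IDX_BITS)

static inline unsigned int encode_tail(int cpu, int idx)
{
	/* cpu + 1 so that an all-zero tail means "queue is empty" */
	return ((unsigned int)(cpu + 1) << TAIL_CPU_OFFSET) |
	       ((unsigned int)idx << TAIL_IDX_OFFSET);
}

static inline void decode_tail(unsigned int tail, int *cpu, int *idx)
{
	*cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
	*idx = (tail & TAIL_IDX_MASK) >> TAIL_IDX_OFFSET;
}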
|
/Linux-v5.4/arch/arm/oprofile/ |
D | common.c |
  86 static struct frame_tail* user_backtrace(struct frame_tail *tail) in user_backtrace() argument
  91 if (!access_ok(tail, sizeof(buftail))) in user_backtrace()
  93 if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) in user_backtrace()
  100 if (tail + 1 >= buftail[0].fp) in user_backtrace()
  108 struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1; in arm_backtrace() local
  117 while (depth-- && tail && !((unsigned long) tail & 3)) in arm_backtrace()
  118 tail = user_backtrace(tail); in arm_backtrace()
|
/Linux-v5.4/include/drm/ |
D | spsc_queue.h |
  43 atomic_long_t tail; member
  51 atomic_long_set(&queue->tail, (long)&queue->head); in spsc_queue_init()
  67 struct spsc_node **tail; in spsc_queue_push() local
  73 tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); in spsc_queue_push()
  74 WRITE_ONCE(*tail, node); in spsc_queue_push()
  85 return tail == &queue->head; in spsc_queue_push()
  107 if (atomic_long_cmpxchg(&queue->tail, in spsc_queue_pop()
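spsc_queue_push() above publishes a node by atomically exchanging the queue tail (which always holds the address of the link to fill in) and then writing the node through the old tail. A sketch of the same idea using C11 atomics instead of the kernel's atomic_long_t; this is an illustration of the pattern, not the drm spsc_queue API:

#include <stdatomic.h>
#include <stddef.h>

struct spsc_node {
	struct spsc_node *next;
};

struct spsc_queue {
	struct spsc_node *head;			/* consumer end */
	_Atomic(struct spsc_node **) tail;	/* address of the last ->next link */
};

static void queue_init(struct spsc_queue *q)
{
	q->head = NULL;
	atomic_store(&q->tail, &q->head);	/* empty: tail points at head */
}

static void queue_push(struct spsc_queue *q, struct spsc_node *node)
{
	struct spsc_node **prev_link;

	node->next = NULL;
	/* claim the current tail link and publish &node->next as the new tail */
	prev_link = atomic_exchange(&q->tail, &node->next);
	/* link the previous element (or head, if the queue was empty) to us;
	 * the kernel uses WRITE_ONCE() and explicit ordering around this store */
	*prev_link = node;
}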
|
/Linux-v5.4/net/sunrpc/ |
D | xdr.c |
  181 struct kvec *tail = xdr->tail; in xdr_inline_pages() local
  191 tail->iov_base = buf + offset; in xdr_inline_pages()
  192 tail->iov_len = buflen - offset; in xdr_inline_pages()
  194 tail->iov_len -= sizeof(__be32); in xdr_inline_pages()
  365 struct kvec *head, *tail; in xdr_shrink_bufhead() local
  371 tail = buf->tail; in xdr_shrink_bufhead()
  379 if (tail->iov_len != 0) { in xdr_shrink_bufhead()
  380 if (tail->iov_len > len) { in xdr_shrink_bufhead()
  381 copy = tail->iov_len - len; in xdr_shrink_bufhead()
  382 memmove((char *)tail->iov_base + len, in xdr_shrink_bufhead()
  [all …]
|
/Linux-v5.4/Documentation/trace/ |
D | ring-buffer-design.txt |
  17 tail - where new writes happen in the ring buffer.
  172 It is possible that the page swapped is the commit page and the tail page,
  176 reader page  commit page  tail page
  205 tail page - the page where the next write will take place.
  229 +---------+ <--- tail pointer
  254 +---------+ <--- tail pointer
  267 +---------+ <--- tail pointer
  278 +---------+ <--(last full commit and tail pointer)
  287 The tail page points to the page with the last write (before
  290 The tail page is always equal to or after the commit page. It may
  [all …]
|
/Linux-v5.4/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_ring.h |
  89 const __u32 tail = atomic_read(&r->prod_tail); in pvrdma_idx_ring_has_space() local
  92 if (pvrdma_idx_valid(tail, max_elems) && in pvrdma_idx_ring_has_space()
  94 *out_tail = tail & (max_elems - 1); in pvrdma_idx_ring_has_space()
  95 return tail != (head ^ max_elems); in pvrdma_idx_ring_has_space()
  103 const __u32 tail = atomic_read(&r->prod_tail); in pvrdma_idx_ring_has_data() local
  106 if (pvrdma_idx_valid(tail, max_elems) && in pvrdma_idx_ring_has_data()
  109 return tail != head; in pvrdma_idx_ring_has_data()
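pvrdma_idx_ring_has_space()/has_data() above use a two-lap index scheme: producer and consumer indices run modulo twice the ring size, the low bits select a slot, and the extra bit tells a full ring (one lap apart) from an empty one (indices equal). A sketch of the comparisons, assuming max_elems is a power of two (names are illustrative):

#include <stdbool.h>

static bool ring_has_data(unsigned int prod, unsigned int cons)
{
	return prod != cons;			/* empty only when indices match */
}

static bool ring_has_space(unsigned int prod, unsigned int cons,
			   unsigned int max_elems)
{
	/* full when the indices point at the same slot but differ in the
	 * "lap" bit, i.e. the producer is exactly one lap ahead */
	return prod != (cons ^ max_elems);
}

static unsigned int ring_slot(unsigned int idx, unsigned int max_elems)
{
	return idx & (max_elems - 1);		/* low bits select the slot */
}

static unsigned int ring_advance(unsigned int idx, unsigned int max_elems)
{
	return (idx + 1) & (2 * max_elems - 1);	/* indices wrap at 2 * size */
}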
|
/Linux-v5.4/kernel/ |
D | softirq.c |
  465 struct tasklet_struct **tail; member
  481 *head->tail = t; in __tasklet_schedule_common()
  482 head->tail = &(t->next); in __tasklet_schedule_common()
  510 tl_head->tail = &tl_head->head; in tasklet_action_common()
  532 *tl_head->tail = t; in tasklet_action_common()
  533 tl_head->tail = &t->next; in tasklet_action_common()
  580 per_cpu(tasklet_vec, cpu).tail = in softirq_init()
  582 per_cpu(tasklet_hi_vec, cpu).tail = in softirq_init()
  637 per_cpu(tasklet_vec, cpu).tail = i; in tasklet_kill_immediate()
  650 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { in takeover_tasklets()
  [all …]
|
/Linux-v5.4/net/sched/ |
D | sch_choke.c |
  68 unsigned int tail; member
  78 return (q->tail - q->head) & q->tab_mask; in choke_len()
  98 if (q->head == q->tail) in choke_zap_head_holes()
  107 q->tail = (q->tail - 1) & q->tab_mask; in choke_zap_tail_holes()
  108 if (q->head == q->tail) in choke_zap_tail_holes()
  110 } while (q->tab[q->tail] == NULL); in choke_zap_tail_holes()
  124 if (idx == q->tail) in choke_drop_by_idx()
  212 if (q->head == q->tail) in choke_match_random()
  275 q->tab[q->tail] = skb; in choke_enqueue()
  276 q->tail = (q->tail + 1) & q->tab_mask; in choke_enqueue()
  [all …]
|
/Linux-v5.4/kernel/rcu/ |
D | rcu_segcblist.c |
  21 rclp->tail = &rclp->head; in rcu_cblist_init()
  34 *rclp->tail = rhp; in rcu_cblist_enqueue()
  35 rclp->tail = &rhp->next; in rcu_cblist_enqueue()
  53 drclp->tail = srclp->tail; in rcu_cblist_flush_enqueue()
  55 drclp->tail = &drclp->head; in rcu_cblist_flush_enqueue()
  63 srclp->tail = &rhp->next; in rcu_cblist_flush_enqueue()
  86 rclp->tail = &rclp->head; in rcu_cblist_dequeue()
  327 *rclp->tail = rsclp->head; in rcu_segcblist_extract_done_cbs()
  330 rclp->tail = rsclp->tails[RCU_DONE_TAIL]; in rcu_segcblist_extract_done_cbs()
  350 *rclp->tail = *rsclp->tails[RCU_DONE_TAIL]; in rcu_segcblist_extract_pend_cbs()
  [all …]
|
/Linux-v5.4/drivers/gpu/drm/i915/gt/uc/ |
D | intel_guc_ct.c |
  80 desc, desc->head, desc->tail); in guc_ct_buffer_desc_reset()
  82 desc->tail = 0; in guc_ct_buffer_desc_reset()
  299 u32 tail = desc->tail / 4; /* in dwords */ in ctb_write() local
  308 GEM_BUG_ON(desc->tail % 4); in ctb_write()
  309 GEM_BUG_ON(tail >= size); in ctb_write()
  315 if (tail < head) in ctb_write()
  316 used = (size - head) + tail; in ctb_write()
  318 used = tail - head; in ctb_write()
  339 cmds[tail] = header; in ctb_write()
  340 tail = (tail + 1) % size; in ctb_write()
  [all …]
|
/Linux-v5.4/drivers/of/ |
D | pdt.c |
  114 struct property *head, *tail; in of_pdt_build_prop_list() local
  116 head = tail = of_pdt_build_one_prop(node, NULL, in of_pdt_build_prop_list()
  119 tail->next = of_pdt_build_one_prop(node, NULL, NULL, NULL, 0); in of_pdt_build_prop_list()
  120 tail = tail->next; in of_pdt_build_prop_list()
  121 while(tail) { in of_pdt_build_prop_list()
  122 tail->next = of_pdt_build_one_prop(node, tail->name, in of_pdt_build_prop_list()
  124 tail = tail->next; in of_pdt_build_prop_list()
|
/Linux-v5.4/fs/affs/ |
D | inode.c |
  22 struct affs_tail *tail; in affs_iget() local
  50 tail = AFFS_TAIL(sb, bh); in affs_iget()
  51 prot = be32_to_cpu(tail->protect); in affs_iget()
  76 id = be16_to_cpu(tail->uid); in affs_iget()
  84 id = be16_to_cpu(tail->gid); in affs_iget()
  92 switch (be32_to_cpu(tail->stype)) { in affs_iget()
  98 if (be32_to_cpu(tail->stype) == ST_USERDIR || in affs_iget()
  127 size = be32_to_cpu(tail->size); in affs_iget()
  136 if (tail->link_chain) in affs_iget()
  153 = (be32_to_cpu(tail->change.days) * 86400LL + in affs_iget()
  [all …]
|
/Linux-v5.4/tools/perf/util/ |
D | block-range.c |
  205 struct block_range *tail = malloc(sizeof(struct block_range)); in block_range__create() local
  206 if (!tail) in block_range__create()
  209 *tail = (struct block_range){ in block_range__create()
  225 rb_link_right_of_node(&tail->node, &entry->node); in block_range__create()
  226 rb_insert_color(&tail->node, &block_ranges.root); in block_range__create()
  250 struct block_range *tail; in block_range__create() local
  252 tail = malloc(sizeof(struct block_range)); in block_range__create()
  253 if (!tail) in block_range__create()
  256 *tail = (struct block_range){ in block_range__create()
  263 rb_link_right_of_node(&tail->node, &entry->node); in block_range__create()
  [all …]
|
/Linux-v5.4/fs/9p/ |
D | vfs_dir.c |
  41 int tail; member
  108 if (rdir->tail == rdir->head) { in v9fs_dir_readdir()
  120 rdir->tail = n; in v9fs_dir_readdir()
  122 while (rdir->head < rdir->tail) { in v9fs_dir_readdir()
  124 rdir->tail - rdir->head, &st); in v9fs_dir_readdir()
  166 if (rdir->tail == rdir->head) { in v9fs_dir_readdir_dotl()
  173 rdir->tail = err; in v9fs_dir_readdir_dotl()
  176 while (rdir->head < rdir->tail) { in v9fs_dir_readdir_dotl()
  179 rdir->tail - rdir->head, in v9fs_dir_readdir_dotl()
|
/Linux-v5.4/drivers/crypto/caam/ |
D | jr.c |
  194 int hw_idx, sw_idx, i, head, tail; in caam_jr_dequeue() local
  207 sw_idx = tail = jrp->tail; in caam_jr_dequeue()
  210 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { in caam_jr_dequeue()
  211 sw_idx = (tail + i) & (JOBR_DEPTH - 1); in caam_jr_dequeue()
  218 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); in caam_jr_dequeue()
  255 if (sw_idx == tail) { in caam_jr_dequeue()
  257 tail = (tail + 1) & (JOBR_DEPTH - 1); in caam_jr_dequeue()
  258 } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && in caam_jr_dequeue()
  259 jrp->entinfo[tail].desc_addr_dma == 0); in caam_jr_dequeue()
  261 jrp->tail = tail; in caam_jr_dequeue()
  [all …]
|