/Linux-v5.4/drivers/gpu/drm/i915/gt/ |
D | selftest_timeline.c |
    19   static struct page *hwsp_page(struct intel_timeline *tl)  in hwsp_page() argument
    21       struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;  in hwsp_page()
    27   static unsigned long hwsp_cacheline(struct intel_timeline *tl)  in hwsp_cacheline() argument
    29       unsigned long address = (unsigned long)page_address(hwsp_page(tl));  in hwsp_cacheline()
    31       return (address + tl->hwsp_offset) / CACHELINE_BYTES;  in hwsp_cacheline()
    50                             struct intel_timeline *tl)  in __mock_hwsp_record() argument
    52       tl = xchg(&state->history[idx], tl);  in __mock_hwsp_record()
    53       if (tl) {  in __mock_hwsp_record()
    54           radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));  in __mock_hwsp_record()
    55           intel_timeline_put(tl);  in __mock_hwsp_record()
    [all …]
|
D | intel_timeline.c |
    313  int intel_timeline_pin(struct intel_timeline *tl)  in intel_timeline_pin() argument
    317      if (atomic_add_unless(&tl->pin_count, 1, 0))  in intel_timeline_pin()
    320      err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);  in intel_timeline_pin()
    324      tl->hwsp_offset =  in intel_timeline_pin()
    325          i915_ggtt_offset(tl->hwsp_ggtt) +  in intel_timeline_pin()
    326          offset_in_page(tl->hwsp_offset);  in intel_timeline_pin()
    328      cacheline_acquire(tl->hwsp_cacheline);  in intel_timeline_pin()
    329      if (atomic_fetch_inc(&tl->pin_count)) {  in intel_timeline_pin()
    330          cacheline_release(tl->hwsp_cacheline);  in intel_timeline_pin()
    331          __i915_vma_unpin(tl->hwsp_ggtt);  in intel_timeline_pin()
    [all …]
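The intel_timeline_pin() hits above show a lazy-pin idiom: atomic_add_unless() takes a fast path when the timeline is already pinned, and the slow path resolves a concurrent-pin race after the expensive setup. A minimal sketch of that idiom follows, written against a hypothetical resource type (pinned_res, acquire/release are illustrative names, not the i915 code itself):

#include <linux/atomic.h>

/* Hypothetical object with a lazily-initialised backing resource. */
struct pinned_res {
	atomic_t pin_count;
	int (*acquire)(struct pinned_res *res);   /* expensive setup        */
	void (*release)(struct pinned_res *res);  /* undo that setup        */
};

static int pinned_res_pin(struct pinned_res *res)
{
	int err;

	/* Fast path: bump the count only if it is already non-zero. */
	if (atomic_add_unless(&res->pin_count, 1, 0))
		return 0;

	/* Slow path: do the expensive setup while still unpinned. */
	err = res->acquire(res);
	if (err)
		return err;

	/* Another thread may have completed the same setup concurrently. */
	if (atomic_fetch_inc(&res->pin_count))
		res->release(res);	/* drop our duplicate setup */

	return 0;	/* the matching unpin decrements and releases on zero */
}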
|
D | intel_timeline.h |
    34   int intel_timeline_init(struct intel_timeline *tl,
    37   void intel_timeline_fini(struct intel_timeline *tl);
    55   static inline int __intel_timeline_sync_set(struct intel_timeline *tl,  in __intel_timeline_sync_set() argument
    58       return i915_syncmap_set(&tl->sync, context, seqno);  in __intel_timeline_sync_set()
    61   static inline int intel_timeline_sync_set(struct intel_timeline *tl,  in intel_timeline_sync_set() argument
    64       return __intel_timeline_sync_set(tl, fence->context, fence->seqno);  in intel_timeline_sync_set()
    67   static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,  in __intel_timeline_sync_is_later() argument
    70       return i915_syncmap_is_later(&tl->sync, context, seqno);  in __intel_timeline_sync_is_later()
    73   static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,  in intel_timeline_sync_is_later() argument
    76       return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);  in intel_timeline_sync_is_later()
    [all …]
|
D | intel_context.h |
    129      struct intel_timeline *tl = ce->timeline;  in intel_context_timeline_lock() local
    132      err = mutex_lock_interruptible(&tl->mutex);  in intel_context_timeline_lock()
    136      return tl;  in intel_context_timeline_lock()
    139  static inline void intel_context_timeline_unlock(struct intel_timeline *tl)  in intel_context_timeline_unlock() argument
    140      __releases(&tl->mutex)  in intel_context_timeline_unlock()
    142      mutex_unlock(&tl->mutex);  in intel_context_timeline_unlock()
|
D | intel_context.c |
    295      struct intel_timeline *tl = ce->timeline;  in intel_context_prepare_remote_request() local
    301      if (rq->timeline != tl) { /* beware timeline sharing */  in intel_context_prepare_remote_request()
    302          err = mutex_lock_interruptible_nested(&tl->mutex,  in intel_context_prepare_remote_request()
    308      err = i915_active_request_set(&tl->last_request, rq);  in intel_context_prepare_remote_request()
    309      mutex_unlock(&tl->mutex);  in intel_context_prepare_remote_request()
|
D | mock_engine.c |
    35   static void mock_timeline_pin(struct intel_timeline *tl)  in mock_timeline_pin() argument
    37       atomic_inc(&tl->pin_count);  in mock_timeline_pin()
    40   static void mock_timeline_unpin(struct intel_timeline *tl)  in mock_timeline_unpin() argument
    42       GEM_BUG_ON(!atomic_read(&tl->pin_count));  in mock_timeline_unpin()
    43       atomic_dec(&tl->pin_count);  in mock_timeline_unpin()
|
/Linux-v5.4/include/linux/ |
D | ipmi-fru.h |
    52       struct fru_type_length tl[0]; /* type-length stuff follows */  member
    85   static inline int fru_type(struct fru_type_length *tl)  in fru_type() argument
    87       return tl->type_length & 0xc0;  in fru_type()
    90   static inline int fru_length(struct fru_type_length *tl)  in fru_length() argument
    92       return (tl->type_length & 0x3f) + 1; /* len of whole record */  in fru_length()
    96   static inline int fru_strlen(struct fru_type_length *tl)  in fru_strlen() argument
    98       return fru_length(tl) - 1;  in fru_strlen()
    101  static inline char *fru_strcpy(char *dest, struct fru_type_length *tl)  in fru_strcpy() argument
    103      int len = fru_strlen(tl);  in fru_strcpy()
    104      memcpy(dest, tl->data, len);  in fru_strcpy()
    [all …]
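The ipmi-fru.h helpers listed above decode an IPMI FRU type/length byte: the top two bits carry the type code, the low six bits the payload length, and fru_length() counts the type/length byte itself. A small userspace restatement of that decoding, as an illustrative sketch (the struct mirrors the kernel layout by hand and the sample record is made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hand-written mirror of the kernel's struct fru_type_length layout. */
struct fru_type_length {
	uint8_t type_length;
	uint8_t data[];
};

static int fru_type(const struct fru_type_length *tl)
{
	return tl->type_length & 0xc0;		/* top two bits: encoding type   */
}

static int fru_length(const struct fru_type_length *tl)
{
	return (tl->type_length & 0x3f) + 1;	/* whole record incl. this byte  */
}

static int fru_strlen(const struct fru_type_length *tl)
{
	return fru_length(tl) - 1;		/* payload bytes only            */
}

int main(void)
{
	/* Made-up sample: 0xc5 = type bits 0xc0, 5 payload bytes. */
	static const uint8_t raw[] = { 0xc5, 'H', 'e', 'l', 'l', 'o' };
	const struct fru_type_length *tl = (const void *)raw;
	char buf[64];

	memcpy(buf, tl->data, fru_strlen(tl));
	buf[fru_strlen(tl)] = '\0';
	printf("type=0x%02x len=%d str=\"%s\"\n",
	       fru_type(tl), fru_length(tl), buf);
	return 0;
}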
|
/Linux-v5.4/fs/cifs/ |
D | dfs_cache.h |
    53   dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,  in dfs_cache_get_next_tgt() argument
    56       if (!tl || list_empty(&tl->tl_list) || !it ||  in dfs_cache_get_next_tgt()
    57           list_is_last(&it->it_list, &tl->tl_list))  in dfs_cache_get_next_tgt()
    63   dfs_cache_get_tgt_iterator(struct dfs_cache_tgt_list *tl)  in dfs_cache_get_tgt_iterator() argument
    65       if (!tl)  in dfs_cache_get_tgt_iterator()
    67       return list_first_entry_or_null(&tl->tl_list,  in dfs_cache_get_tgt_iterator()
    72   static inline void dfs_cache_free_tgts(struct dfs_cache_tgt_list *tl)  in dfs_cache_free_tgts() argument
    76       if (!tl || list_empty(&tl->tl_list))  in dfs_cache_free_tgts()
    78       list_for_each_entry_safe(it, nit, &tl->tl_list, it_list) {  in dfs_cache_free_tgts()
    83       tl->tl_numtgts = 0;  in dfs_cache_free_tgts()
    [all …]
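The dfs_cache.h inline helpers above provide first/next iterator semantics over a list_head-based target list. A typical walk using only the helpers shown, sketched rather than taken from the cifs callers, and assuming (as the guards above suggest) that dfs_cache_get_next_tgt() returns NULL past the last target:

/* Sketch of the calling pattern, not code from fs/cifs; count_targets()
 * is a hypothetical helper that relies only on dfs_cache.h. */
static int count_targets(struct dfs_cache_tgt_list *tl)
{
	struct dfs_cache_tgt_iterator *it;
	int n = 0;

	for (it = dfs_cache_get_tgt_iterator(tl); it;
	     it = dfs_cache_get_next_tgt(tl, it))
		n++;

	return n;
}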
|
/Linux-v5.4/drivers/net/ethernet/netronome/nfp/ |
D | nfp_net_debugdump.c |
    58       struct nfp_dump_tl tl;  member
    64       struct nfp_dump_tl tl;  member
    70       struct nfp_dump_tl tl;  member
    78       struct nfp_dump_tl tl;  member
    87       struct nfp_dump_tl tl;  member
    92       struct nfp_dump_tl tl;  member
    112  typedef int (*nfp_tlv_visit)(struct nfp_pf *pf, struct nfp_dump_tl *tl,
    120      struct nfp_dump_tl *tl;  in nfp_traverse_tlvs() local
    125      while (remaining >= sizeof(*tl)) {  in nfp_traverse_tlvs()
    126          tl = p;  in nfp_traverse_tlvs()
    [all …]
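Each nfp dump record above embeds a struct nfp_dump_tl header, and nfp_traverse_tlvs() walks a buffer of such records with a visitor callback. The general shape of that walk, sketched with a hypothetical header layout and function names (tlv_hdr, traverse_tlvs) rather than the nfp definitions:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical TLV header: 16-bit type and 16-bit payload length. */
struct tlv_hdr {
	uint16_t type;
	uint16_t length;	/* payload bytes following the header */
};

typedef int (*tlv_visit)(const struct tlv_hdr *hdr, const void *payload);

static int traverse_tlvs(const void *buf, size_t len, tlv_visit cb)
{
	const uint8_t *p = buf;
	size_t remaining = len;

	while (remaining >= sizeof(struct tlv_hdr)) {
		const struct tlv_hdr *hdr = (const void *)p;
		size_t total = sizeof(*hdr) + hdr->length;

		if (total > remaining)
			return -1;	/* truncated or corrupt record */

		if (cb(hdr, hdr + 1))
			return -1;	/* visitor requested abort */

		p += total;
		remaining -= total;
	}
	return 0;
}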
|
/Linux-v5.4/drivers/gpu/drm/i915/ |
D | i915_request.c |
    319      struct intel_timeline * const tl = rq->timeline;  in i915_request_retire_upto() local
    327      lockdep_assert_held(&tl->mutex);  in i915_request_retire_upto()
    331      tmp = list_first_entry(&tl->requests, typeof(*tmp), link);  in i915_request_retire_upto()
    582  static void retire_requests(struct intel_timeline *tl)  in retire_requests() argument
    586      list_for_each_entry_safe(rq, rn, &tl->requests, link)  in retire_requests()
    592  request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)  in request_alloc_slow() argument
    596      if (list_empty(&tl->requests))  in request_alloc_slow()
    603      rq = list_first_entry(&tl->requests, typeof(*rq), link);  in request_alloc_slow()
    612      rq = list_last_entry(&tl->requests, typeof(*rq), link);  in request_alloc_slow()
    616      retire_requests(tl);  in request_alloc_slow()
    [all …]
|
/Linux-v5.4/crypto/ |
D | vmac.c |
    150      int i; u64 th, tl; \
    153          MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
    155          ADD128(rh, rl, th, tl); \
    161      int i; u64 th, tl; \
    164          MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
    166          ADD128(rh, rl, th, tl); \
    167          MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
    169          ADD128(rh1, rl1, th, tl); \
    176      int i; u64 th, tl; \
    179          MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
    [all …]
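In vmac.c, th/tl hold the high and low halves of a 64x64-bit product, and ADD128 folds them into a 128-bit accumulator split across rh/rl. The same arithmetic, sketched portably with a compiler-provided 128-bit type; the lowercase helper names are mine, and the kernel macros use arch-specific implementations rather than this:

#include <stdint.h>

/* Portable sketch of what the kernel's MUL64/ADD128 macros compute
 * (requires GCC/Clang for unsigned __int128). */

/* 64x64 -> 128 multiply, returning the halves in *rh (high) and *rl (low). */
static void mul64(uint64_t *rh, uint64_t *rl, uint64_t a, uint64_t b)
{
	unsigned __int128 p = (unsigned __int128)a * b;

	*rh = (uint64_t)(p >> 64);
	*rl = (uint64_t)p;
}

/* (rh:rl) += (ih:il), propagating the carry out of the low 64-bit word. */
static void add128(uint64_t *rh, uint64_t *rl, uint64_t ih, uint64_t il)
{
	uint64_t lo = *rl + il;

	*rh += ih + (lo < il);	/* lo < il iff the low-word addition wrapped */
	*rl = lo;
}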
|
D | camellia_generic.c |
    367      u32 dw, tl, tr;  in camellia_setup_tail() local
    466      tl = subL[10] ^ (subR[10] & ~subR[8]);  in camellia_setup_tail()
    467      dw = tl & subL[8];  /* FL(kl1) */  in camellia_setup_tail()
    469      SUBKEY_L(7) = subL[6] ^ tl;  /* round 6 */  in camellia_setup_tail()
    475      tl = subL[7] ^ (subR[7] & ~subR[9]);  in camellia_setup_tail()
    476      dw = tl & subL[9];  /* FLinv(kl2) */  in camellia_setup_tail()
    478      SUBKEY_L(10) = tl ^ subL[11];  /* round 7 */  in camellia_setup_tail()
    488      tl = subL[18] ^ (subR[18] & ~subR[16]);  in camellia_setup_tail()
    489      dw = tl & subL[16];  /* FL(kl3) */  in camellia_setup_tail()
    491      SUBKEY_L(15) = subL[14] ^ tl;  /* round 12 */  in camellia_setup_tail()
    [all …]
|
/Linux-v5.4/drivers/isdn/mISDN/ |
D | fsm.c |
    98       struct FsmTimer *ft = from_timer(ft, t, tl);  in FsmExpireTimer()
    114      timer_setup(&ft->tl, FsmExpireTimer, 0);  in mISDN_FsmInitTimer()
    126      del_timer(&ft->tl);  in mISDN_FsmDelTimer()
    141      if (timer_pending(&ft->tl)) {  in mISDN_FsmAddTimer()
    152      ft->tl.expires = jiffies + (millisec * HZ) / 1000;  in mISDN_FsmAddTimer()
    153      add_timer(&ft->tl);  in mISDN_FsmAddTimer()
    169      if (timer_pending(&ft->tl))  in mISDN_FsmRestartTimer()
    170          del_timer(&ft->tl);  in mISDN_FsmRestartTimer()
    173      ft->tl.expires = jiffies + (millisec * HZ) / 1000;  in mISDN_FsmRestartTimer()
    174      add_timer(&ft->tl);  in mISDN_FsmRestartTimer()
|
D | timerdev.c |
    39       struct timer_list tl;  member
    77       del_timer_sync(&timer->tl);  in mISDN_close()
    158      struct mISDNtimer *timer = from_timer(timer, t, tl);  in dev_expire_timer()
    183      timer_setup(&timer->tl, dev_expire_timer, 0);  in misdn_add_timer()
    189      timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000);  in misdn_add_timer()
    190      add_timer(&timer->tl);  in misdn_add_timer()
    207      del_timer_sync(&timer->tl);  in misdn_del_timer()
|
D | dsp_tones.c |
    462      struct dsp *dsp = from_timer(dsp, t, tone.tl);  in dsp_tone_timeout()
    481      tone->tl.expires = jiffies + (pat->seq[index] * HZ) / 8000;  in dsp_tone_timeout()
    482      add_timer(&tone->tl);  in dsp_tone_timeout()
    507      if (dsp->features.hfc_loops && timer_pending(&tonet->tl))  in dsp_tone()
    508          del_timer(&tonet->tl);  in dsp_tone()
    541      if (timer_pending(&tonet->tl))  in dsp_tone()
    542          del_timer(&tonet->tl);  in dsp_tone()
    543      tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000;  in dsp_tone()
    544      add_timer(&tonet->tl);  in dsp_tone()
|
/Linux-v5.4/drivers/s390/net/ |
D | fsm.c |
    135      fsm_timer *this = from_timer(this, t, tl);  in fsm_expire_timer()
    151      timer_setup(&this->tl, fsm_expire_timer, 0);  in fsm_settimer()
    161      del_timer(&this->tl);  in fsm_deltimer()
    173      timer_setup(&this->tl, fsm_expire_timer, 0);  in fsm_addtimer()
    176      this->tl.expires = jiffies + (millisec * HZ) / 1000;  in fsm_addtimer()
    177      add_timer(&this->tl);  in fsm_addtimer()
    191      del_timer(&this->tl);  in fsm_modtimer()
    192      timer_setup(&this->tl, fsm_expire_timer, 0);  in fsm_modtimer()
    195      this->tl.expires = jiffies + (millisec * HZ) / 1000;  in fsm_modtimer()
    196      add_timer(&this->tl);  in fsm_modtimer()
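The mISDN and s390 fsm entries above all follow the same timer pattern: a struct timer_list embedded in an owning object, timer_setup() to bind the callback, from_timer() inside the callback to recover the owner, and an expiry computed from jiffies. A minimal sketch of that pattern with a hypothetical owner structure (my_state, my_expire and friends are illustrative names):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

/* Hypothetical owner structure embedding its timer, as the drivers above do. */
struct my_state {
	struct timer_list tl;
	int event;
};

static void my_expire(struct timer_list *t)
{
	/* Recover the owning structure from the embedded timer_list. */
	struct my_state *st = from_timer(st, t, tl);

	pr_debug("timer fired, event=%d\n", st->event);
}

static void my_state_start(struct my_state *st, unsigned int millisec)
{
	timer_setup(&st->tl, my_expire, 0);
	/* Equivalent to the jiffies + (millisec * HZ) / 1000 computation above. */
	mod_timer(&st->tl, jiffies + msecs_to_jiffies(millisec));
}

static void my_state_stop(struct my_state *st)
{
	del_timer_sync(&st->tl);	/* wait for a running callback to finish */
}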
|
/Linux-v5.4/arch/x86/crypto/ |
D | camellia_glue.c |
    801      u32 dw, tl, tr;  in camellia_setup_tail() local
    912      tl = (subRL[10] >> 32) ^ (subRL[10] & ~subRL[8]);  in camellia_setup_tail()
    913      dw = tl & (subRL[8] >> 32);  /* FL(kl1) */  in camellia_setup_tail()
    915      tt = (tr | ((u64)tl << 32));  in camellia_setup_tail()
    921      tl = (subRL[7] >> 32) ^ (subRL[7] & ~subRL[9]);  in camellia_setup_tail()
    922      dw = tl & (subRL[9] >> 32);  /* FLinv(kl2) */  in camellia_setup_tail()
    924      tt = (tr | ((u64)tl << 32));  in camellia_setup_tail()
    932      tl = (subRL[18] >> 32) ^ (subRL[18] & ~subRL[16]);  in camellia_setup_tail()
    933      dw = tl & (subRL[16] >> 32);  /* FL(kl3) */  in camellia_setup_tail()
    935      tt = (tr | ((u64)tl << 32));  in camellia_setup_tail()
    [all …]
|
/Linux-v5.4/kernel/sched/ |
D | topology.c |
    1315  sd_init(struct sched_domain_topology_level *tl,  in sd_init() argument
    1319      struct sd_data *sdd = &tl->data;  in sd_init()
    1327      sched_domains_curr_level = tl->numa_level;  in sd_init()
    1330      sd_weight = cpumask_weight(tl->mask(cpu));  in sd_init()
    1332      if (tl->sd_flags)  in sd_init()
    1333          sd_flags = (*tl->sd_flags)();  in sd_init()
    1369          .name = tl->name,  in sd_init()
    1373      cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));  in sd_init()
    1406      if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {  in sd_init()
    1449  #define for_each_sd_topology(tl) \  argument
    [all …]
|
/Linux-v5.4/lib/ |
D | inflate.c |
    590      struct huft *tl,  /* literal/length decoder tables */  in inflate_codes() argument
    618      if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)  in inflate_codes()
    771      struct huft *tl;  /* literal/length code table */  in inflate_fixed() local
    793      if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0) {  in inflate_fixed()
    804          huft_free(tl);  in inflate_fixed()
    813      if (inflate_codes(tl, td, bl, bd)) {  in inflate_fixed()
    820      huft_free(tl);  in inflate_fixed()
    837      struct huft *tl;  /* literal/length code table */  in inflate_dynamic() local
    901      if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0)  in inflate_dynamic()
    904      huft_free(tl);  in inflate_dynamic()
    [all …]
|
/Linux-v5.4/drivers/net/ethernet/qlogic/qed/ |
D | qed_vf.h |
    82       struct channel_tlv tl;  member
    89       struct channel_tlv tl;  member
    101      struct channel_tlv tl;  member
    150      struct channel_tlv tl;  member
    259      struct channel_tlv tl;  member
    372      struct channel_tlv tl;  member
    380      struct channel_tlv tl;  member
    386      struct channel_tlv tl;  member
    392      struct channel_tlv tl;  member
    404      struct channel_tlv tl;  member
    [all …]
|
/Linux-v5.4/arch/s390/kvm/ |
D | gaccess.c |
    31       unsigned long tl : 2;  /* Region- or Segment-Table Length */  member
    53       unsigned long tl : 2;  /* Region-Second-Table Length */  member
    68       unsigned long tl : 2;  /* Region-Third-Table Length */  member
    82       unsigned long tl : 2;  /* Segment-Table Length */  member
    636      if (vaddr.rfx01 > asce.tl)  in guest_translate()
    643      if (vaddr.rsx01 > asce.tl)  in guest_translate()
    650      if (vaddr.rtx01 > asce.tl)  in guest_translate()
    657      if (vaddr.sx01 > asce.tl)  in guest_translate()
    674      if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)  in guest_translate()
    692      if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)  in guest_translate()
    [all …]
|
/Linux-v5.4/drivers/net/wireless/intel/iwlegacy/ |
D | 4965-rs.c |
    235  il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)  in il4965_rs_tl_rm_old_stats() argument
    240      while (tl->queue_count && tl->time_stamp < oldest_time) {  in il4965_rs_tl_rm_old_stats()
    241          tl->total -= tl->packet_count[tl->head];  in il4965_rs_tl_rm_old_stats()
    242          tl->packet_count[tl->head] = 0;  in il4965_rs_tl_rm_old_stats()
    243          tl->time_stamp += TID_QUEUE_CELL_SPACING;  in il4965_rs_tl_rm_old_stats()
    244          tl->queue_count--;  in il4965_rs_tl_rm_old_stats()
    245          tl->head++;  in il4965_rs_tl_rm_old_stats()
    246          if (tl->head >= TID_QUEUE_MAX_SIZE)  in il4965_rs_tl_rm_old_stats()
    247              tl->head = 0;  in il4965_rs_tl_rm_old_stats()
    261      struct il_traffic_load *tl = NULL;  in il4965_rs_tl_add_packet() local
    [all …]
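il4965_rs_tl_rm_old_stats() above (and its near-identical copy in iwlwifi's dvm/rs.c further down) ages a per-TID traffic-load ring buffer: buckets older than the measurement window are subtracted from the running total and the head index wraps around. A standalone sketch of that aging step; the bucket count, spacing and window here are placeholders, not the drivers' TID_* constants:

#include <stdint.h>

#define QUEUE_MAX_SIZE		20	/* placeholder: buckets in the ring        */
#define QUEUE_CELL_SPACING	500	/* placeholder: milliseconds per bucket    */

/* Time-bucketed packet counter, shaped like il_traffic_load above. */
struct traffic_load {
	uint32_t time_stamp;	/* start time of the oldest bucket */
	uint32_t total;		/* packets summed over all buckets */
	uint32_t packet_count[QUEUE_MAX_SIZE];
	uint8_t  queue_count;	/* buckets currently in use        */
	uint8_t  head;		/* index of the oldest bucket      */
};

/* Drop buckets that have aged out of the measurement window. */
static void tl_rm_old_stats(struct traffic_load *tl, uint32_t curr_time,
			    uint32_t window_ms)
{
	uint32_t oldest_time = curr_time - window_ms;

	while (tl->queue_count && tl->time_stamp < oldest_time) {
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		tl->time_stamp += QUEUE_CELL_SPACING;
		tl->queue_count--;
		if (++tl->head >= QUEUE_MAX_SIZE)
			tl->head = 0;
	}
}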
|
/Linux-v5.4/arch/sparc/kernel/ |
D | etrap_64.S |
    221      rdpr    %tl, %g1
    223      wrpr    %g0, 1, %tl
    233      wrpr    %g0, 2, %tl
    248      wrpr    %g0, 3, %tl
    258      wrpr    %g0, 4, %tl
    271      wrpr    %g0, 1, %tl
|
/Linux-v5.4/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_vfpf.h |
    99       struct channel_tlv tl;  member
    105      struct channel_tlv tl;  member
    117      struct channel_tlv tl;  member
    213      struct channel_tlv tl;  member
    219      struct channel_tlv tl;  member
|
/Linux-v5.4/drivers/net/wireless/intel/iwlwifi/dvm/ |
D | rs.c |
    248  static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)  in rs_tl_rm_old_stats() argument
    253      while (tl->queue_count &&  in rs_tl_rm_old_stats()
    254             (tl->time_stamp < oldest_time)) {  in rs_tl_rm_old_stats()
    255          tl->total -= tl->packet_count[tl->head];  in rs_tl_rm_old_stats()
    256          tl->packet_count[tl->head] = 0;  in rs_tl_rm_old_stats()
    257          tl->time_stamp += TID_QUEUE_CELL_SPACING;  in rs_tl_rm_old_stats()
    258          tl->queue_count--;  in rs_tl_rm_old_stats()
    259          tl->head++;  in rs_tl_rm_old_stats()
    260          if (tl->head >= TID_QUEUE_MAX_SIZE)  in rs_tl_rm_old_stats()
    261              tl->head = 0;  in rs_tl_rm_old_stats()
    [all …]
|