| /Linux-v5.10/drivers/gpu/drm/amd/amdgpu/ |
| D | amdgpu_vcn.c |
    71   INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);   in amdgpu_vcn_sw_init()
    72   mutex_init(&adev->vcn.vcn_pg_lock);   in amdgpu_vcn_sw_init()
    73   mutex_init(&adev->vcn.vcn1_jpeg1_workaround);   in amdgpu_vcn_sw_init()
    74   atomic_set(&adev->vcn.total_submission_cnt, 0);   in amdgpu_vcn_sw_init()
    75   for (i = 0; i < adev->vcn.num_vcn_inst; i++)   in amdgpu_vcn_sw_init()
    76   atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);   in amdgpu_vcn_sw_init()
    91   adev->vcn.indirect_sram = true;   in amdgpu_vcn_sw_init()
    101  adev->vcn.indirect_sram = true;   in amdgpu_vcn_sw_init()
    107  adev->vcn.indirect_sram = true;   in amdgpu_vcn_sw_init()
    113  adev->vcn.indirect_sram = true;   in amdgpu_vcn_sw_init()
    [all …]
|
| D | vcn_v2_5.c |
    79   adev->vcn.num_vcn_inst = 2;   in vcn_v2_5_early_init()
    80   adev->vcn.harvest_config = 0;   in vcn_v2_5_early_init()
    81   adev->vcn.num_enc_rings = 1;   in vcn_v2_5_early_init()
    85   adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;   in vcn_v2_5_early_init()
    86   for (i = 0; i < adev->vcn.num_vcn_inst; i++) {   in vcn_v2_5_early_init()
    89   adev->vcn.harvest_config |= 1 << i;   in vcn_v2_5_early_init()
    91   if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |   in vcn_v2_5_early_init()
    96   adev->vcn.num_enc_rings = 2;   in vcn_v2_5_early_init()
    119  for (j = 0; j < adev->vcn.num_vcn_inst; j++) {   in vcn_v2_5_sw_init()
    120  if (adev->vcn.harvest_config & (1 << j))   in vcn_v2_5_sw_init()
    [all …]
|
| D | vcn_v2_0.c |
    71   adev->vcn.num_vcn_inst = 1;   in vcn_v2_0_early_init()
    73   adev->vcn.num_enc_rings = 1;   in vcn_v2_0_early_init()
    75   adev->vcn.num_enc_rings = 2;   in vcn_v2_0_early_init()
    101  &adev->vcn.inst->irq);   in vcn_v2_0_sw_init()
    106  for (i = 0; i < adev->vcn.num_enc_rings; ++i) {   in vcn_v2_0_sw_init()
    109  &adev->vcn.inst->irq);   in vcn_v2_0_sw_init()
    120  hdr = (const struct common_firmware_header *)adev->vcn.fw->data;   in vcn_v2_0_sw_init()
    122  adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;   in vcn_v2_0_sw_init()
    132  ring = &adev->vcn.inst->ring_dec;   in vcn_v2_0_sw_init()
    135  ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;   in vcn_v2_0_sw_init()
    [all …]
|
| D | vcn_v3_0.c |
    86   adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;   in vcn_v3_0_early_init()
    87   adev->vcn.harvest_config = 0;   in vcn_v3_0_early_init()
    88   adev->vcn.num_enc_rings = 1;   in vcn_v3_0_early_init()
    95   adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;   in vcn_v3_0_early_init()
    96   for (i = 0; i < adev->vcn.num_vcn_inst; i++) {   in vcn_v3_0_early_init()
    99   adev->vcn.harvest_config |= 1 << i;   in vcn_v3_0_early_init()
    102  if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |   in vcn_v3_0_early_init()
    107  adev->vcn.num_vcn_inst = 1;   in vcn_v3_0_early_init()
    109  adev->vcn.num_enc_rings = 2;   in vcn_v3_0_early_init()
    139  hdr = (const struct common_firmware_header *)adev->vcn.fw->data;   in vcn_v3_0_sw_init()
    [all …]
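
The vcn_v2_5_early_init()/vcn_v3_0_early_init() excerpts above share one idiom: every VCN instance that the hardware reports as harvested (fused off) sets one bit in adev->vcn.harvest_config, and later loops such as the one in vcn_v2_5_sw_init() skip any instance whose bit is set. The following is a minimal standalone sketch of that bitmask convention only; the instance count and the instance_is_harvested() stand-in are invented for illustration and are not the driver's actual fuse query.

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_VCN_INSTANCES 2	/* stand-in for VCN_INSTANCES_SIENNA_CICHLID, etc. */

/* Hypothetical stand-in for the fuse/register read the driver performs;
 * here instance 0 is pretended to be fused off. */
static bool instance_is_harvested(int i)
{
	return i == 0;
}

int main(void)
{
	unsigned int harvest_config = 0;
	int i;

	/* Build the mask: one bit per harvested instance, as in vcn_v*_early_init(). */
	for (i = 0; i < MAX_VCN_INSTANCES; i++)
		if (instance_is_harvested(i))
			harvest_config |= 1u << i;

	/* Later setup loops skip harvested instances, as in vcn_v2_5_sw_init(). */
	for (i = 0; i < MAX_VCN_INSTANCES; i++) {
		if (harvest_config & (1u << i))
			continue;
		printf("initializing VCN instance %d\n", i);
	}
	return 0;
}
```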
|
| D | vcn_v1_0.c |
    70   adev->vcn.num_vcn_inst = 1;   in vcn_v1_0_early_init()
    71   adev->vcn.num_enc_rings = 2;   in vcn_v1_0_early_init()
    97   VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);   in vcn_v1_0_sw_init()
    102  for (i = 0; i < adev->vcn.num_enc_rings; ++i) {   in vcn_v1_0_sw_init()
    104  &adev->vcn.inst->irq);   in vcn_v1_0_sw_init()
    114  adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;   in vcn_v1_0_sw_init()
    118  hdr = (const struct common_firmware_header *)adev->vcn.fw->data;   in vcn_v1_0_sw_init()
    120  adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;   in vcn_v1_0_sw_init()
    130  ring = &adev->vcn.inst->ring_dec;   in vcn_v1_0_sw_init()
    132  r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,   in vcn_v1_0_sw_init()
    [all …]
|
| D | vega10_reg_init.c |
    83   adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL64_VCN0_1;   in vega10_doorbell_index_init()
    84   adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_DOORBELL64_VCN2_3;   in vega10_doorbell_index_init()
    85   adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_DOORBELL64_VCN4_5;   in vega10_doorbell_index_init()
    86   adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_DOORBELL64_VCN6_7;   in vega10_doorbell_index_init()
|
| D | vega20_reg_init.c |
    89   adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCN0_1;   in vega20_doorbell_index_init()
    90   adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCN2_3;   in vega20_doorbell_index_init()
    91   adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCN4_5;   in vega20_doorbell_index_init()
    92   adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCN6_7;   in vega20_doorbell_index_init()
|
| D | jpeg_v1_0.c |
    594  bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);   in jpeg_v1_0_ring_begin_use()
    597  mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);   in jpeg_v1_0_ring_begin_use()
    599  if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))   in jpeg_v1_0_ring_begin_use()
    602  for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {   in jpeg_v1_0_ring_begin_use()
    603  if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))   in jpeg_v1_0_ring_begin_use()
|
| D | amdgpu_vcn.h |
    142  *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
    143  *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \
|
| D | jpeg_v3_0.c |
    94   ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;   in jpeg_v3_0_sw_init()
    141  (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);   in jpeg_v3_0_hw_init()
|
| D | amdgpu_kms.c |
    235  fw_info->ver = adev->vcn.fw_version;   in amdgpu_firmware_info()
    407  for (i = 0; i < adev->vcn.num_vcn_inst; i++) {   in amdgpu_hw_ip_info()
    411  if (adev->vcn.inst[i].ring_dec.sched.ready)   in amdgpu_hw_ip_info()
    419  for (i = 0; i < adev->vcn.num_vcn_inst; i++) {   in amdgpu_hw_ip_info()
    423  for (j = 0; j < adev->vcn.num_enc_rings; j++)   in amdgpu_hw_ip_info()
    424  if (adev->vcn.inst[i].ring_enc[j].sched.ready)   in amdgpu_hw_ip_info()
|
| D | nv.c |
    659  adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;   in nv_init_doorbell_index()
    660  adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;   in nv_init_doorbell_index()
    661  adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;   in nv_init_doorbell_index()
    662  adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;   in nv_init_doorbell_index()
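
Across vega10_reg_init.c, vega20_reg_init.c and nv.c each ASIC assigns its own doorbell slots for the VCN ring pairs, and the vcn_v2_0_sw_init()/jpeg_v3_0_sw_init() excerpts consume them as vcn_ring0_1 << 1 (with + 1 for the JPEG ring). A plausible reading, sketched below, is that the stored index counts 64-bit doorbell pairs, so shifting left by one yields a 32-bit doorbell slot and the two rings land on adjacent slots; the unit convention and the constant value used here are assumptions for illustration, not something these excerpts prove.

```c
#include <stdio.h>

/* Invented value standing in for AMDGPU_NAVI10_DOORBELL64_VCN0_1; the real
 * constants live in the driver's doorbell index headers. */
#define EXAMPLE_DOORBELL64_VCN0_1 0x70u

int main(void)
{
	unsigned int vcn_ring0_1 = EXAMPLE_DOORBELL64_VCN0_1;

	/* As in vcn_v2_0_sw_init(): the VCN decode ring takes the even slot. */
	unsigned int vcn_dec_doorbell = vcn_ring0_1 << 1;

	/* As in jpeg_v3_0_sw_init(): the JPEG ring takes the adjacent odd slot. */
	unsigned int jpeg_doorbell = (vcn_ring0_1 << 1) + 1;

	printf("VCN dec doorbell slot: 0x%x\n", vcn_dec_doorbell);
	printf("JPEG doorbell slot:    0x%x\n", jpeg_doorbell);
	return 0;
}
```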
|
| /Linux-v5.10/fs/ntfs/ |
| D | runlist.c |
    151  if ((dst->vcn + dst->length) != src->vcn)   in ntfs_are_rl_mergeable()
    238  dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;   in ntfs_rl_append()
    242  dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;   in ntfs_rl_append()
    286  disc = (src[0].vcn > 0);   in ntfs_rl_insert()
    296  disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);   in ntfs_rl_insert()
    325  dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;   in ntfs_rl_insert()
    328  dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;   in ntfs_rl_insert()
    333  dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;   in ntfs_rl_insert()
    334  dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;   in ntfs_rl_insert()
    336  dst[loc].vcn = 0;   in ntfs_rl_insert()
    [all …]
|
| D | index.c |
    108  VCN vcn, old_vcn;   in ntfs_index_lookup() local
    248  vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));   in ntfs_index_lookup()
    264  page = ntfs_map_page(ia_mapping, vcn <<   in ntfs_index_lookup()
    276  ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<   in ntfs_index_lookup()
    288  (long long)vcn, idx_ni->mft_no);   in ntfs_index_lookup()
    291  if (sle64_to_cpu(ia->index_block_vcn) != vcn) {   in ntfs_index_lookup()
    297  (unsigned long long)vcn, idx_ni->mft_no);   in ntfs_index_lookup()
    305  "driver bug.", (unsigned long long)vcn,   in ntfs_index_lookup()
    316  "driver.", (unsigned long long)vcn,   in ntfs_index_lookup()
    324  (unsigned long long)vcn, idx_ni->mft_no);   in ntfs_index_lookup()
    [all …]
|
| D | logfile.c |
    715  VCN vcn, end_vcn;   in ntfs_empty_logfile() local
    739  vcn = 0;   in ntfs_empty_logfile()
    747  if (unlikely(!rl || vcn < rl->vcn || !rl->length)) {   in ntfs_empty_logfile()
    749  err = ntfs_map_runlist_nolock(log_ni, vcn, NULL);   in ntfs_empty_logfile()
    756  BUG_ON(!rl || vcn < rl->vcn || !rl->length);   in ntfs_empty_logfile()
    759  while (rl->length && vcn >= rl[1].vcn)   in ntfs_empty_logfile()
    772  vcn = rl->vcn;   in ntfs_empty_logfile()
    783  if (rl[1].vcn > end_vcn)   in ntfs_empty_logfile()
    784  len = end_vcn - rl->vcn;   in ntfs_empty_logfile()
    820  } while ((++rl)->vcn < end_vcn);   in ntfs_empty_logfile()
|
| D | dir.c |
    80   VCN vcn, old_vcn;   in ntfs_lookup_inode_by_name() local
    292  vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));   in ntfs_lookup_inode_by_name()
    308  page = ntfs_map_page(ia_mapping, vcn <<   in ntfs_lookup_inode_by_name()
    320  ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<   in ntfs_lookup_inode_by_name()
    332  (unsigned long long)vcn, dir_ni->mft_no);   in ntfs_lookup_inode_by_name()
    335  if (sle64_to_cpu(ia->index_block_vcn) != vcn) {   in ntfs_lookup_inode_by_name()
    341  (unsigned long long)vcn, dir_ni->mft_no);   in ntfs_lookup_inode_by_name()
    350  (unsigned long long)vcn, dir_ni->mft_no,   in ntfs_lookup_inode_by_name()
    360  "driver.", (unsigned long long)vcn,   in ntfs_lookup_inode_by_name()
    368  (unsigned long long)vcn, dir_ni->mft_no);   in ntfs_lookup_inode_by_name()
    [all …]
|
| D | attrib.h |
    49   extern int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn,
    51   extern int ntfs_map_runlist(ntfs_inode *ni, VCN vcn);
    53   extern LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
    57   const VCN vcn, ntfs_attr_search_ctx *ctx);
|
| D | aops.c |
    167  VCN vcn;   in ntfs_read_block() local
    242  vcn = (VCN)iblock << blocksize_bits >>   in ntfs_read_block()
    253  while (rl->length && rl[1].vcn <= vcn)   in ntfs_read_block()
    255  lcn = ntfs_rl_vcn_to_lcn(rl, vcn);   in ntfs_read_block()
    283  err = ntfs_map_runlist(ni, vcn);   in ntfs_read_block()
    308  ni->type, (unsigned long long)vcn,   in ntfs_read_block()
    532  VCN vcn;   in ntfs_write_block() local
    699  vcn = (VCN)block << blocksize_bits;   in ntfs_write_block()
    700  vcn_ofs = vcn & vol->cluster_size_mask;   in ntfs_write_block()
    701  vcn >>= vol->cluster_size_bits;   in ntfs_write_block()
    [all …]
|
| D | runlist.h |
    29   VCN vcn; /* vcn = Starting virtual cluster number. */   member
    65   extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
    70   const VCN vcn);
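
runlist.h defines the vcn member shown here alongside the matching lcn and length fields of runlist_element, and the walk that recurs in the aops.c, compress.c and mft.c excerpts (`while (rl->length && rl[1].vcn <= vcn) rl++;` followed by a translation) is the core of the vcn-to-lcn lookup that ntfs_rl_vcn_to_lcn() declares here. Below is a self-contained sketch of that mapping under simplified assumptions: the element type is reduced to three fields, holes and out-of-range VCNs both return -1 instead of the driver's distinct LCN_* codes, and the runlist itself is made up.

```c
#include <stdint.h>
#include <stdio.h>

typedef int64_t VCN;	/* virtual cluster number: offset within the attribute */
typedef int64_t LCN;	/* logical cluster number: position on the volume      */

/* Simplified stand-in for the NTFS runlist_element of runlist.h. */
struct rl_element {
	VCN vcn;	/* starting virtual cluster of this run    */
	LCN lcn;	/* starting logical cluster, < 0 for a hole */
	int64_t length;	/* run length in clusters, 0 terminates    */
};

/* Sketch of the lookup ntfs_rl_vcn_to_lcn() performs: find the run that
 * contains vcn and add the offset into it. */
static LCN rl_vcn_to_lcn(const struct rl_element *rl, VCN vcn)
{
	/* Same walk as in the aops.c/mft.c excerpts: advance while the next
	 * run still starts at or before the wanted vcn. */
	while (rl->length && rl[1].vcn <= vcn)
		rl++;

	if (!rl->length || vcn < rl->vcn || rl->lcn < 0)
		return -1;	/* terminator, out of range, or sparse hole */
	return rl->lcn + (vcn - rl->vcn);
}

int main(void)
{
	/* Made-up runlist: 4 clusters at LCN 100, a 2-cluster hole,
	 * 4 clusters at LCN 300, then the terminator. */
	const struct rl_element rl[] = {
		{  0, 100, 4 },
		{  4,  -1, 2 },
		{  6, 300, 4 },
		{ 10,  -1, 0 },
	};

	printf("vcn 2 -> lcn %lld\n", (long long)rl_vcn_to_lcn(rl, 2));	/* 102 */
	printf("vcn 5 -> lcn %lld\n", (long long)rl_vcn_to_lcn(rl, 5));	/* hole: -1 */
	printf("vcn 7 -> lcn %lld\n", (long long)rl_vcn_to_lcn(rl, 7));	/* 301 */
	return 0;
}
```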
|
| D | attrib.c |
    70   int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)   in ntfs_map_runlist_nolock() argument
    84   (unsigned long long)vcn);   in ntfs_map_runlist_nolock()
    120  if (vcn >= allocated_size_vcn || (a->type == ni->type &&   in ntfs_map_runlist_nolock()
    125  <= vcn && end_vcn >= vcn))   in ntfs_map_runlist_nolock()
    153  CASE_SENSITIVE, vcn, NULL, 0, ctx);   in ntfs_map_runlist_nolock()
    169  if (unlikely(vcn && vcn >= end_vcn)) {   in ntfs_map_runlist_nolock()
    284  int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)   in ntfs_map_runlist() argument
    290  if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=   in ntfs_map_runlist()
    292  err = ntfs_map_runlist_nolock(ni, vcn, NULL);   in ntfs_map_runlist()
    327  LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,   in ntfs_attr_vcn_to_lcn_nolock() argument
    [all …]
|
| D | compress.c |
    478  VCN vcn;   in ntfs_read_compressed_block() local
    596  for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;   in ntfs_read_compressed_block()
    597  vcn++) {   in ntfs_read_compressed_block()
    607  while (rl->length && rl[1].vcn <= vcn)   in ntfs_read_compressed_block()
    609  lcn = ntfs_rl_vcn_to_lcn(rl, vcn);   in ntfs_read_compressed_block()
    613  (unsigned long long)vcn,   in ntfs_read_compressed_block()
    630  if (!ntfs_map_runlist(ni, vcn))   in ntfs_read_compressed_block()
    730  if (vcn == start_vcn - cb_clusters) {   in ntfs_read_compressed_block()
    773  } else if (vcn == start_vcn) {   in ntfs_read_compressed_block()
|
| D | lcnalloc.c |
    388  rl[rlpos].vcn = rl[rlpos - 1].vcn +   in ntfs_cluster_alloc()
    393  rl[rlpos].vcn = start_vcn;   in ntfs_cluster_alloc()
    726  rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length;   in ntfs_cluster_alloc()
    883  delta = start_vcn - rl->vcn;   in __ntfs_cluster_free()
    916  VCN vcn;   in __ntfs_cluster_free() local
    919  vcn = rl->vcn;   in __ntfs_cluster_free()
    920  rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);   in __ntfs_cluster_free()
|
| D | mft.c |
    525  VCN vcn;   in ntfs_sync_mft_mirror() local
    531  vcn = ((VCN)mft_no << vol->mft_record_size_bits) +   in ntfs_sync_mft_mirror()
    533  vcn_ofs = vcn & vol->cluster_size_mask;   in ntfs_sync_mft_mirror()
    534  vcn >>= vol->cluster_size_bits;   in ntfs_sync_mft_mirror()
    546  while (rl->length && rl[1].vcn <= vcn)   in ntfs_sync_mft_mirror()
    548  lcn = ntfs_rl_vcn_to_lcn(rl, vcn);   in ntfs_sync_mft_mirror()
    718  VCN vcn;   in write_mft_record_nolock() local
    724  vcn = ((VCN)ni->mft_no << vol->mft_record_size_bits) +   in write_mft_record_nolock()
    726  vcn_ofs = vcn & vol->cluster_size_mask;   in write_mft_record_nolock()
    727  vcn >>= vol->cluster_size_bits;   in write_mft_record_nolock()
    [all …]
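
The ntfs_sync_mft_mirror()/write_mft_record_nolock() excerpts first turn an MFT record number into a byte offset (mft_no << mft_record_size_bits, plus a further term elided in the excerpt), then split that offset with the cluster-size mask and shift into the offset within the cluster and the VCN proper, before running the same runlist walk as above. A small worked example of the shift-and-mask arithmetic, using illustrative sizes (1 KiB records, 4 KiB clusters) rather than values read from a real volume, follows.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative geometry, not taken from a real NTFS volume. */
	const unsigned int mft_record_size_bits = 10;	/* 1 KiB MFT records */
	const unsigned int cluster_size_bits = 12;	/* 4 KiB clusters    */
	const uint64_t cluster_size_mask = (1u << cluster_size_bits) - 1;

	uint64_t mft_no = 37;	/* arbitrary record number */

	/* Byte offset of the record inside the $MFT data, as in mft.c
	 * (the excerpt adds a further offset that is elided here). */
	uint64_t ofs = mft_no << mft_record_size_bits;

	uint64_t vcn_ofs = ofs & cluster_size_mask;	/* offset within the cluster */
	uint64_t vcn = ofs >> cluster_size_bits;	/* virtual cluster number    */

	printf("mft_no %llu -> vcn %llu, offset in cluster 0x%llx\n",
	       (unsigned long long)mft_no,
	       (unsigned long long)vcn,
	       (unsigned long long)vcn_ofs);
	return 0;
}
```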
|
| D | file.c |
    573   VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;   in ntfs_prepare_pages_for_non_resident_write() local
    628   vcn = lcn = -1;   in ntfs_prepare_pages_for_non_resident_write()
    706   cdelta = bh_cpos - vcn;   in ntfs_prepare_pages_for_non_resident_write()
    836   while (rl->length && rl[1].vcn <= bh_cpos)   in ntfs_prepare_pages_for_non_resident_write()
    845   vcn = bh_cpos;   in ntfs_prepare_pages_for_non_resident_write()
    846   vcn_len = rl[1].vcn - vcn;   in ntfs_prepare_pages_for_non_resident_write()
    857   if (likely(vcn + vcn_len >= cend)) {   in ntfs_prepare_pages_for_non_resident_write()
    1042  vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);   in ntfs_prepare_pages_for_non_resident_write()
    1043  rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);   in ntfs_prepare_pages_for_non_resident_write()
    1060  mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,   in ntfs_prepare_pages_for_non_resident_write()
    [all …]
|
| D | debug.c |
    143  (long long)(rl + i)->vcn, lcn_str[index],   in ntfs_debug_dump_runlist()
    149  (long long)(rl + i)->vcn,   in ntfs_debug_dump_runlist()
|