Lines matching refs: nd_mapping
272 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in nd_namespace_blk_size() local
273 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in nd_namespace_blk_size()
290 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in __nd_namespace_blk_validate() local
291 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in __nd_namespace_blk_validate()
422 struct nd_mapping *nd_mapping, struct nd_label_id *label_id, in scan_free() argument
426 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in scan_free()
485 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in shrink_dpa_allocation() local
488 rc = scan_free(nd_region, nd_mapping, label_id, n); in shrink_dpa_allocation()
497 struct nd_region *nd_region, struct nd_mapping *nd_mapping, in init_dpa_allocation() argument
501 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in init_dpa_allocation()
508 first_dpa = nd_mapping->start + nd_mapping->size - n; in init_dpa_allocation()
510 first_dpa = nd_mapping->start; in init_dpa_allocation()
558 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in space_valid() local
561 .nd_mapping = nd_mapping, in space_valid()
562 .available = nd_mapping->size, in space_valid()
595 struct nd_mapping *nd_mapping, struct nd_label_id *label_id, in scan_allocate() argument
598 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; in scan_allocate()
600 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in scan_allocate()
609 valid.start = nd_mapping->start; in scan_allocate()
624 if (res->end < nd_mapping->start) in scan_allocate()
628 if (!first++ && res->start > nd_mapping->start) { in scan_allocate()
629 valid.start = nd_mapping->start; in scan_allocate()
742 return init_dpa_allocation(label_id, nd_region, nd_mapping, n); in scan_allocate()
747 struct nd_mapping *nd_mapping, struct nd_label_id *label_id) in merge_dpa() argument
749 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in merge_dpa()
794 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in __reserve_free_pmem() local
797 if (nd_mapping->nvdimm != nvdimm) in __reserve_free_pmem()
800 n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem); in __reserve_free_pmem()
803 rem = scan_allocate(nd_region, nd_mapping, &label_id, n); in __reserve_free_pmem()
815 struct nd_mapping *nd_mapping) in release_free_pmem() argument
817 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in release_free_pmem()
826 struct nd_mapping *nd_mapping) in reserve_free_pmem() argument
828 struct nvdimm *nvdimm = nd_mapping->nvdimm; in reserve_free_pmem()
834 release_free_pmem(nvdimm_bus, nd_mapping); in reserve_free_pmem()
859 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in grow_dpa_allocation() local
871 rc = reserve_free_pmem(nvdimm_bus, nd_mapping); in grow_dpa_allocation()
875 rem = scan_allocate(nd_region, nd_mapping, in grow_dpa_allocation()
878 release_free_pmem(nvdimm_bus, nd_mapping); in grow_dpa_allocation()
892 rc = merge_dpa(nd_region, nd_mapping, label_id); in grow_dpa_allocation()
912 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in nd_namespace_pmem_set_resource() local
913 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in nd_namespace_pmem_set_resource()
927 offset = (res->start - nd_mapping->start) in nd_namespace_pmem_set_resource()
955 struct nd_mapping *nd_mapping; in __size_store() local
998 nd_mapping = &nd_region->mapping[i]; in __size_store()
999 ndd = to_ndd(nd_mapping); in __size_store()
1130 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nvdimm_namespace_locked() local
1131 struct nvdimm *nvdimm = nd_mapping->nvdimm; in nvdimm_namespace_locked()
1204 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in namespace_update_uuid() local
1213 if (list_empty(&nd_mapping->labels)) in namespace_update_uuid()
1220 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in namespace_update_uuid() local
1221 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in namespace_update_uuid()
1230 mutex_lock(&nd_mapping->lock); in namespace_update_uuid()
1231 list_for_each_entry(label_ent, &nd_mapping->labels, list) { in namespace_update_uuid()
1242 mutex_unlock(&nd_mapping->lock); in namespace_update_uuid()
1403 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in dpa_extents_show() local
1404 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in dpa_extents_show()
1424 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in btt_claim_class() local
1425 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in btt_claim_class()
1841 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in has_uuid_at_pos() local
1843 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in has_uuid_at_pos()
1847 list_for_each_entry(label_ent, &nd_mapping->labels, list) { in has_uuid_at_pos()
1899 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in select_pmem_id() local
1900 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in select_pmem_id()
1905 lockdep_assert_held(&nd_mapping->lock); in select_pmem_id()
1906 list_for_each_entry(label_ent, &nd_mapping->labels, list) { in select_pmem_id()
1924 hw_start = nd_mapping->start; in select_pmem_id()
1925 hw_end = hw_start + nd_mapping->size; in select_pmem_id()
1938 list_move(&label_ent->list, &nd_mapping->labels); in select_pmem_id()
1957 struct nd_mapping *nd_mapping; in create_namespace_pmem() local
2030 nd_mapping = &nd_region->mapping[i]; in create_namespace_pmem()
2031 label_ent = list_first_entry_or_null(&nd_mapping->labels, in create_namespace_pmem()
2049 ndd = to_ndd(nd_mapping); in create_namespace_pmem()
2222 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in add_namespace_resource() local
2223 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in add_namespace_resource()
2260 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in create_namespace_blk() local
2262 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in create_namespace_blk()
2347 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in scan_labels() local
2348 resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; in scan_labels()
2351 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { in scan_labels()
2366 if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start || in scan_labels()
2385 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in scan_labels()
2414 nd_mapping_free_labels(nd_mapping); in scan_labels()
2446 nd_mapping = &nd_region->mapping[i]; in scan_labels()
2447 if (list_empty(&nd_mapping->labels)) { in scan_labels()
2453 list_for_each_safe(l, e, &nd_mapping->labels) { in scan_labels()
2458 nd_mapping_free_labels(nd_mapping); in scan_labels()
2459 list_splice_init(&list, &nd_mapping->labels); in scan_labels()
2482 struct nd_mapping *nd_mapping; in create_namespaces() local
2491 nd_mapping = &nd_region->mapping[i]; in create_namespaces()
2492 mutex_lock_nested(&nd_mapping->lock, i); in create_namespaces()
2500 nd_mapping = &nd_region->mapping[reverse]; in create_namespaces()
2501 mutex_unlock(&nd_mapping->lock); in create_namespaces()
2513 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in deactivate_labels() local
2514 struct nvdimm_drvdata *ndd = nd_mapping->ndd; in deactivate_labels()
2515 struct nvdimm *nvdimm = nd_mapping->nvdimm; in deactivate_labels()
2517 mutex_lock(&nd_mapping->lock); in deactivate_labels()
2518 nd_mapping_free_labels(nd_mapping); in deactivate_labels()
2519 mutex_unlock(&nd_mapping->lock); in deactivate_labels()
2522 nd_mapping->ndd = NULL; in deactivate_labels()
2533 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in init_active_labels() local
2534 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); in init_active_labels()
2535 struct nvdimm *nvdimm = nd_mapping->nvdimm; in init_active_labels()
2552 dev_name(&nd_mapping->nvdimm->dev), in init_active_labels()
2557 nd_mapping->ndd = ndd; in init_active_labels()
2580 mutex_lock(&nd_mapping->lock); in init_active_labels()
2581 list_add_tail(&label_ent->list, &nd_mapping->labels); in init_active_labels()
2582 mutex_unlock(&nd_mapping->lock); in init_active_labels()