Lines Matching refs: acpi_desc

88 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) in to_acpi_dev() argument
90 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; in to_acpi_dev()
100 return to_acpi_device(acpi_desc->dev); in to_acpi_dev()
438 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); in acpi_nfit_ctl() local
442 struct device *dev = acpi_desc->dev; in acpi_nfit_ctl()
475 struct acpi_device *adev = to_acpi_dev(acpi_desc); in acpi_nfit_ctl()
483 dsm_mask = acpi_desc->family_dsm_mask[family]; in acpi_nfit_ctl()
486 dsm_mask = acpi_desc->bus_dsm_mask; in acpi_nfit_ctl()
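
The acpi_nfit_ctl() lines above show how the bus-level command path chooses a DSM mask: acpi_desc->family_dsm_mask[family] for family-specific ND_CMD_CALL requests, acpi_desc->bus_dsm_mask otherwise, and only functions whose bit is set in the chosen mask are forwarded. Below is a minimal standalone userspace sketch of that gating logic, not the kernel code itself; the FAMILY_* constants, struct acpi_nfit_desc_model and check_bus_cmd() are invented for illustration.

/* Hypothetical model of the DSM-mask gating seen in acpi_nfit_ctl():
 * pick a mask by family, then reject functions whose bit is clear.
 * All names and values here are illustrative, not kernel API. */
#include <stdio.h>
#include <errno.h>

enum { FAMILY_NFIT, FAMILY_INTEL, FAMILY_COUNT };   /* loosely mirrors NVDIMM_BUS_FAMILY_* */

struct acpi_nfit_desc_model {
    unsigned long bus_dsm_mask;                 /* mask for native bus commands */
    unsigned long family_dsm_mask[FAMILY_COUNT]; /* masks for ND_CMD_CALL passthrough */
};

static int check_bus_cmd(struct acpi_nfit_desc_model *d, int is_call,
        unsigned int family, unsigned int func)
{
    unsigned long dsm_mask;

    if (is_call) {
        if (family >= FAMILY_COUNT)
            return -EINVAL;
        dsm_mask = d->family_dsm_mask[family];
    } else {
        dsm_mask = d->bus_dsm_mask;
    }

    if (!(dsm_mask & (1UL << func)))
        return -ENOTTY;   /* function not advertised/enabled */
    return 0;
}

int main(void)
{
    struct acpi_nfit_desc_model d = {
        .bus_dsm_mask = 1UL << 1,
        .family_dsm_mask = { [FAMILY_INTEL] = 1UL << 2 },
    };

    printf("native func 1: %d\n", check_bus_cmd(&d, 0, 0, 1));            /* 0 */
    printf("intel func 3:  %d\n", check_bus_cmd(&d, 1, FAMILY_INTEL, 3)); /* -ENOTTY */
    return 0;
}
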
684 static bool add_spa(struct acpi_nfit_desc *acpi_desc, in add_spa() argument
688 struct device *dev = acpi_desc->dev; in add_spa()
696 list_move_tail(&nfit_spa->list, &acpi_desc->spas); in add_spa()
707 list_add_tail(&nfit_spa->list, &acpi_desc->spas); in add_spa()
714 static bool add_memdev(struct acpi_nfit_desc *acpi_desc, in add_memdev() argument
718 struct device *dev = acpi_desc->dev; in add_memdev()
726 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); in add_memdev()
736 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); in add_memdev()
746 struct acpi_nfit_desc *acpi_desc; in nfit_get_smbios_id() local
751 list_for_each_entry(acpi_desc, &acpi_descs, list) { in nfit_get_smbios_id()
752 mutex_lock(&acpi_desc->init_mutex); in nfit_get_smbios_id()
753 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { in nfit_get_smbios_id()
758 mutex_unlock(&acpi_desc->init_mutex); in nfit_get_smbios_id()
763 mutex_unlock(&acpi_desc->init_mutex); in nfit_get_smbios_id()
785 static bool add_dcr(struct acpi_nfit_desc *acpi_desc, in add_dcr() argument
789 struct device *dev = acpi_desc->dev; in add_dcr()
797 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); in add_dcr()
807 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); in add_dcr()
813 static bool add_bdw(struct acpi_nfit_desc *acpi_desc, in add_bdw() argument
817 struct device *dev = acpi_desc->dev; in add_bdw()
824 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); in add_bdw()
834 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); in add_bdw()
847 static bool add_idt(struct acpi_nfit_desc *acpi_desc, in add_idt() argument
851 struct device *dev = acpi_desc->dev; in add_idt()
862 list_move_tail(&nfit_idt->list, &acpi_desc->idts); in add_idt()
873 list_add_tail(&nfit_idt->list, &acpi_desc->idts); in add_idt()
886 static bool add_flush(struct acpi_nfit_desc *acpi_desc, in add_flush() argument
890 struct device *dev = acpi_desc->dev; in add_flush()
902 list_move_tail(&nfit_flush->list, &acpi_desc->flushes); in add_flush()
913 list_add_tail(&nfit_flush->list, &acpi_desc->flushes); in add_flush()
919 static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc, in add_platform_cap() argument
922 struct device *dev = acpi_desc->dev; in add_platform_cap()
926 acpi_desc->platform_cap = pcap->capabilities & mask; in add_platform_cap()
927 dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap); in add_platform_cap()
931 static void *add_table(struct acpi_nfit_desc *acpi_desc, in add_table() argument
934 struct device *dev = acpi_desc->dev; in add_table()
950 if (!add_spa(acpi_desc, prev, table)) in add_table()
954 if (!add_memdev(acpi_desc, prev, table)) in add_table()
958 if (!add_dcr(acpi_desc, prev, table)) in add_table()
962 if (!add_bdw(acpi_desc, prev, table)) in add_table()
966 if (!add_idt(acpi_desc, prev, table)) in add_table()
970 if (!add_flush(acpi_desc, prev, table)) in add_table()
977 if (!add_platform_cap(acpi_desc, table)) in add_table()
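
The add_spa() through add_flush() helpers above share one pattern: search the "prev" list (tables seen on the last NFIT parse, cut away by acpi_nfit_init()), list_move_tail() a matching entry back onto the corresponding acpi_desc list, and otherwise devm-allocate a fresh entry and list_add_tail() it; add_table() merely dispatches on the sub-table type. The following is a minimal standalone userspace model of that reuse-or-allocate step; list_node, nfit_entry and add_or_reuse() are made-up names, and the range_index comparison stands in for the kernel's full length check and memcmp() of the raw sub-table.

/* Hypothetical userspace model of the add_spa()/add_memdev() "reuse or
 * allocate" pattern: entries seen in the previous NFIT are moved from the
 * prev list back onto the live list; unseen tables get a new allocation. */
#include <stdio.h>
#include <stdlib.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *h) { h->prev = h->next = h; }
static void list_del(struct list_node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}
static void list_add_tail(struct list_node *n, struct list_node *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}
static void list_move_tail(struct list_node *n, struct list_node *h)
{
    list_del(n);
    list_add_tail(n, h);
}

struct nfit_entry {
    struct list_node list;
    int range_index;           /* stands in for the raw ACPI sub-table */
};

/* Reuse an identical entry from @prev if present, else allocate a new one. */
static struct nfit_entry *add_or_reuse(struct list_node *live,
        struct list_node *prev, int range_index)
{
    struct list_node *pos;

    for (pos = prev->next; pos != prev; pos = pos->next) {
        struct nfit_entry *e = (struct nfit_entry *)pos;

        if (e->range_index == range_index) {
            /* like list_move_tail(&nfit_spa->list, &acpi_desc->spas) */
            list_move_tail(&e->list, live);
            return e;
        }
    }

    struct nfit_entry *e = calloc(1, sizeof(*e));
    if (!e)
        return NULL;
    e->range_index = range_index;
    list_add_tail(&e->list, live);
    return e;
}

int main(void)
{
    struct list_node live, prev;
    struct nfit_entry *old;

    list_init(&live);
    list_init(&prev);

    /* Pretend range 1 was seen on an earlier NFIT parse. */
    old = calloc(1, sizeof(*old));
    if (!old)
        return 1;
    old->range_index = 1;
    list_add_tail(&old->list, &prev);

    /* Re-parse: range 1 is moved back (reused), range 2 is freshly allocated. */
    printf("range 1 reused: %s\n",
           add_or_reuse(&live, &prev, 1) == old ? "yes" : "no");
    printf("range 2 new:    %s\n",
           add_or_reuse(&live, &prev, 2) ? "yes" : "no");
    return 0;
}
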
988 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, in nfit_mem_find_spa_bdw() argument
995 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { in nfit_mem_find_spa_bdw()
1003 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { in nfit_mem_find_spa_bdw()
1016 dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", in nfit_mem_find_spa_bdw()
1021 static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, in nfit_mem_init_bdw() argument
1030 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { in nfit_mem_init_bdw()
1040 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); in nfit_mem_init_bdw()
1046 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { in nfit_mem_init_bdw()
1052 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { in nfit_mem_init_bdw()
1062 static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, in __nfit_mem_init() argument
1085 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { in __nfit_mem_init()
1098 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) in __nfit_mem_init()
1108 nfit_mem = devm_kzalloc(acpi_desc->dev, in __nfit_mem_init()
1113 nfit_mem->acpi_desc = acpi_desc; in __nfit_mem_init()
1114 list_add(&nfit_mem->list, &acpi_desc->dimms); in __nfit_mem_init()
1117 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { in __nfit_mem_init()
1135 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { in __nfit_mem_init()
1143 nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev, in __nfit_mem_init()
1159 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", in __nfit_mem_init()
1172 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { in __nfit_mem_init()
1178 nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); in __nfit_mem_init()
1208 static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) in nfit_mem_init() argument
1222 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { in nfit_mem_init()
1223 rc = __nfit_mem_init(acpi_desc, nfit_spa->spa); in nfit_mem_init()
1233 rc = __nfit_mem_init(acpi_desc, NULL); in nfit_mem_init()
1237 list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); in nfit_mem_init()
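
nfit_mem_init() runs __nfit_mem_init() once per SPA range, then once more with a NULL SPA for DIMMs not referenced by any address range, and finally list_sort()s acpi_desc->dimms with nfit_mem_cmp, which orders entries by NFIT device handle. The snippet below models only that final ordering step, using qsort() over an array in place of the kernel's linked-list sort; the struct and handle values are invented.

/* Illustrative stand-in for the list_sort(..., nfit_mem_cmp) step:
 * order DIMM entries by their NFIT device handle. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct dimm { uint32_t device_handle; };

static int dimm_cmp(const void *a, const void *b)
{
    uint32_t ha = ((const struct dimm *)a)->device_handle;
    uint32_t hb = ((const struct dimm *)b)->device_handle;

    if (ha < hb)
        return -1;
    return ha > hb ? 1 : 0;
}

int main(void)
{
    struct dimm dimms[] = { { 0x1001 }, { 0x0021 }, { 0x0101 } }; /* made-up handles */
    size_t i, n = sizeof(dimms) / sizeof(dimms[0]);

    qsort(dimms, n, sizeof(dimms[0]), dimm_cmp);
    for (i = 0; i < n; i++)
        printf("dimm %zu: handle %#x\n", i, (unsigned)dimms[i].device_handle);
    return 0;
}
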
1247 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); in bus_dsm_mask_show() local
1249 return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask); in bus_dsm_mask_show()
1259 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); in revision_show() local
1261 return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); in revision_show()
1270 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); in hw_error_scrub_show() local
1272 return sprintf(buf, "%d\n", acpi_desc->scrub_mode); in hw_error_scrub_show()
1296 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); in hw_error_scrub_store() local
1300 acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON; in hw_error_scrub_store()
1303 acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF; in hw_error_scrub_store()
1326 struct acpi_nfit_desc *acpi_desc; in scrub_show() local
1336 acpi_desc = to_acpi_desc(nd_desc); in scrub_show()
1338 mutex_lock(&acpi_desc->init_mutex); in scrub_show()
1339 busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags) in scrub_show()
1340 && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags); in scrub_show()
1341 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n"); in scrub_show()
1344 &acpi_desc->scrub_flags)) { in scrub_show()
1345 acpi_desc->scrub_tmo = 1; in scrub_show()
1346 mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ); in scrub_show()
1349 mutex_unlock(&acpi_desc->init_mutex); in scrub_show()
1370 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); in scrub_store() local
1372 rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); in scrub_store()
1541 struct acpi_nfit_desc *acpi_desc; in format1_show() local
1546 acpi_desc = nfit_mem->acpi_desc; in format1_show()
1550 mutex_lock(&acpi_desc->init_mutex); in format1_show()
1551 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { in format1_show()
1558 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { in format1_show()
1570 mutex_unlock(&acpi_desc->init_mutex); in format1_show()
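
scrub_show() prints acpi_desc->scrub_count and appends a "+" while ARS is still running (ARS_BUSY set and ARS_CANCEL clear), which is how userspace tells a completed pass from one in flight. A tiny standalone model of just that formatting follows; scrub_status_str() is an invented helper, not a kernel function.

/* Model of the scrub_show() output format: "<count>" when idle,
 * "<count>+" while a scrub is still in progress. */
#include <stdio.h>
#include <stdbool.h>

static int scrub_status_str(char *buf, size_t len, unsigned int scrub_count,
        bool ars_busy, bool ars_cancel)
{
    bool busy = ars_busy && !ars_cancel;

    return snprintf(buf, len, "%u%s", scrub_count, busy ? "+\n" : "\n");
}

int main(void)
{
    char buf[32];

    scrub_status_str(buf, sizeof(buf), 3, true, false);
    printf("in-flight: %s", buf);   /* prints "3+" */
    scrub_status_str(buf, sizeof(buf), 3, false, false);
    printf("idle:      %s", buf);   /* prints "3" */
    return 0;
}
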
1717 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, in acpi_nfit_dimm_by_handle() argument
1722 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) in acpi_nfit_dimm_by_handle()
1732 struct acpi_nfit_desc *acpi_desc; in __acpi_nvdimm_notify() local
1743 acpi_desc = dev_get_drvdata(dev->parent); in __acpi_nvdimm_notify()
1744 if (!acpi_desc) in __acpi_nvdimm_notify()
1835 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, in acpi_nfit_add_dimm() argument
1838 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; in acpi_nfit_add_dimm()
1840 struct device *dev = acpi_desc->dev; in acpi_nfit_add_dimm()
1848 nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; in acpi_nfit_add_dimm()
1863 adev = to_acpi_dev(acpi_desc); in acpi_nfit_add_dimm()
1993 struct acpi_nfit_desc *acpi_desc = data; in shutdown_dimm_notify() local
1996 mutex_lock(&acpi_desc->init_mutex); in shutdown_dimm_notify()
2001 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { in shutdown_dimm_notify()
2014 mutex_unlock(&acpi_desc->init_mutex); in shutdown_dimm_notify()
2031 struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; in acpi_nfit_get_fw_ops() local
2032 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; in acpi_nfit_get_fw_ops()
2047 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) in acpi_nfit_register_dimms() argument
2053 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { in acpi_nfit_register_dimms()
2061 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); in acpi_nfit_register_dimms()
2073 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { in acpi_nfit_register_dimms()
2087 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); in acpi_nfit_register_dimms()
2119 nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, in acpi_nfit_register_dimms()
2134 dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n", in acpi_nfit_register_dimms()
2144 rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); in acpi_nfit_register_dimms()
2152 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { in acpi_nfit_register_dimms()
2165 dev_warn(acpi_desc->dev, "%s: notifications disabled\n", in acpi_nfit_register_dimms()
2169 return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify, in acpi_nfit_register_dimms()
2170 acpi_desc); in acpi_nfit_register_dimms()
2184 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) in acpi_nfit_init_dsms() argument
2186 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; in acpi_nfit_init_dsms()
2196 if (acpi_desc->bus_cmd_force_en) { in acpi_nfit_init_dsms()
2197 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; in acpi_nfit_init_dsms()
2199 if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) { in acpi_nfit_init_dsms()
2205 adev = to_acpi_dev(acpi_desc); in acpi_nfit_init_dsms()
2224 set_bit(i, &acpi_desc->bus_dsm_mask); in acpi_nfit_init_dsms()
2229 mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; in acpi_nfit_init_dsms()
2332 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) in memdev_from_spa() argument
2336 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) in memdev_from_spa()
2343 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, in acpi_nfit_init_interleave_set() argument
2347 struct device *dev = acpi_desc->dev; in acpi_nfit_init_interleave_set()
2373 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, in acpi_nfit_init_interleave_set()
2702 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, in ars_get_cap() argument
2705 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; in ars_get_cap()
2718 static int ars_start(struct acpi_nfit_desc *acpi_desc, in ars_start() argument
2725 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; in ars_start()
2746 set_bit(ARS_VALID, &acpi_desc->scrub_flags); in ars_start()
2750 static int ars_continue(struct acpi_nfit_desc *acpi_desc) in ars_continue() argument
2754 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; in ars_continue()
2755 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; in ars_continue()
2769 static int ars_get_status(struct acpi_nfit_desc *acpi_desc) in ars_get_status() argument
2771 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; in ars_get_status()
2772 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; in ars_get_status()
2776 acpi_desc->max_ars, &cmd_rc); in ars_get_status()
2782 static void ars_complete(struct acpi_nfit_desc *acpi_desc, in ars_complete() argument
2785 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; in ars_complete()
2790 lockdep_assert_held(&acpi_desc->init_mutex); in ars_complete()
2796 if (acpi_desc->scrub_spa != nfit_spa) in ars_complete()
2818 acpi_desc->scrub_spa = NULL; in ars_complete()
2823 dev = acpi_desc->dev; in ars_complete()
2827 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) in ars_status_process_records() argument
2829 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; in ars_status_process_records()
2830 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; in ars_status_process_records()
2845 if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) { in ars_status_process_records()
2846 dev_dbg(acpi_desc->dev, "skip %d stale records\n", in ars_status_process_records()
2863 dev_warn(acpi_desc->dev, "detected truncated ars results\n"); in ars_status_process_records()
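
ars_status_process_records() walks the records in acpi_desc->ars_status, adds each reported range to the bus badrange list, skips stale data when ARS_VALID was not set, and only trusts records that fit entirely within out_length, warning about truncated results otherwise. The sketch below models just that bounds/truncation check; the structures are simplified stand-ins for the real nd_cmd_ars_status layout, with the 44-byte header constant loosely mirroring the kernel's check.

/* Sketch of the truncation check in ars_status_process_records(): only
 * records fully present in the returned payload are processed. */
#include <stdio.h>
#include <stdint.h>

struct ars_record { uint64_t err_address, length; };

struct ars_status_model {
    uint32_t out_length;          /* bytes the firmware actually returned */
    uint32_t num_records;
    struct ars_record records[8];
};

#define ARS_STATUS_HDR 44u        /* fixed header preceding the records */

static void process_records(const struct ars_status_model *st)
{
    uint32_t i;

    for (i = 0; i < st->num_records; i++) {
        /* only process records that fit within the returned length */
        if (st->out_length < ARS_STATUS_HDR +
                sizeof(struct ars_record) * (i + 1))
            break;
        printf("bad range: %#llx + %llu\n",
               (unsigned long long)st->records[i].err_address,
               (unsigned long long)st->records[i].length);
    }
    if (i < st->num_records)
        printf("detected truncated ars results\n");
}

int main(void)
{
    struct ars_status_model st = {
        .out_length = ARS_STATUS_HDR + sizeof(struct ars_record), /* room for one record */
        .num_records = 2,
        .records = { { 0x1000, 512 }, { 0x4000, 512 } },
    };

    process_records(&st);   /* reports one range, then warns about truncation */
    return 0;
}
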
2875 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, in acpi_nfit_insert_resource() argument
2887 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); in acpi_nfit_insert_resource()
2901 ret = devm_add_action_or_reset(acpi_desc->dev, in acpi_nfit_insert_resource()
2910 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, in acpi_nfit_init_mapping() argument
2915 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, in acpi_nfit_init_mapping()
2923 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", in acpi_nfit_init_mapping()
2938 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", in acpi_nfit_init_mapping()
2950 ndbr_desc->do_io = acpi_desc->blk_do_io; in acpi_nfit_init_mapping()
2951 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); in acpi_nfit_init_mapping()
2954 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, in acpi_nfit_init_mapping()
2979 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, in acpi_nfit_register_region() argument
2995 dev_dbg(acpi_desc->dev, "detected invalid spa index\n"); in acpi_nfit_register_region()
3021 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) in acpi_nfit_register_region()
3023 else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) in acpi_nfit_register_region()
3026 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { in acpi_nfit_register_region()
3033 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", in acpi_nfit_register_region()
3038 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, in acpi_nfit_register_region()
3046 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); in acpi_nfit_register_region()
3050 nvdimm_bus = acpi_desc->nvdimm_bus; in acpi_nfit_register_region()
3052 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); in acpi_nfit_register_region()
3054 dev_warn(acpi_desc->dev, in acpi_nfit_register_region()
3078 dev_err(acpi_desc->dev, "failed to register spa range %d\n", in acpi_nfit_register_region()
3083 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc) in ars_status_alloc() argument
3085 struct device *dev = acpi_desc->dev; in ars_status_alloc()
3088 if (acpi_desc->ars_status) { in ars_status_alloc()
3089 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); in ars_status_alloc()
3093 ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL); in ars_status_alloc()
3096 acpi_desc->ars_status = ars_status; in ars_status_alloc()
3100 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) in acpi_nfit_query_poison() argument
3104 if (ars_status_alloc(acpi_desc)) in acpi_nfit_query_poison()
3107 rc = ars_get_status(acpi_desc); in acpi_nfit_query_poison()
3112 if (ars_status_process_records(acpi_desc)) in acpi_nfit_query_poison()
3113 dev_err(acpi_desc->dev, "Failed to process ARS records\n"); in acpi_nfit_query_poison()
3118 static int ars_register(struct acpi_nfit_desc *acpi_desc, in ars_register() argument
3124 return acpi_nfit_register_region(acpi_desc, nfit_spa); in ars_register()
3130 switch (acpi_nfit_query_poison(acpi_desc)) { in ars_register()
3134 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT); in ars_register()
3143 rc = acpi_nfit_query_poison(acpi_desc); in ars_register()
3146 acpi_desc->scrub_spa = nfit_spa; in ars_register()
3147 ars_complete(acpi_desc, nfit_spa); in ars_register()
3153 acpi_desc->scrub_spa = NULL; in ars_register()
3168 return acpi_nfit_register_region(acpi_desc, nfit_spa); in ars_register()
3171 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc) in ars_complete_all() argument
3175 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { in ars_complete_all()
3178 ars_complete(acpi_desc, nfit_spa); in ars_complete_all()
3182 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, in __acpi_nfit_scrub() argument
3185 unsigned int tmo = acpi_desc->scrub_tmo; in __acpi_nfit_scrub()
3186 struct device *dev = acpi_desc->dev; in __acpi_nfit_scrub()
3189 lockdep_assert_held(&acpi_desc->init_mutex); in __acpi_nfit_scrub()
3191 if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) in __acpi_nfit_scrub()
3200 ars_continue(acpi_desc); in __acpi_nfit_scrub()
3206 addr = acpi_desc->ars_status->address; in __acpi_nfit_scrub()
3207 end = addr + acpi_desc->ars_status->length; in __acpi_nfit_scrub()
3212 ars_complete_all(acpi_desc); in __acpi_nfit_scrub()
3213 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { in __acpi_nfit_scrub()
3227 rc = ars_start(acpi_desc, nfit_spa, req_type); in __acpi_nfit_scrub()
3241 dev_WARN_ONCE(dev, acpi_desc->scrub_spa, in __acpi_nfit_scrub()
3243 acpi_desc->scrub_spa->spa->range_index); in __acpi_nfit_scrub()
3245 acpi_desc->scrub_spa = nfit_spa; in __acpi_nfit_scrub()
3250 list_move_tail(&nfit_spa->list, &acpi_desc->spas); in __acpi_nfit_scrub()
3261 static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) in __sched_ars() argument
3263 lockdep_assert_held(&acpi_desc->init_mutex); in __sched_ars()
3265 set_bit(ARS_BUSY, &acpi_desc->scrub_flags); in __sched_ars()
3268 acpi_desc->scrub_tmo = tmo; in __sched_ars()
3269 queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); in __sched_ars()
3272 static void sched_ars(struct acpi_nfit_desc *acpi_desc) in sched_ars() argument
3274 __sched_ars(acpi_desc, 0); in sched_ars()
3277 static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) in notify_ars_done() argument
3279 lockdep_assert_held(&acpi_desc->init_mutex); in notify_ars_done()
3281 clear_bit(ARS_BUSY, &acpi_desc->scrub_flags); in notify_ars_done()
3282 acpi_desc->scrub_count++; in notify_ars_done()
3283 if (acpi_desc->scrub_count_state) in notify_ars_done()
3284 sysfs_notify_dirent(acpi_desc->scrub_count_state); in notify_ars_done()
3289 struct acpi_nfit_desc *acpi_desc; in acpi_nfit_scrub() local
3293 acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work); in acpi_nfit_scrub()
3294 mutex_lock(&acpi_desc->init_mutex); in acpi_nfit_scrub()
3295 query_rc = acpi_nfit_query_poison(acpi_desc); in acpi_nfit_scrub()
3296 tmo = __acpi_nfit_scrub(acpi_desc, query_rc); in acpi_nfit_scrub()
3298 __sched_ars(acpi_desc, tmo); in acpi_nfit_scrub()
3300 notify_ars_done(acpi_desc); in acpi_nfit_scrub()
3301 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); in acpi_nfit_scrub()
3302 clear_bit(ARS_POLL, &acpi_desc->scrub_flags); in acpi_nfit_scrub()
3303 mutex_unlock(&acpi_desc->init_mutex); in acpi_nfit_scrub()
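
acpi_nfit_scrub() is the delayed-work handler: under init_mutex it re-queries poison, asks __acpi_nfit_scrub() whether more ARS work remains, and either re-arms itself via __sched_ars() with the returned timeout or finishes through notify_ars_done(), which bumps scrub_count and pokes the sysfs scrub attribute. Below is a compact single-threaded model of that re-arm-or-finish decision; scrub_step() and run_scrub_worker() are invented stand-ins for the kernel's workqueue mechanics.

/* Single-threaded model of the acpi_nfit_scrub() requeue logic: a worker
 * step returns a timeout while ARS is still in progress and 0 when all
 * ranges are complete; the caller either re-arms or signals completion. */
#include <stdio.h>

struct scrub_state {
    int ranges_left;          /* SPA ranges still waiting for ARS */
    unsigned int scrub_count; /* completed scrub passes, as shown in sysfs */
};

/* Returns a timeout (in "seconds") to re-arm with, or 0 when done. */
static unsigned int scrub_step(struct scrub_state *s)
{
    if (s->ranges_left > 0) {
        s->ranges_left--;
        return 1;             /* poll again shortly, like __sched_ars(acpi_desc, tmo) */
    }
    return 0;
}

static void run_scrub_worker(struct scrub_state *s)
{
    unsigned int tmo;

    while ((tmo = scrub_step(s)) != 0)
        printf("re-arm scrub work in %u s\n", tmo);

    /* no work left: the equivalent of notify_ars_done() */
    s->scrub_count++;
    printf("scrub complete, scrub_count=%u\n", s->scrub_count);
}

int main(void)
{
    struct scrub_state s = { .ranges_left = 2, .scrub_count = 0 };

    run_scrub_worker(&s);
    return 0;
}
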
3306 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, in acpi_nfit_init_ars() argument
3315 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); in acpi_nfit_init_ars()
3328 acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); in acpi_nfit_init_ars()
3332 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) in acpi_nfit_register_regions() argument
3337 set_bit(ARS_VALID, &acpi_desc->scrub_flags); in acpi_nfit_register_regions()
3338 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { in acpi_nfit_register_regions()
3342 acpi_nfit_init_ars(acpi_desc, nfit_spa); in acpi_nfit_register_regions()
3347 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { in acpi_nfit_register_regions()
3352 rc = ars_register(acpi_desc, nfit_spa); in acpi_nfit_register_regions()
3372 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); in acpi_nfit_register_regions()
3383 sched_ars(acpi_desc); in acpi_nfit_register_regions()
3387 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, in acpi_nfit_check_deletions() argument
3390 struct device *dev = acpi_desc->dev; in acpi_nfit_check_deletions()
3404 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) in acpi_nfit_desc_init_scrub_attr() argument
3406 struct device *dev = acpi_desc->dev; in acpi_nfit_desc_init_scrub_attr()
3410 if (!ars_supported(acpi_desc->nvdimm_bus)) in acpi_nfit_desc_init_scrub_attr()
3413 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); in acpi_nfit_desc_init_scrub_attr()
3419 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); in acpi_nfit_desc_init_scrub_attr()
3421 if (!acpi_desc->scrub_count_state) { in acpi_nfit_desc_init_scrub_attr()
3431 struct acpi_nfit_desc *acpi_desc = data; in acpi_nfit_unregister() local
3433 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); in acpi_nfit_unregister()
3436 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) in acpi_nfit_init() argument
3438 struct device *dev = acpi_desc->dev; in acpi_nfit_init()
3443 if (!acpi_desc->nvdimm_bus) { in acpi_nfit_init()
3444 acpi_nfit_init_dsms(acpi_desc); in acpi_nfit_init()
3446 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, in acpi_nfit_init()
3447 &acpi_desc->nd_desc); in acpi_nfit_init()
3448 if (!acpi_desc->nvdimm_bus) in acpi_nfit_init()
3452 acpi_desc); in acpi_nfit_init()
3456 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); in acpi_nfit_init()
3462 list_add_tail(&acpi_desc->list, &acpi_descs); in acpi_nfit_init()
3466 mutex_lock(&acpi_desc->init_mutex); in acpi_nfit_init()
3475 list_cut_position(&prev.spas, &acpi_desc->spas, in acpi_nfit_init()
3476 acpi_desc->spas.prev); in acpi_nfit_init()
3477 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, in acpi_nfit_init()
3478 acpi_desc->memdevs.prev); in acpi_nfit_init()
3479 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, in acpi_nfit_init()
3480 acpi_desc->dcrs.prev); in acpi_nfit_init()
3481 list_cut_position(&prev.bdws, &acpi_desc->bdws, in acpi_nfit_init()
3482 acpi_desc->bdws.prev); in acpi_nfit_init()
3483 list_cut_position(&prev.idts, &acpi_desc->idts, in acpi_nfit_init()
3484 acpi_desc->idts.prev); in acpi_nfit_init()
3485 list_cut_position(&prev.flushes, &acpi_desc->flushes, in acpi_nfit_init()
3486 acpi_desc->flushes.prev); in acpi_nfit_init()
3490 data = add_table(acpi_desc, &prev, data, end); in acpi_nfit_init()
3498 rc = acpi_nfit_check_deletions(acpi_desc, &prev); in acpi_nfit_init()
3502 rc = nfit_mem_init(acpi_desc); in acpi_nfit_init()
3506 rc = acpi_nfit_register_dimms(acpi_desc); in acpi_nfit_init()
3510 rc = acpi_nfit_register_regions(acpi_desc); in acpi_nfit_init()
3513 mutex_unlock(&acpi_desc->init_mutex); in acpi_nfit_init()
3520 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); in acpi_nfit_flush_probe() local
3521 struct device *dev = acpi_desc->dev; in acpi_nfit_flush_probe()
3528 mutex_lock(&acpi_desc->init_mutex); in acpi_nfit_flush_probe()
3529 mutex_unlock(&acpi_desc->init_mutex); in acpi_nfit_flush_probe()
3537 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); in __acpi_nfit_clear_to_send() local
3550 if (work_busy(&acpi_desc->dwork.work)) in __acpi_nfit_clear_to_send()
3582 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, in acpi_nfit_ars_rescan() argument
3585 struct device *dev = acpi_desc->dev; in acpi_nfit_ars_rescan()
3589 mutex_lock(&acpi_desc->init_mutex); in acpi_nfit_ars_rescan()
3590 if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) { in acpi_nfit_ars_rescan()
3591 mutex_unlock(&acpi_desc->init_mutex); in acpi_nfit_ars_rescan()
3595 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { in acpi_nfit_ars_rescan()
3609 sched_ars(acpi_desc); in acpi_nfit_ars_rescan()
3612 mutex_unlock(&acpi_desc->init_mutex); in acpi_nfit_ars_rescan()
3621 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) in acpi_nfit_desc_init() argument
3625 dev_set_drvdata(dev, acpi_desc); in acpi_nfit_desc_init()
3626 acpi_desc->dev = dev; in acpi_nfit_desc_init()
3627 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; in acpi_nfit_desc_init()
3628 nd_desc = &acpi_desc->nd_desc; in acpi_nfit_desc_init()
3636 INIT_LIST_HEAD(&acpi_desc->spas); in acpi_nfit_desc_init()
3637 INIT_LIST_HEAD(&acpi_desc->dcrs); in acpi_nfit_desc_init()
3638 INIT_LIST_HEAD(&acpi_desc->bdws); in acpi_nfit_desc_init()
3639 INIT_LIST_HEAD(&acpi_desc->idts); in acpi_nfit_desc_init()
3640 INIT_LIST_HEAD(&acpi_desc->flushes); in acpi_nfit_desc_init()
3641 INIT_LIST_HEAD(&acpi_desc->memdevs); in acpi_nfit_desc_init()
3642 INIT_LIST_HEAD(&acpi_desc->dimms); in acpi_nfit_desc_init()
3643 INIT_LIST_HEAD(&acpi_desc->list); in acpi_nfit_desc_init()
3644 mutex_init(&acpi_desc->init_mutex); in acpi_nfit_desc_init()
3645 acpi_desc->scrub_tmo = 1; in acpi_nfit_desc_init()
3646 INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub); in acpi_nfit_desc_init()
3657 struct acpi_nfit_desc *acpi_desc = data; in acpi_nfit_shutdown() local
3658 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); in acpi_nfit_shutdown()
3665 list_del(&acpi_desc->list); in acpi_nfit_shutdown()
3668 mutex_lock(&acpi_desc->init_mutex); in acpi_nfit_shutdown()
3669 set_bit(ARS_CANCEL, &acpi_desc->scrub_flags); in acpi_nfit_shutdown()
3670 cancel_delayed_work_sync(&acpi_desc->dwork); in acpi_nfit_shutdown()
3671 mutex_unlock(&acpi_desc->init_mutex); in acpi_nfit_shutdown()
3688 struct acpi_nfit_desc *acpi_desc; in acpi_nfit_add() local
3713 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); in acpi_nfit_add()
3714 if (!acpi_desc) in acpi_nfit_add()
3716 acpi_nfit_desc_init(acpi_desc, &adev->dev); in acpi_nfit_add()
3719 acpi_desc->acpi_header = *tbl; in acpi_nfit_add()
3727 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, in acpi_nfit_add()
3735 rc = acpi_nfit_init(acpi_desc, (void *) tbl in acpi_nfit_add()
3741 return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); in acpi_nfit_add()
3752 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); in acpi_nfit_update_notify() local
3764 if (!acpi_desc) { in acpi_nfit_update_notify()
3765 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); in acpi_nfit_update_notify()
3766 if (!acpi_desc) in acpi_nfit_update_notify()
3768 acpi_nfit_desc_init(acpi_desc, dev); in acpi_nfit_update_notify()
3786 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, in acpi_nfit_update_notify()
3797 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); in acpi_nfit_uc_error_notify() local
3799 if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) in acpi_nfit_uc_error_notify()
3800 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); in acpi_nfit_uc_error_notify()
3802 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT); in acpi_nfit_uc_error_notify()