Lines matching references to `con` in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c

126 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
641 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_create_obj() local
644 if (!adev->ras_enabled || !con) in amdgpu_ras_create_obj()
654 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_create_obj()
656 obj = &con->objs[head->block]; in amdgpu_ras_create_obj()
664 list_add(&obj->node, &con->head); in amdgpu_ras_create_obj()
674 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_find_obj() local
678 if (!adev->ras_enabled || !con) in amdgpu_ras_find_obj()
689 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_find_obj()
691 obj = &con->objs[head->block]; in amdgpu_ras_find_obj()
697 obj = &con->objs[i]; in amdgpu_ras_find_obj()
717 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_feature_enabled() local
719 return con->features & BIT(head->block); in amdgpu_ras_is_feature_enabled()
729 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_feature_enable() local
750 con->features |= BIT(head->block); in __amdgpu_ras_feature_enable()
753 con->features &= ~BIT(head->block); in __amdgpu_ras_feature_enable()
765 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable() local
769 if (!con) in amdgpu_ras_feature_enable()
821 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable_on_boot() local
824 if (!con) in amdgpu_ras_feature_enable_on_boot()
827 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_feature_enable_on_boot()
855 con->features |= BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
861 con->features &= ~BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
872 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_disable_all_features() local
875 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_disable_all_features()
888 return con->features; in amdgpu_ras_disable_all_features()
894 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_enable_all_features() local
941 return con->features; in amdgpu_ras_enable_all_features()
1240 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_error_count() local
1245 if (!adev->ras_enabled || !con) in amdgpu_ras_query_error_count()
1257 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_error_count()
1335 struct amdgpu_ras *con = in amdgpu_ras_sysfs_badpages_read() local
1337 struct amdgpu_device *adev = con->adev; in amdgpu_ras_sysfs_badpages_read()
1366 struct amdgpu_ras *con = in amdgpu_ras_sysfs_features_read() local
1369 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); in amdgpu_ras_sysfs_features_read()
1374 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_bad_page_node() local
1377 &con->badpages_attr.attr, in amdgpu_ras_sysfs_remove_bad_page_node()
1383 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_feature_node() local
1385 &con->features_attr.attr, in amdgpu_ras_sysfs_remove_feature_node()
1451 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_all() local
1454 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_sysfs_remove_all()
1488 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_ctrl_node() local
1489 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control; in amdgpu_ras_debugfs_create_ctrl_node()
1499 &con->bad_page_cnt_threshold); in amdgpu_ras_debugfs_create_ctrl_node()
1505 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", in amdgpu_ras_debugfs_create_ctrl_node()
1508 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); in amdgpu_ras_debugfs_create_ctrl_node()
1518 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); in amdgpu_ras_debugfs_create_ctrl_node()
1525 &con->disable_ras_err_cnt_harvest); in amdgpu_ras_debugfs_create_ctrl_node()
1550 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_all() local
1559 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) in amdgpu_ras_debugfs_create_all()
1564 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_debugfs_create_all()
1584 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_init() local
1589 &con->features_attr.attr, in amdgpu_ras_fs_init()
1599 con->features_attr = dev_attr_features; in amdgpu_ras_fs_init()
1606 con->badpages_attr = bin_attr_gpu_vram_bad_pages; in amdgpu_ras_fs_init()
1607 bin_attrs[0] = &con->badpages_attr; in amdgpu_ras_fs_init()
1621 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_fini() local
1625 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { in amdgpu_ras_fs_fini()
1867 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_remove_all() local
1870 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_interrupt_remove_all()
1881 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_log_on_err_counter() local
1884 if (!adev->ras_enabled || !con) in amdgpu_ras_log_on_err_counter()
1887 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_log_on_err_counter()
1952 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_err_status() local
1955 if (!adev->ras_enabled || !con) in amdgpu_ras_query_err_status()
1958 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_err_status()
1975 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_badpages_read() local
1980 if (!con || !con->eh_data || !bps || !count) in amdgpu_ras_badpages_read()
1983 mutex_lock(&con->recovery_lock); in amdgpu_ras_badpages_read()
1984 data = con->eh_data; in amdgpu_ras_badpages_read()
2013 mutex_unlock(&con->recovery_lock); in amdgpu_ras_badpages_read()
2108 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_add_bad_pages() local
2113 if (!con || !con->eh_data || !bps || pages <= 0) in amdgpu_ras_add_bad_pages()
2116 mutex_lock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
2117 data = con->eh_data; in amdgpu_ras_add_bad_pages()
2122 if (amdgpu_ras_check_bad_page_unlock(con, in amdgpu_ras_add_bad_pages()
2141 mutex_unlock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
2154 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_save_bad_pages() local
2159 if (!con || !con->eh_data) { in amdgpu_ras_save_bad_pages()
2166 mutex_lock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
2167 control = &con->eeprom_control; in amdgpu_ras_save_bad_pages()
2168 data = con->eh_data; in amdgpu_ras_save_bad_pages()
2170 mutex_unlock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
2219 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, in amdgpu_ras_check_bad_page_unlock() argument
2222 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_check_bad_page_unlock()
2241 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_check_bad_page() local
2244 if (!con || !con->eh_data) in amdgpu_ras_check_bad_page()
2247 mutex_lock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
2248 ret = amdgpu_ras_check_bad_page_unlock(con, addr); in amdgpu_ras_check_bad_page()
2249 mutex_unlock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
2256 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_validate_threshold() local
2282 con->bad_page_cnt_threshold = min(lower_32_bits(val), in amdgpu_ras_validate_threshold()
2285 con->bad_page_cnt_threshold = min_t(int, max_count, in amdgpu_ras_validate_threshold()
2292 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_init() local
2298 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_recovery_init()
2306 con->adev = adev; in amdgpu_ras_recovery_init()
2311 data = &con->eh_data; in amdgpu_ras_recovery_init()
2318 mutex_init(&con->recovery_lock); in amdgpu_ras_recovery_init()
2319 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); in amdgpu_ras_recovery_init()
2320 atomic_set(&con->in_recovery, 0); in amdgpu_ras_recovery_init()
2321 con->eeprom_control.bad_channel_bitmap = 0; in amdgpu_ras_recovery_init()
2323 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); in amdgpu_ras_recovery_init()
2332 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit); in amdgpu_ras_recovery_init()
2340 if (con->eeprom_control.ras_num_recs) { in amdgpu_ras_recovery_init()
2345 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); in amdgpu_ras_recovery_init()
2347 if (con->update_channel_flag == true) { in amdgpu_ras_recovery_init()
2348 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap); in amdgpu_ras_recovery_init()
2349 con->update_channel_flag = false; in amdgpu_ras_recovery_init()
2363 con->eh_data = NULL; in amdgpu_ras_recovery_init()
2381 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_fini() local
2382 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_recovery_fini()
2388 cancel_work_sync(&con->recovery_work); in amdgpu_ras_recovery_fini()
2390 mutex_lock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
2391 con->eh_data = NULL; in amdgpu_ras_recovery_fini()
2394 mutex_unlock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
2533 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_counte_dw() local
2535 struct amdgpu_device *adev = con->adev; in amdgpu_ras_counte_dw()
2547 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_counte_dw()
2548 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_counte_dw()
2558 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_poison_mode() local
2562 if (amdgpu_sriov_vf(adev) || !con) in amdgpu_ras_query_poison_mode()
2568 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
2580 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
2590 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init() local
2593 if (con) in amdgpu_ras_init()
2596 con = kmalloc(sizeof(struct amdgpu_ras) + in amdgpu_ras_init()
2600 if (!con) in amdgpu_ras_init()
2603 con->adev = adev; in amdgpu_ras_init()
2604 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); in amdgpu_ras_init()
2605 atomic_set(&con->ras_ce_count, 0); in amdgpu_ras_init()
2606 atomic_set(&con->ras_ue_count, 0); in amdgpu_ras_init()
2608 con->objs = (struct ras_manager *)(con + 1); in amdgpu_ras_init()
2610 amdgpu_ras_set_context(adev, con); in amdgpu_ras_init()
2619 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_ras_init()
2628 con->update_channel_flag = false; in amdgpu_ras_init()
2629 con->features = 0; in amdgpu_ras_init()
2630 INIT_LIST_HEAD(&con->head); in amdgpu_ras_init()
2632 con->flags = RAS_DEFAULT_FLAGS; in amdgpu_ras_init()
2697 kfree(con); in amdgpu_ras_init()
2731 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_poison_mode_supported() local
2733 if (!con) in amdgpu_ras_is_poison_mode_supported()
2736 return con->poison_supported; in amdgpu_ras_is_poison_mode_supported()
2744 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_block_late_init() local
2796 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_block_late_init()
2797 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_block_late_init()
2845 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_resume() local
2848 if (!adev->ras_enabled || !con) { in amdgpu_ras_resume()
2855 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_resume()
2867 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_resume()
2879 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_suspend() local
2881 if (!adev->ras_enabled || !con) in amdgpu_ras_suspend()
2886 if (con->features) in amdgpu_ras_suspend()
2924 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_pre_fini() local
2926 if (!adev->ras_enabled || !con) in amdgpu_ras_pre_fini()
2931 if (con->features) in amdgpu_ras_pre_fini()
2941 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fini() local
2943 if (!adev->ras_enabled || !con) in amdgpu_ras_fini()
2964 WARN(con->features, "Feature mask is not cleared"); in amdgpu_ras_fini()
2966 if (con->features) in amdgpu_ras_fini()
2969 cancel_delayed_work_sync(&con->ras_counte_delay_work); in amdgpu_ras_fini()
2972 kfree(con); in amdgpu_ras_fini()
3003 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_release_ras_context() local
3005 if (!con) in amdgpu_release_ras_context()
3008 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { in amdgpu_release_ras_context()
3009 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_release_ras_context()
3011 kfree(con); in amdgpu_release_ras_context()