Lines matching "bank" in arch/x86/kernel/cpu/mce/amd.c (Linux kernel, AMD machine-check thresholding and deferred-error handling). Each entry gives the source line number, the matching line, and its enclosing function; a trailing "argument" or "local" marks the line where "bank" is declared. Short reconstructed sketches are interleaved below to show how the fragments fit together.

122 static enum smca_bank_types smca_get_bank_type(unsigned int bank)  in smca_get_bank_type()  argument
126 if (bank >= MAX_NR_BANKS) in smca_get_bank_type()
129 b = &smca_banks[bank]; in smca_get_bank_type()
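Taken together, the three fragments above are just a bounds-checked table lookup. A minimal sketch of how the full helper plausibly reads (the hwid NULL check and the N_SMCA_BANK_TYPES sentinel are assumptions, inferred from how smca_banks[] is populated in smca_configure() below):

    static enum smca_bank_types smca_get_bank_type(unsigned int bank)
    {
            struct smca_bank *b;

            /* Out-of-range banks have no SMCA type. */
            if (bank >= MAX_NR_BANKS)
                    return N_SMCA_BANK_TYPES;

            b = &smca_banks[bank];

            /* Banks never matched to a HWID in smca_configure() stay typeless. */
            if (!b->hwid)
                    return N_SMCA_BANK_TYPES;

            return b->hwid->bank_type;
    }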
198 * So to define a unique name for each bank, we use a temp c-string to append
227 static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu) in smca_set_misc_banks_map() argument
235 if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) in smca_set_misc_banks_map()
241 if (rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high)) in smca_set_misc_banks_map()
245 per_cpu(smca_misc_banks_map, cpu) |= BIT(bank); in smca_set_misc_banks_map()
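The two rdmsr_safe() fragments plus the BIT(bank) update outline the shape of this helper: probe the bank's MCA_CONFIG and MCA_MISC registers, and record in a per-CPU bitmap whether extra MISC thresholding registers exist. A sketch under that reading (the MCAX and BLKPTR bit tests are assumptions; the fragments show only the MSR reads and the bitmap update):

    static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu)
    {
            u32 low, high;

            /* Bail if the bank's MCA_CONFIG register cannot be read. */
            if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
                    return;

            /* Assumed: only banks operating in MCAX mode are considered. */
            if (!(low & MCI_CONFIG_MCAX))
                    return;

            if (rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high))
                    return;

            /* Assumed: a nonzero BLKPTR advertises additional MISC registers. */
            if (low & MASK_BLKPTR_LO)
                    per_cpu(smca_misc_banks_map, cpu) |= BIT(bank);
    }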
249 static void smca_configure(unsigned int bank, unsigned int cpu) in smca_configure() argument
254 u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank); in smca_configure()
261 * bank. It also means that the OS will configure deferred in smca_configure()
270 * SMCA sets the Deferred Error Interrupt type per bank. in smca_configure()
286 smca_set_misc_banks_map(bank, cpu); in smca_configure()
288 /* Return early if this bank was already initialized. */ in smca_configure()
289 if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0) in smca_configure()
292 if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { in smca_configure()
293 pr_warn("Failed to read MCA_IPID for bank %d\n", bank); in smca_configure()
303 smca_banks[bank].hwid = s_hwid; in smca_configure()
304 smca_banks[bank].id = low; in smca_configure()
305 smca_banks[bank].sysfs_id = s_hwid->count++; in smca_configure()
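Lines 292-305 hint at the core of smca_configure(): read MCA_IPID once per bank, derive a HWID/McaType key, and match it against a static table, caching the low half of IPID and a running per-type count for sysfs naming. A sketch of that matching step (HWID_MCATYPE, the smca_hwid_mcatypes[] table and the field masks are assumptions consistent with the fragments):

    if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
            pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
            return;
    }

    /* Assumed: key combines HardwareID and McaType from MCA_IPID[63:32]. */
    hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
                                (high & MCI_IPID_MCATYPE) >> 16);

    for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
            s_hwid = &smca_hwid_mcatypes[i];

            if (hwid_mcatype == s_hwid->hwid_mcatype) {
                    smca_banks[bank].hwid = s_hwid;
                    smca_banks[bank].id = low;
                    /* Nth instance of this bank type on the system. */
                    smca_banks[bank].sysfs_id = s_hwid->count++;
                    break;
            }
    }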
319 static inline bool is_shared_bank(int bank) in is_shared_bank() argument
323 * a shared bank. in is_shared_bank()
328 /* Bank 4 is for northbridge reporting and is thus shared */ in is_shared_bank()
329 return (bank == 4); in is_shared_bank()
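The two fragments are essentially the whole predicate: on pre-SMCA parts, only the northbridge bank is shared across a node. A sketch (the mce_flags.smca early-out is an assumption implied by the "not a shared bank" comment fragment at line 323):

    static inline bool is_shared_bank(int bank)
    {
            /*
             * Assumed: under Scalable MCA, banks are per-CPU, so none is
             * treated as a shared bank here.
             */
            if (mce_flags.smca)
                    return false;

            /* Bank 4 is for northbridge reporting and is thus shared. */
            return (bank == 4);
    }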
352 static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits) in lvt_interrupt_supported() argument
355 * bank 4 supports APIC LVT interrupts implicitly since forever. in lvt_interrupt_supported()
357 if (bank == 4) in lvt_interrupt_supported()
362 * bank can generate APIC LVT interrupts in lvt_interrupt_supported()
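A sketch of the whole predicate: bank 4 is grandfathered in, every other bank advertises LVT interrupt capability in its MCi_MISC high bits (BIT(28), the IntP flag, is an assumption; the fragments show only the bank 4 special case and the surrounding comments):

    static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
    {
            /* bank 4 supports APIC LVT interrupts implicitly since forever. */
            if (bank == 4)
                    return true;

            /*
             * Assumed: IntP (bit 28 of MCi_MISC[63:32]) reports whether the
             * bank can generate APIC LVT interrupts.
             */
            return msr_high_bits & BIT(28);
    }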
373 "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu, in lvt_off_valid()
374 b->bank, b->block, b->address, hi, lo); in lvt_off_valid()
388 "for bank %d, block %d (MSR%08X=0x%x%08x)\n", in lvt_off_valid()
389 b->cpu, apic, b->bank, b->block, b->address, hi, lo); in lvt_off_valid()
396 /* Reprogram MCx_MISC MSR behind this threshold bank. */
502 static u32 smca_get_block_address(unsigned int bank, unsigned int block, in smca_get_block_address() argument
506 return MSR_AMD64_SMCA_MCx_MISC(bank); in smca_get_block_address()
508 if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank))) in smca_get_block_address()
511 return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); in smca_get_block_address()
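These three fragments pin down the SMCA address scheme almost completely: block 0 lives at MCA_MISC0, blocks 1-4 at MCA_MISC1-4, but only on banks flagged in smca_misc_banks_map. Reassembled (only the 0 return for the unmapped case is assumed):

    static u32 smca_get_block_address(unsigned int bank, unsigned int block,
                                      unsigned int cpu)
    {
            if (!block)
                    return MSR_AMD64_SMCA_MCx_MISC(bank);

            /* No extra MISC registers on this bank: no address. */
            if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank)))
                    return 0;

            return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
    }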
515 unsigned int bank, unsigned int block, in get_block_address() argument
520 if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS)) in get_block_address()
524 return smca_get_block_address(bank, block, cpu); in get_block_address()
529 addr = msr_ops.misc(bank); in get_block_address()
543 prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, in prepare_threshold_block() argument
552 per_cpu(bank_map, cpu) |= (1 << bank); in prepare_threshold_block()
556 b.bank = bank; in prepare_threshold_block()
559 b.interrupt_capable = lvt_interrupt_supported(bank, misc_high); in prepare_threshold_block()
591 enum smca_bank_types bank_type = smca_get_bank_type(m->bank); in amd_filter_mce()
602 if (m->bank == 4 && XEC(m->status, 0x1f) == 0x5) in amd_filter_mce()
612 * - Prevent possible spurious interrupts from the IF bank on Family 0x17
615 static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank) in disable_err_thresholding() argument
622 if (c->x86 == 0x15 && bank == 4) { in disable_err_thresholding()
629 if (smca_get_bank_type(bank) != SMCA_IF) in disable_err_thresholding()
632 msrs[0] = MSR_AMD64_SMCA_MCx_MISC(bank); in disable_err_thresholding()
657 unsigned int bank, block, cpu = smp_processor_id(); in mce_amd_feature_init() local
662 for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) { in mce_amd_feature_init()
664 smca_configure(bank, cpu); in mce_amd_feature_init()
666 disable_err_thresholding(c, bank); in mce_amd_feature_init()
669 address = get_block_address(address, low, high, bank, block, cpu); in mce_amd_feature_init()
683 offset = prepare_threshold_block(bank, block, address, offset, high); in mce_amd_feature_init()
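The fragments at lines 662-683 trace the per-CPU init path: for every bank, do the SMCA setup, apply the erratum workaround, then walk the bank's blocks and hand each usable one to prepare_threshold_block(). A condensed sketch of that double loop (the rdmsr_safe() and validity checks between lines 669 and 683 are assumptions about the elided middle):

    for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
            if (mce_flags.smca)
                    smca_configure(bank, cpu);

            disable_err_thresholding(c, bank);

            for (block = 0; block < NR_BLOCKS; ++block) {
                    address = get_block_address(address, low, high, bank, block, cpu);
                    if (!address)
                            break;

                    if (rdmsr_safe(address, &low, &high))
                            break;

                    /* Assumed: skip blocks that are invalid or locked down. */
                    if (!(high & MASK_VALID_HI))
                            continue;

                    offset = prepare_threshold_block(bank, block, address, offset, high);
            }
    }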
897 return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0; in amd_mce_is_memory_error()
899 return m->bank == 4 && xec == 0x8; in amd_mce_is_memory_error()
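Both return statements of the memory-error classifier survive in the listing, so the function reconstructs almost verbatim; only the extended-error-code extraction is assumed:

    bool amd_mce_is_memory_error(struct mce *m)
    {
            /* Assumed: extended error code, MCA_STATUS[20:16]. */
            u8 xec = (m->status >> 16) & 0x1f;

            /* SMCA: DRAM ECC errors come from a UMC bank with XEC 0. */
            if (mce_flags.smca)
                    return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0;

            /* Legacy: bank 4 (northbridge) with XEC 8 is a memory error. */
            return m->bank == 4 && xec == 0x8;
    }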
902 static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) in __log_error() argument
910 m.bank = bank; in __log_error()
928 rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid); in __log_error()
931 rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd); in __log_error()
950 _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) in _log_error_bank() argument
961 __log_error(bank, status, addr, misc); in _log_error_bank()
977 static void log_error_deferred(unsigned int bank) in log_error_deferred() argument
981 defrd = _log_error_bank(bank, msr_ops.status(bank), in log_error_deferred()
982 msr_ops.addr(bank), 0); in log_error_deferred()
989 wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); in log_error_deferred()
997 _log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank), in log_error_deferred()
998 MSR_AMD64_SMCA_MCx_DEADDR(bank), 0); in log_error_deferred()
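The deferred-error fragments (lines 981-998) suggest a two-register dance on SMCA systems: if a deferred error was already logged from MCA_STATUS, clear the duplicate in MCA_DESTAT; otherwise check MCA_DESTAT/MCA_DEADDR themselves. A sketch filling the gaps (the defrd flag comes from line 981; the control flow between the two _log_error_bank() calls is an assumption):

    static void log_error_deferred(unsigned int bank)
    {
            bool defrd;

            defrd = _log_error_bank(bank, msr_ops.status(bank),
                                    msr_ops.addr(bank), 0);

            if (!mce_flags.smca)
                    return;

            /* Already logged via MCA_STATUS: clear the MCA_DESTAT duplicate. */
            if (defrd) {
                    wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
                    return;
            }

            /* Otherwise the deferred error is only visible in MCA_DE{STAT,ADDR}. */
            _log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
                            MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
    }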
1004 unsigned int bank; in amd_deferred_error_interrupt() local
1006 for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) in amd_deferred_error_interrupt()
1007 log_error_deferred(bank); in amd_deferred_error_interrupt()
1010 static void log_error_thresholding(unsigned int bank, u64 misc) in log_error_thresholding() argument
1012 _log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc); in log_error_thresholding()
1030 log_error_thresholding(block->bank, ((u64)high << 32) | low); in log_and_reset_block()
1046 unsigned int bank, cpu = smp_processor_id(); in amd_threshold_interrupt() local
1049 * Validate that the threshold bank has been initialized already. The in amd_threshold_interrupt()
1056 for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) { in amd_threshold_interrupt()
1057 if (!(per_cpu(bank_map, cpu) & (1 << bank))) in amd_threshold_interrupt()
1060 first_block = bp[bank]->blocks; in amd_threshold_interrupt()
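The interrupt-handler fragments show the guard against hotplug races plus the bank_map test that mirrors the bit set in prepare_threshold_block() (line 552). A sketch of the loop body (the handling past line 1060 is an assumption based on the miscj list used elsewhere in this listing):

    for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
            if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                    continue;

            first_block = bp[bank]->blocks;
            if (!first_block)
                    continue;

            /* The head block first, then the rest of its miscj list. */
            log_and_reset_block(first_block);
            list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
                    log_and_reset_block(block);
    }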
1213 static const char *get_name(unsigned int bank, struct threshold_block *b) in get_name() argument
1218 if (b && bank == 4) in get_name()
1221 return th_names[bank]; in get_name()
1224 bank_type = smca_get_bank_type(bank); in get_name()
1234 if (smca_banks[bank].hwid->count == 1) in get_name()
1239 smca_banks[bank].sysfs_id); in get_name()
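Lines 1234-1239 show the naming policy that the comment at line 198 promises: a bank type occurring once keeps its plain name; duplicates get the per-type sysfs_id appended to keep every bank's sysfs name unique. A sketch of that tail (buf_mcatype, MAX_MCATYPE_NAME_LEN and smca_get_name() are assumptions consistent with the "temp c-string" comment):

    if (smca_banks[bank].hwid->count == 1)
            return smca_get_name(bank_type);

    /* Assumed: append the instance id, yielding e.g. "umc_0", "umc_1". */
    snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
             "%s_%x", smca_get_name(bank_type),
             smca_banks[bank].sysfs_id);
    return buf_mcatype;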
1244 unsigned int bank, unsigned int block, in allocate_threshold_blocks() argument
1251 if ((bank >= this_cpu_read(mce_num_banks)) || (block >= NR_BLOCKS)) in allocate_threshold_blocks()
1273 b->bank = bank; in allocate_threshold_blocks()
1277 b->interrupt_capable = lvt_interrupt_supported(bank, high); in allocate_threshold_blocks()
1295 err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b)); in allocate_threshold_blocks()
1299 address = get_block_address(address, low, high, bank, ++block, cpu); in allocate_threshold_blocks()
1303 err = allocate_threshold_blocks(cpu, tb, bank, block, address); in allocate_threshold_blocks()
1345 unsigned int bank) in threshold_create_bank() argument
1350 const char *name = get_name(bank, NULL); in threshold_create_bank()
1356 if (is_shared_bank(bank)) { in threshold_create_bank()
1367 bp[bank] = b; in threshold_create_bank()
1382 /* Associate the bank with the per-CPU MCE device */ in threshold_create_bank()
1389 if (is_shared_bank(bank)) { in threshold_create_bank()
1400 err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank)); in threshold_create_bank()
1404 bp[bank] = b; in threshold_create_bank()
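Lines 1350-1404 sketch both arms of bank creation: the shared northbridge bank is created once per node and reused, with a refcount bump, by the other CPUs, while everything else gets a freshly allocated threshold_bank. A condensed sketch of the shared-path reuse (nb, node_to_amd_nb(), __threshold_add_blocks() and the goto label are assumptions; the fragments show only the is_shared_bank() tests and the final allocate_threshold_blocks() call):

    if (is_shared_bank(bank)) {
            nb = node_to_amd_nb(amd_get_nb_id(cpu));

            /* Another CPU on this node already created bank 4: reuse it. */
            if (nb && nb->bank4) {
                    b = nb->bank4;
                    err = kobject_add(b->kobj, &dev->kobj, name);
                    if (err)
                            goto out;

                    bp[bank] = b;
                    refcount_inc(&b->cpus);
                    err = __threshold_add_blocks(b);
                    goto out;
            }
    }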
1420 static void deallocate_threshold_blocks(struct threshold_bank *bank) in deallocate_threshold_blocks() argument
1424 list_for_each_entry_safe(pos, tmp, &bank->blocks->miscj, miscj) { in deallocate_threshold_blocks()
1429 kobject_put(&bank->blocks->kobj); in deallocate_threshold_blocks()
1443 static void threshold_remove_bank(struct threshold_bank *bank) in threshold_remove_bank() argument
1447 if (!bank->blocks) in threshold_remove_bank()
1450 if (!bank->shared) in threshold_remove_bank()
1453 if (!refcount_dec_and_test(&bank->cpus)) { in threshold_remove_bank()
1454 __threshold_remove_blocks(bank); in threshold_remove_bank()
1458 * The last CPU on this node using the shared bank is going in threshold_remove_bank()
1459 * away, remove that bank now. in threshold_remove_bank()
1466 deallocate_threshold_blocks(bank); in threshold_remove_bank()
1469 kobject_put(bank->kobj); in threshold_remove_bank()
1470 kfree(bank); in threshold_remove_bank()
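The teardown fragments (lines 1447-1470) fit a goto-style cleanup: bail straight to freeing when no blocks exist, skip the refcount dance for private banks, and let only the last CPU on the node tear down a shared bank. A sketch of that structure (the label names and node bookkeeping are assumptions):

    static void threshold_remove_bank(struct threshold_bank *bank)
    {
            if (!bank->blocks)
                    goto out_free;

            if (!bank->shared)
                    goto out_dealloc;

            if (!refcount_dec_and_test(&bank->cpus)) {
                    /* Other CPUs still share it: unlink this CPU's view only. */
                    __threshold_remove_blocks(bank);
                    return;
            }

            /*
             * The last CPU on this node using the shared bank is going
             * away, remove that bank now.
             */

    out_dealloc:
            deallocate_threshold_blocks(bank);

    out_free:
            kobject_put(bank->kobj);
            kfree(bank);
    }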
1476 unsigned int bank, numbanks = this_cpu_read(mce_num_banks); in mce_threshold_remove_device() local
1487 for (bank = 0; bank < numbanks; bank++) { in mce_threshold_remove_device()
1488 if (bp[bank]) { in mce_threshold_remove_device()
1489 threshold_remove_bank(bp[bank]); in mce_threshold_remove_device()
1490 bp[bank] = NULL; in mce_threshold_remove_device()
1510 unsigned int numbanks, bank; in mce_threshold_create_device() local
1526 for (bank = 0; bank < numbanks; ++bank) { in mce_threshold_create_device()
1527 if (!(this_cpu_read(bank_map) & (1 << bank))) in mce_threshold_create_device()
1529 err = threshold_create_bank(bp, cpu, bank); in mce_threshold_create_device()