Lines Matching +full:use +full:-minimum +full:-ecc

1 // SPDX-License-Identifier: GPL-2.0-only
8 * Set by command line parameter. If BIOS has enabled the ECC, this override is
9 * cleared to prevent this driver from re-enabling the hardware.
20 if (!fam_type->flags.zn_regs_v2) in get_umc_reg()
33 /* Per-node stuff */
41 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
83 func, PCI_FUNC(pdev->devfn), offset); in __amd64_read_pci_cfg_dword()
96 func, PCI_FUNC(pdev->devfn), offset); in __amd64_write_pci_cfg_dword()
108 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg); in f15h_select_dct()
109 reg &= (pvt->model == 0x30) ? ~3 : ~1; in f15h_select_dct()
111 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); in f15h_select_dct()
121 * DCT0 -> F2x040..
122 * DCT1 -> F2x140..
131 switch (pvt->fam) { in amd64_read_dct_pci_cfg()
134 return -EINVAL; in amd64_read_dct_pci_cfg()
156 dct = (dct && pvt->model == 0x30) ? 3 : dct; in amd64_read_dct_pci_cfg()
162 return -EINVAL; in amd64_read_dct_pci_cfg()
168 return amd64_read_pci_cfg(pvt->F2, offset, val); in amd64_read_dct_pci_cfg()
178 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
193 scrubval -= 0x5; in __f17h_set_scrubval()
194 pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF); in __f17h_set_scrubval()
195 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1); in __f17h_set_scrubval()
197 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1); in __f17h_set_scrubval()
202 * issue. If the requested rate is too big, then use the last maximum value found.
218 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) { in __set_scrub_rate()
232 if (pvt->umc) { in __set_scrub_rate()
234 } else if (pvt->fam == 0x15 && pvt->model == 0x60) { in __set_scrub_rate()
236 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F); in __set_scrub_rate()
238 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F); in __set_scrub_rate()
240 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F); in __set_scrub_rate()
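
The scrub-rate helpers above scan a bandwidth-to-scrubval mapping table and pick the closest acceptable entry before programming the scrub control register. Below is a minimal userspace sketch of that table walk; the table contents and the helper name pick_scrubval() are illustrative, not the driver's real scrubrates[] data.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct scrubrate { uint32_t scrubval; uint32_t bandwidth; };

/* Hypothetical excerpt, ordered fastest to slowest; last entry = scrubbing off. */
static const struct scrubrate scrubrates[] = {
	{ 0x01, 1600000 }, { 0x02, 800000 }, { 0x03, 400000 },
	{ 0x04, 200000 }, { 0x05, 100000 }, { 0x00, 0 },
};

/* Walk the table until an entry's bandwidth fits the request; the final
 * "off" entry is the fallback when no listed rate is small enough. */
static uint32_t pick_scrubval(uint32_t requested_bw)
{
	size_t i;

	for (i = 0; i < sizeof(scrubrates) / sizeof(scrubrates[0]) - 1; i++)
		if (scrubrates[i].bandwidth <= requested_bw)
			break;

	return scrubrates[i].scrubval;
}

int main(void)
{
	printf("scrubval=0x%x\n", pick_scrubval(300000));
	return 0;
}
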
251 struct amd64_pvt *pvt = mci->pvt_info; in set_scrub_rate()
254 if (pvt->fam == 0xf) in set_scrub_rate()
257 if (pvt->fam == 0x15) { in set_scrub_rate()
259 if (pvt->model < 0x10) in set_scrub_rate()
262 if (pvt->model == 0x60) in set_scrub_rate()
270 struct amd64_pvt *pvt = mci->pvt_info; in get_scrub_rate()
271 int i, retval = -EINVAL; in get_scrub_rate()
274 if (pvt->umc) { in get_scrub_rate()
275 amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval); in get_scrub_rate()
277 amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval); in get_scrub_rate()
283 } else if (pvt->fam == 0x15) { in get_scrub_rate()
285 if (pvt->model < 0x10) in get_scrub_rate()
288 if (pvt->model == 0x60) in get_scrub_rate()
289 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); in get_scrub_rate()
291 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); in get_scrub_rate()
293 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); in get_scrub_rate()
315 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be in base_limit_match()
317 * Here we discard bits 63-40. See section 3.4.2 of AMD publication in base_limit_match()
318 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1 in base_limit_match()
341 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section in find_mc_by_sys_addr()
344 pvt = mci->pvt_info; in find_mc_by_sys_addr()
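
base_limit_match() and find_mc_by_sys_addr() decide which node owns a SysAddr by comparing it against that node's DRAM Base/Limit pair after discarding bits 63-40, as the comment above explains. A standalone sketch of that check follows; the struct dram_range type is a stand-in for values the driver actually assembles from the F1 base/limit registers.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical per-node DRAM range, already assembled from the base/limit registers. */
struct dram_range { uint64_t base; uint64_t lim; };

static bool base_limit_match(const struct dram_range *r, uint64_t sys_addr)
{
	/* K8 uses 40-bit DRAM addresses, so bits 63-40 are discarded first. */
	uint64_t addr = sys_addr & ((1ULL << 40) - 1);

	return r->base <= addr && addr <= r->lim;
}
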
406 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) { in get_cs_base_and_mask()
407 csbase = pvt->csels[dct].csbases[csrow]; in get_cs_base_and_mask()
408 csmask = pvt->csels[dct].csmasks[csrow]; in get_cs_base_and_mask()
417 } else if (pvt->fam == 0x16 || in get_cs_base_and_mask()
418 (pvt->fam == 0x15 && pvt->model >= 0x30)) { in get_cs_base_and_mask()
419 csbase = pvt->csels[dct].csbases[csrow]; in get_cs_base_and_mask()
420 csmask = pvt->csels[dct].csmasks[csrow >> 1]; in get_cs_base_and_mask()
435 csbase = pvt->csels[dct].csbases[csrow]; in get_cs_base_and_mask()
436 csmask = pvt->csels[dct].csmasks[csrow >> 1]; in get_cs_base_and_mask()
439 if (pvt->fam == 0x15) in get_cs_base_and_mask()
457 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
460 pvt->csels[dct].csbases[i]
463 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
466 for (i = 0; i < fam_type->max_mcs; i++)
470 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
478 pvt = mci->pvt_info; in input_addr_to_csrow()
491 pvt->mc_node_id); in input_addr_to_csrow()
497 (unsigned long)input_addr, pvt->mc_node_id); in input_addr_to_csrow()
499 return -1; in input_addr_to_csrow()
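
input_addr_to_csrow() walks every chip select on the node and reports the one whose base matches the input address under that chip select's mask, or -1 if none claims it. A self-contained model of that loop is sketched below; the cs_regs arrays are hypothetical stand-ins for the base/mask values the driver reads from hardware.

#include <stdint.h>

#define NUM_CHIPSELECTS 8

/* Hypothetical chip-select registers; the driver reads the real base/mask
 * values from the DCT (or the UMC on newer parts). */
struct cs_regs {
	uint64_t csbases[NUM_CHIPSELECTS];
	uint64_t csmasks[NUM_CHIPSELECTS];
	uint8_t  enabled[NUM_CHIPSELECTS];
};

/* Return the csrow that claims input_addr, or -1 if no enabled csrow matches. */
static int input_addr_to_csrow(const struct cs_regs *cs, uint64_t input_addr)
{
	int i;

	for (i = 0; i < NUM_CHIPSELECTS; i++) {
		if (!cs->enabled[i])
			continue;

		/* A csrow claims the address when base and address agree
		 * everywhere outside the masked bits. */
		if ((input_addr & ~cs->csmasks[i]) ==
		    (cs->csbases[i] & ~cs->csmasks[i]))
			return i;
	}

	return -1;
}
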
508 * - The revision of the node is not E or greater. In this case, the DRAM Hole
511 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
515 * complete 32-bit values despite the fact that the bitfields in the DHAR
516 * only represent bits 31-24 of the base and offset values.
521 struct amd64_pvt *pvt = mci->pvt_info; in get_dram_hole_info()
524 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) { in get_dram_hole_info()
526 pvt->ext_model, pvt->mc_node_id); in get_dram_hole_info()
531 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) { in get_dram_hole_info()
538 pvt->mc_node_id); in get_dram_hole_info()
544 /* +------------------+--------------------+--------------------+----- in get_dram_hole_info()
546 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from | in get_dram_hole_info()
550 * | | | (0xffffffff-x))] | in get_dram_hole_info()
551 * +------------------+--------------------+--------------------+----- in get_dram_hole_info()
561 *hole_size = (1ULL << 32) - *hole_base; in get_dram_hole_info()
563 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt) in get_dram_hole_info()
567 pvt->mc_node_id, (unsigned long)*hole_base, in get_dram_hole_info()
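
get_dram_hole_info() derives the hole geometry from the DHAR: the hole base comes from bits 31-24 and the hole always ends at 4GB, as the lines above show. A small sketch of those two computations; the hole offset comes from a separate, family-dependent DHAR field and is therefore left out here.

#include <stdint.h>

static void dhar_hole_info(uint32_t dhar, uint64_t *hole_base, uint64_t *hole_size)
{
	*hole_base = dhar & 0xff000000u;           /* DramHoleBase, bits 31-24 */
	*hole_size = (1ULL << 32) - *hole_base;    /* hole runs up to 4GB      */
}
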
579 struct amd64_pvt *pvt = mci->pvt_info; \
581 return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
630 struct amd64_pvt *pvt = mci->pvt_info; in inject_section_show()
631 return sprintf(buf, "0x%x\n", pvt->injection.section); in inject_section_show()
635 * store error injection section value which refers to one of 4 16-byte sections
636 * within a 64-byte cacheline
645 struct amd64_pvt *pvt = mci->pvt_info; in inject_section_store()
655 return -EINVAL; in inject_section_store()
658 pvt->injection.section = (u32) value; in inject_section_store()
666 struct amd64_pvt *pvt = mci->pvt_info; in inject_word_show()
667 return sprintf(buf, "0x%x\n", pvt->injection.word); in inject_word_show()
671 * store error injection word value which refers to one of 9 16-bit words of the
672 * 16-byte (128-bit + ECC bits) section
681 struct amd64_pvt *pvt = mci->pvt_info; in inject_word_store()
691 return -EINVAL; in inject_word_store()
694 pvt->injection.word = (u32) value; in inject_word_store()
703 struct amd64_pvt *pvt = mci->pvt_info; in inject_ecc_vector_show()
704 return sprintf(buf, "0x%x\n", pvt->injection.bit_map); in inject_ecc_vector_show()
710 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
717 struct amd64_pvt *pvt = mci->pvt_info; in inject_ecc_vector_store()
727 return -EINVAL; in inject_ecc_vector_store()
730 pvt->injection.bit_map = (u32) value; in inject_ecc_vector_store()
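
The three sysfs attributes above only stage injection parameters in the pvt area; the actual register writes happen in the read/write triggers further down. A sketch of that staging step follows. The bounds used (4 sections, 9 words, 16-bit vector) are inferred from the comments, and struct injection/stage_injection() are illustrative names, not the driver's.

#include <errno.h>
#include <stdint.h>

/* Staged DRAM ECC injection parameters, mirroring the three sysfs files above. */
struct injection {
	uint32_t section;	/* one of 4 16-byte sections of the cacheline */
	uint32_t word;		/* one of 9 16-bit words within that section  */
	uint32_t bit_map;	/* 16-bit ECC error-injection vector          */
};

/* Presumed bounds, taken from the comments; the real store handlers reject
 * out-of-range values with -EINVAL. */
static int stage_injection(struct injection *inj, uint32_t section,
			   uint32_t word, uint32_t bit_map)
{
	if (section > 3 || word > 8 || bit_map > 0xFFFF)
		return -EINVAL;

	inj->section = section;
	inj->word    = word;
	inj->bit_map = bit_map;

	return 0;
}
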
735 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
743 struct amd64_pvt *pvt = mci->pvt_info; in inject_read_store()
752 /* Form value to choose 16-byte section of cacheline */ in inject_read_store()
753 section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section); in inject_read_store()
755 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); in inject_read_store()
757 word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection); in inject_read_store()
760 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); in inject_read_store()
768 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
776 struct amd64_pvt *pvt = mci->pvt_info; in inject_write_store()
785 /* Form value to choose 16-byte section of cacheline */ in inject_write_store()
786 section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section); in inject_write_store()
788 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); in inject_write_store()
790 word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection); in inject_write_store()
799 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); in inject_write_store()
803 amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp); in inject_write_store()
839 struct amd64_pvt *pvt = mci->pvt_info; in inj_is_visible()
842 if (pvt->fam >= 0x10 && pvt->fam <= 0x16) in inj_is_visible()
843 return attr->mode; in inj_is_visible()
885 struct amd64_pvt *pvt = mci->pvt_info; in sys_addr_to_dram_addr()
889 dram_base = get_dram_base(pvt, pvt->mc_node_id); in sys_addr_to_dram_addr()
895 /* use DHAR to translate SysAddr to DramAddr */ in sys_addr_to_dram_addr()
896 dram_addr = sys_addr - hole_offset; in sys_addr_to_dram_addr()
908 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8 in sys_addr_to_dram_addr()
909 * only deals with 40-bit values. Therefore we discard bits 63-40 of in sys_addr_to_dram_addr()
912 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture in sys_addr_to_dram_addr()
915 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base; in sys_addr_to_dram_addr()
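
Putting the pieces together, sys_addr_to_dram_addr() either relocates an address that falls in the hoisted region back down by the hole offset, or subtracts the node's DRAM base from the 40-bit SysAddr. A compact model, assuming hole and base values obtained as in the sketches above:

#include <stdbool.h>
#include <stdint.h>

static uint64_t sysaddr_to_dramaddr(uint64_t sys_addr, uint64_t dram_base,
				    bool hole_valid, uint64_t hole_offset,
				    uint64_t hole_size)
{
	/* Addresses hoisted above 4GB map back down by the hole offset. */
	if (hole_valid && sys_addr >= (1ULL << 32) &&
	    sys_addr < (1ULL << 32) + hole_size)
		return sys_addr - hole_offset;

	/* Otherwise: 40-bit SysAddr minus the node's DRAM base address. */
	return (sys_addr & ((1ULL << 40) - 1)) - dram_base;
}
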
944 pvt = mci->pvt_info; in dram_addr_to_input_addr()
947 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) in dram_addr_to_input_addr()
982 err->page = (u32) (error_address >> PAGE_SHIFT); in error_address_to_page_and_offset()
983 err->offset = ((u32) error_address) & ~PAGE_MASK; in error_address_to_page_and_offset()
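
error_address_to_page_and_offset() then splits the decoded address into an EDAC page number and a byte offset within the page. A trivial equivalent, assuming 4K pages:

#include <stdint.h>

#define PAGE_SHIFT 12	/* assuming 4K pages */

static void addr_to_page_and_offset(uint64_t addr, uint32_t *page, uint32_t *offset)
{
	*page   = (uint32_t)(addr >> PAGE_SHIFT);
	*offset = (uint32_t)(addr & ((1u << PAGE_SHIFT) - 1));
}
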
989 * of a node that detected an ECC memory error. mci represents the node that
991 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
1000 if (csrow == -1) in sys_addr_to_csrow()
1019 * Use Instance Id 0xFF to indicate a broadcast read.
1026 int err = -ENODEV; in __df_indirect_read()
1031 F4 = node_to_amd_nb(node)->link; in __df_indirect_read()
1107 ctx.ret_addr -= hi_addr_offset; in umc_normaddr_to_sysaddr()
1145 /* Re-use intlv_num_chan by setting it equal to log2(#channels) */ in umc_normaddr_to_sysaddr()
1188 * This is the fabric id for this coherent slave. Use in umc_normaddr_to_sysaddr()
1201 cs_mask = (1 << die_id_bit) - 1; in umc_normaddr_to_sysaddr()
1230 * The pre-interleaved address consists of XXXXXXIIIYYYYY in umc_normaddr_to_sysaddr()
1232 * address bits from the post-interleaved address. in umc_normaddr_to_sysaddr()
1237 temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0); in umc_normaddr_to_sysaddr()
1253 ctx.ret_addr += (BIT_ULL(32) - dram_hole_base); in umc_normaddr_to_sysaddr()
1257 /* Save some parentheses and grab ls-bit at the end. */ in umc_normaddr_to_sysaddr()
1278 return -EINVAL; in umc_normaddr_to_sysaddr()
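
The "XXXXXXIIIYYYYY" comment describes how umc_normaddr_to_sysaddr() re-inserts the channel-interleave ID into the normalized address: the low Y bits stay put, the cs_id goes in at intlv_addr_bit, and the remaining upper bits shift up past the inserted field. A standalone sketch of just that step (reinsert_intlv_bits() is an illustrative name):

#include <stdint.h>

static uint64_t reinsert_intlv_bits(uint64_t norm_addr, uint64_t cs_id,
				    unsigned int intlv_addr_bit,
				    unsigned int num_intlv_bits)
{
	uint64_t y = norm_addr & ((1ULL << intlv_addr_bit) - 1);   /* low Y bits   */
	uint64_t i = cs_id << intlv_addr_bit;                      /* interleave ID */
	uint64_t x = (norm_addr >> intlv_addr_bit)                 /* upper bits,   */
			<< (intlv_addr_bit + num_intlv_bits);      /* shifted up    */

	return x | i | y;
}
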
1284 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
1285 * are ECC capable.
1292 if (pvt->umc) { in determine_edac_cap()
1296 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT)) in determine_edac_cap()
1302 if (pvt->umc[i].umc_cfg & BIT(12)) in determine_edac_cap()
1309 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F) in determine_edac_cap()
1313 if (pvt->dclr0 & BIT(bit)) in determine_edac_cap()
1326 if (pvt->dram_type == MEM_LRDDR3) { in debug_dump_dramcfg_low()
1327 u32 dcsm = pvt->csels[chan].csmasks[0]; in debug_dump_dramcfg_low()
1330 * same 'type' until proven otherwise. So, use a cs in debug_dump_dramcfg_low()
1336 edac_dbg(1, "All DIMMs support ECC:%s\n", in debug_dump_dramcfg_low()
1343 if (pvt->fam == 0x10) in debug_dump_dramcfg_low()
1374 /* Asymmetric dual-rank DIMM support. */ in f17_get_cs_mode()
1387 pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) { in f17_get_cs_mode()
1388 edac_dbg(1, "3R interleaving in use.\n"); in f17_get_cs_mode()
1407 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0); in debug_display_dimm_sizes_df()
1408 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1); in debug_display_dimm_sizes_df()
1423 umc = &pvt->umc[i]; in __dump_misc_regs_df()
1425 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); in __dump_misc_regs_df()
1426 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg); in __dump_misc_regs_df()
1427 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl); in __dump_misc_regs_df()
1428 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl); in __dump_misc_regs_df()
1430 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp); in __dump_misc_regs_df()
1431 edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp); in __dump_misc_regs_df()
1433 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp); in __dump_misc_regs_df()
1435 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi); in __dump_misc_regs_df()
1437 edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n", in __dump_misc_regs_df()
1438 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no", in __dump_misc_regs_df()
1439 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no"); in __dump_misc_regs_df()
1440 edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n", in __dump_misc_regs_df()
1441 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no"); in __dump_misc_regs_df()
1443 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no"); in __dump_misc_regs_df()
1445 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no"); in __dump_misc_regs_df()
1447 if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) { in __dump_misc_regs_df()
1448 amd_smn_read(pvt->mc_node_id, in __dump_misc_regs_df()
1459 pvt->dhar, dhar_base(pvt)); in __dump_misc_regs_df()
1465 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); in __dump_misc_regs()
1468 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); in __dump_misc_regs()
1470 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n", in __dump_misc_regs()
1471 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", in __dump_misc_regs()
1472 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); in __dump_misc_regs()
1474 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0); in __dump_misc_regs()
1476 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); in __dump_misc_regs()
1479 pvt->dhar, dhar_base(pvt), in __dump_misc_regs()
1480 (pvt->fam == 0xf) ? k8_dhar_offset(pvt) in __dump_misc_regs()
1486 if (pvt->fam == 0xf) in __dump_misc_regs()
1493 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1); in __dump_misc_regs()
1499 if (pvt->umc) in dump_misc_regs()
1506 amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz); in dump_misc_regs()
1514 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) { in prep_chip_selects()
1515 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; in prep_chip_selects()
1516 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8; in prep_chip_selects()
1517 } else if (pvt->fam == 0x15 && pvt->model == 0x30) { in prep_chip_selects()
1518 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4; in prep_chip_selects()
1519 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2; in prep_chip_selects()
1520 } else if (pvt->fam >= 0x17) { in prep_chip_selects()
1524 pvt->csels[umc].b_cnt = 4; in prep_chip_selects()
1525 pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 4 : 2; in prep_chip_selects()
1529 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; in prep_chip_selects()
1530 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4; in prep_chip_selects()
1549 base = &pvt->csels[umc].csbases[cs]; in read_umc_base_mask()
1550 base_sec = &pvt->csels[umc].csbases_sec[cs]; in read_umc_base_mask()
1555 if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) in read_umc_base_mask()
1559 if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec)) in read_umc_base_mask()
1568 mask = &pvt->csels[umc].csmasks[cs]; in read_umc_base_mask()
1569 mask_sec = &pvt->csels[umc].csmasks_sec[cs]; in read_umc_base_mask()
1574 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) in read_umc_base_mask()
1578 if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec)) in read_umc_base_mask()
1594 if (pvt->umc) in read_dct_base_mask()
1600 u32 *base0 = &pvt->csels[0].csbases[cs]; in read_dct_base_mask()
1601 u32 *base1 = &pvt->csels[1].csbases[cs]; in read_dct_base_mask()
1607 if (pvt->fam == 0xf) in read_dct_base_mask()
1612 cs, *base1, (pvt->fam == 0x10) ? reg1 in read_dct_base_mask()
1619 u32 *mask0 = &pvt->csels[0].csmasks[cs]; in read_dct_base_mask()
1620 u32 *mask1 = &pvt->csels[1].csmasks[cs]; in read_dct_base_mask()
1626 if (pvt->fam == 0xf) in read_dct_base_mask()
1631 cs, *mask1, (pvt->fam == 0x10) ? reg1 in read_dct_base_mask()
1642 umc = &pvt->umc[i]; in determine_memory_type_df()
1644 if (!(umc->sdp_ctrl & UMC_SDP_INIT)) { in determine_memory_type_df()
1645 umc->dram_type = MEM_EMPTY; in determine_memory_type_df()
1651 * and has DDR5 DIMMs in use. in determine_memory_type_df()
1653 if (fam_type->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { in determine_memory_type_df()
1654 if (umc->dimm_cfg & BIT(5)) in determine_memory_type_df()
1655 umc->dram_type = MEM_LRDDR5; in determine_memory_type_df()
1656 else if (umc->dimm_cfg & BIT(4)) in determine_memory_type_df()
1657 umc->dram_type = MEM_RDDR5; in determine_memory_type_df()
1659 umc->dram_type = MEM_DDR5; in determine_memory_type_df()
1661 if (umc->dimm_cfg & BIT(5)) in determine_memory_type_df()
1662 umc->dram_type = MEM_LRDDR4; in determine_memory_type_df()
1663 else if (umc->dimm_cfg & BIT(4)) in determine_memory_type_df()
1664 umc->dram_type = MEM_RDDR4; in determine_memory_type_df()
1666 umc->dram_type = MEM_DDR4; in determine_memory_type_df()
1669 edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]); in determine_memory_type_df()
1677 if (pvt->umc) in determine_memory_type()
1680 switch (pvt->fam) { in determine_memory_type()
1682 if (pvt->ext_model >= K8_REV_F) in determine_memory_type()
1685 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; in determine_memory_type()
1689 if (pvt->dchr0 & DDR3_MODE) in determine_memory_type()
1692 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2; in determine_memory_type()
1696 if (pvt->model < 0x60) in determine_memory_type()
1702 * We use a Chip Select value of '0' to obtain dcsm. in determine_memory_type()
1709 dcsm = pvt->csels[0].csmasks[0]; in determine_memory_type()
1712 pvt->dram_type = MEM_DDR4; in determine_memory_type()
1713 else if (pvt->dclr0 & BIT(16)) in determine_memory_type()
1714 pvt->dram_type = MEM_DDR3; in determine_memory_type()
1716 pvt->dram_type = MEM_LRDDR3; in determine_memory_type()
1718 pvt->dram_type = MEM_RDDR3; in determine_memory_type()
1726 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam); in determine_memory_type()
1727 pvt->dram_type = MEM_EMPTY; in determine_memory_type()
1732 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; in determine_memory_type()
1740 if (pvt->ext_model >= K8_REV_F) in k8_early_channel_count()
1742 flag = pvt->dclr0 & WIDTH_128; in k8_early_channel_count()
1745 flag = pvt->dclr0 & REVE_WIDTH_128; in k8_early_channel_count()
1748 pvt->dclr1 = 0; in k8_early_channel_count()
1756 u16 mce_nid = topology_die_id(m->extcpu); in get_error_address()
1766 pvt = mci->pvt_info; in get_error_address()
1768 if (pvt->fam == 0xf) { in get_error_address()
1773 addr = m->addr & GENMASK_ULL(end_bit, start_bit); in get_error_address()
1778 if (pvt->fam == 0x15) { in get_error_address()
1787 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp); in get_error_address()
1802 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp); in get_error_address()
1826 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) && in pci_get_related_function()
1827 (dev->bus->number == related->bus->number) && in pci_get_related_function()
1828 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn))) in pci_get_related_function()
1843 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); in read_dram_base_limit_regs()
1844 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); in read_dram_base_limit_regs()
1846 if (pvt->fam == 0xf) in read_dram_base_limit_regs()
1852 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); in read_dram_base_limit_regs()
1853 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); in read_dram_base_limit_regs()
1856 if (pvt->fam != 0x15) in read_dram_base_limit_regs()
1863 if (pvt->model == 0x60) in read_dram_base_limit_regs()
1865 else if (pvt->model == 0x30) in read_dram_base_limit_regs()
1870 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc); in read_dram_base_limit_regs()
1876 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0); in read_dram_base_limit_regs()
1879 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16; in read_dram_base_limit_regs()
1881 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0); in read_dram_base_limit_regs()
1884 pvt->ranges[range].lim.hi |= llim >> 13; in read_dram_base_limit_regs()
1892 struct amd64_pvt *pvt = mci->pvt_info; in k8_map_sysaddr_to_csrow()
1900 err->src_mci = find_mc_by_sys_addr(mci, sys_addr); in k8_map_sysaddr_to_csrow()
1901 if (!err->src_mci) { in k8_map_sysaddr_to_csrow()
1904 err->err_code = ERR_NODE; in k8_map_sysaddr_to_csrow()
1909 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr); in k8_map_sysaddr_to_csrow()
1910 if (err->csrow < 0) { in k8_map_sysaddr_to_csrow()
1911 err->err_code = ERR_CSROW; in k8_map_sysaddr_to_csrow()
1916 if (pvt->nbcfg & NBCFG_CHIPKILL) { in k8_map_sysaddr_to_csrow()
1917 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome); in k8_map_sysaddr_to_csrow()
1918 if (err->channel < 0) { in k8_map_sysaddr_to_csrow()
1924 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - " in k8_map_sysaddr_to_csrow()
1926 err->syndrome); in k8_map_sysaddr_to_csrow()
1927 err->err_code = ERR_CHANNEL; in k8_map_sysaddr_to_csrow()
1932 * non-chipkill ecc mode in k8_map_sysaddr_to_csrow()
1935 * channel number when using non-chipkill memory. This method in k8_map_sysaddr_to_csrow()
1937 * (Wish the email was placed in this comment - norsk) in k8_map_sysaddr_to_csrow()
1939 err->channel = ((sys_addr & BIT(3)) != 0); in k8_map_sysaddr_to_csrow()
1960 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; in k8_dbam_to_chip_select()
1962 if (pvt->ext_model >= K8_REV_F) { in k8_dbam_to_chip_select()
1966 else if (pvt->ext_model >= K8_REV_D) { in k8_dbam_to_chip_select()
1996 return 32 << (cs_mode - diff); in k8_dbam_to_chip_select()
2005 * Get the number of DCT channels in use.
2017 if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128)) in f1x_early_channel_count()
2028 edac_dbg(0, "Data width is not 128 bits - need more decoding\n"); in f1x_early_channel_count()
2036 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0); in f1x_early_channel_count()
2060 channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT); in f17_early_channel_count()
2073 cs_size = -1; in ddr3_cs_size()
2083 if (cs_size != -1) in ddr3_cs_size()
2095 cs_size = -1; in ddr3_lrdimm_cs_size()
2103 if (cs_size != -1) in ddr3_lrdimm_cs_size()
2114 cs_size = -1; in ddr4_cs_size()
2127 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; in f10_dbam_to_chip_select()
2131 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) in f10_dbam_to_chip_select()
2153 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr]; in f15_m60h_dbam_to_chip_select()
2157 if (pvt->dram_type == MEM_DDR4) { in f15_m60h_dbam_to_chip_select()
2159 return -1; in f15_m60h_dbam_to_chip_select()
2162 } else if (pvt->dram_type == MEM_LRDDR3) { in f15_m60h_dbam_to_chip_select()
2169 /* Minimum cs size is 512 MB for F15h M60h */ in f15_m60h_dbam_to_chip_select()
2171 return -1; in f15_m60h_dbam_to_chip_select()
2189 return -1; in f16_dbam_to_chip_select()
2218 * CS0 and CS1 -> MASK0 / DIMM0 in f17_addr_mask_to_cs_size()
2219 * CS2 and CS3 -> MASK1 / DIMM1 in f17_addr_mask_to_cs_size()
2224 * CS0 -> MASK0 -> DIMM0 in f17_addr_mask_to_cs_size()
2225 * CS1 -> MASK1 -> DIMM0 in f17_addr_mask_to_cs_size()
2226 * CS2 -> MASK2 -> DIMM1 in f17_addr_mask_to_cs_size()
2227 * CS3 -> MASK3 -> DIMM1 in f17_addr_mask_to_cs_size()
2234 if (!fam_type->flags.zn_regs_v2) in f17_addr_mask_to_cs_size()
2237 /* Asymmetric dual-rank DIMM support. */ in f17_addr_mask_to_cs_size()
2239 addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr]; in f17_addr_mask_to_cs_size()
2241 addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr]; in f17_addr_mask_to_cs_size()
2254 msb = fls(addr_mask_orig) - 1; in f17_addr_mask_to_cs_size()
2256 num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE); in f17_addr_mask_to_cs_size()
2259 addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1); in f17_addr_mask_to_cs_size()
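
f17_addr_mask_to_cs_size() turns a UMC address-mask register into a chip-select size by first removing the zero bits that interleaving punched into the mask, then converting the contiguous mask into a size. The sketch below models that arithmetic, assuming the usual encoding in which mask register bits [31:1] cover address bits [39:9]; the function name and 3R-interleave flag are illustrative.

#include <stdint.h>

static uint64_t addr_mask_to_cs_size_mb(uint32_t addr_mask, int intlv_3r)
{
	unsigned int msb, weight, num_zero_bits, top;
	uint64_t mask_deintlv, size_kb;

	if (!addr_mask)
		return 0;

	msb    = 31 - __builtin_clz(addr_mask);   /* highest set bit    */
	weight = __builtin_popcount(addr_mask);   /* number of set bits */

	/* Zero bits inside the mask are interleave holes; 3R interleave
	 * flips one extra bit that must not be counted. */
	num_zero_bits = msb - weight - !!intlv_3r;

	/* Collapse the holes: contiguous mask from bit 1 up to the new top. */
	top = msb - num_zero_bits;
	mask_deintlv = ((1ULL << (top + 1)) - 1) & ~1ULL;

	/* Register bits [31:1] correspond to address bits [39:9]: size in kB. */
	size_kb = (mask_deintlv >> 2) + 1;

	return size_kb >> 10;                     /* size in MB */
}
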
2275 if (pvt->fam == 0xf) in read_dram_ctl_register()
2278 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) { in read_dram_ctl_register()
2280 pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); in read_dram_ctl_register()
2289 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n", in read_dram_ctl_register()
2299 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi); in read_dram_ctl_register()
2340 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1; in f1x_determine_channel()
2349 * see F2x110[DctSelIntLvAddr] - channel interleave mode in f1x_determine_channel()
2388 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16; in f1x_get_norm_dct_addr()
2425 return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23)); in f1x_get_norm_dct_addr()
2454 * -EINVAL: NOT FOUND
2455 * 0..csrow = Chip-Select Row
2462 int cs_found = -EINVAL; in f1x_lookup_addr_in_dct()
2469 pvt = mci->pvt_info; in f1x_lookup_addr_in_dct()
2488 if (pvt->fam == 0x15 && pvt->model >= 0x30) { in f1x_lookup_addr_in_dct()
2502 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
2503 * swapped with a region located at the bottom of memory so that the GPU can use
2510 if (pvt->fam == 0x10) { in f1x_swap_interleaved_region()
2512 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3)) in f1x_swap_interleaved_region()
2516 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg); in f1x_swap_interleaved_region()
2539 int cs_found = -EINVAL; in f1x_match_to_this_node()
2557 return -EINVAL; in f1x_match_to_this_node()
2561 return -EINVAL; in f1x_match_to_this_node()
2619 int cs_found = -EINVAL; in f15_m30h_match_to_this_node()
2631 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg); in f15_m30h_match_to_this_node()
2632 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg); in f15_m30h_match_to_this_node()
2642 return -EINVAL; in f15_m30h_match_to_this_node()
2649 return -EINVAL; in f15_m30h_match_to_this_node()
2659 return -EINVAL; in f15_m30h_match_to_this_node()
2665 return -EINVAL; in f15_m30h_match_to_this_node()
2667 if (pvt->model >= 0x60) in f15_m30h_match_to_this_node()
2675 return -EINVAL; in f15_m30h_match_to_this_node()
2685 chan_addr = sys_addr - chan_offset; in f15_m30h_match_to_this_node()
2696 return -EINVAL; in f15_m30h_match_to_this_node()
2706 return -EINVAL; in f15_m30h_match_to_this_node()
2710 amd64_read_pci_cfg(pvt->F1, in f15_m30h_match_to_this_node()
2725 * pvt->csels[1]. So we need to use '1' here to get correct info. in f15_m30h_match_to_this_node()
2742 int cs_found = -EINVAL; in f1x_translate_sysaddr_to_cs()
2749 if (pvt->fam == 0x15 && pvt->model >= 0x30) in f1x_translate_sysaddr_to_cs()
2775 struct amd64_pvt *pvt = mci->pvt_info; in f1x_map_sysaddr_to_csrow()
2779 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel); in f1x_map_sysaddr_to_csrow()
2780 if (err->csrow < 0) { in f1x_map_sysaddr_to_csrow()
2781 err->err_code = ERR_CSROW; in f1x_map_sysaddr_to_csrow()
2791 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome); in f1x_map_sysaddr_to_csrow()
2801 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; in debug_display_dimm_sizes()
2802 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; in debug_display_dimm_sizes()
2804 if (pvt->fam == 0xf) { in debug_display_dimm_sizes()
2806 if (pvt->ext_model < K8_REV_F) in debug_display_dimm_sizes()
2812 if (pvt->fam == 0x10) { in debug_display_dimm_sizes()
2813 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 in debug_display_dimm_sizes()
2814 : pvt->dbam0; in debug_display_dimm_sizes()
2816 pvt->csels[1].csbases : in debug_display_dimm_sizes()
2817 pvt->csels[0].csbases; in debug_display_dimm_sizes()
2819 dbam = pvt->dbam0; in debug_display_dimm_sizes()
2820 dcsb = pvt->csels[1].csbases; in debug_display_dimm_sizes()
2838 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, in debug_display_dimm_sizes()
2844 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, in debug_display_dimm_sizes()
3116 return -1; in decode_syndrome()
3139 return -1; in map_err_sym_to_channel()
3147 return -1; in map_err_sym_to_channel()
3152 struct amd64_pvt *pvt = mci->pvt_info; in get_channel_from_ecc_syndrome()
3153 int err_sym = -1; in get_channel_from_ecc_syndrome()
3155 if (pvt->ecc_sym_sz == 8) in get_channel_from_ecc_syndrome()
3158 pvt->ecc_sym_sz); in get_channel_from_ecc_syndrome()
3159 else if (pvt->ecc_sym_sz == 4) in get_channel_from_ecc_syndrome()
3162 pvt->ecc_sym_sz); in get_channel_from_ecc_syndrome()
3164 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz); in get_channel_from_ecc_syndrome()
3168 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); in get_channel_from_ecc_syndrome()
3188 switch (err->err_code) { in __log_ecc_error()
3199 string = "Unknown syndrome - possible error reporting race"; in __log_ecc_error()
3202 string = "MCA_SYND not valid - unknown syndrome and csrow"; in __log_ecc_error()
3213 err->page, err->offset, err->syndrome, in __log_ecc_error()
3214 err->csrow, err->channel, -1, in __log_ecc_error()
3222 u8 ecc_type = (m->status >> 45) & 0x3; in decode_bus_error()
3223 u8 xec = XEC(m->status, 0x1f); in decode_bus_error()
3224 u16 ec = EC(m->status); in decode_bus_error()
3232 pvt = mci->pvt_info; in decode_bus_error()
3238 /* Do only ECC errors */ in decode_bus_error()
3247 err.syndrome = extract_syndrome(m->status); in decode_bus_error()
3249 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err); in decode_bus_error()
3265 return (m->ipid & GENMASK(31, 0)) >> 20; in find_umc_channel()
3270 u8 ecc_type = (m->status >> 45) & 0x3; in decode_umc_error()
3280 pvt = mci->pvt_info; in decode_umc_error()
3284 if (m->status & MCI_STATUS_DEFERRED) in decode_umc_error()
3289 if (!(m->status & MCI_STATUS_SYNDV)) { in decode_umc_error()
3295 u8 length = (m->synd >> 18) & 0x3f; in decode_umc_error()
3298 err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0); in decode_umc_error()
3303 err.csrow = m->synd & 0x7; in decode_umc_error()
3305 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { in decode_umc_error()
3317 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
3324 if (pvt->umc) { in reserve_mc_sibling_devs()
3325 pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3); in reserve_mc_sibling_devs()
3326 if (!pvt->F0) { in reserve_mc_sibling_devs()
3328 return -ENODEV; in reserve_mc_sibling_devs()
3331 pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3); in reserve_mc_sibling_devs()
3332 if (!pvt->F6) { in reserve_mc_sibling_devs()
3333 pci_dev_put(pvt->F0); in reserve_mc_sibling_devs()
3334 pvt->F0 = NULL; in reserve_mc_sibling_devs()
3337 return -ENODEV; in reserve_mc_sibling_devs()
3341 pci_ctl_dev = &pvt->F0->dev; in reserve_mc_sibling_devs()
3343 edac_dbg(1, "F0: %s\n", pci_name(pvt->F0)); in reserve_mc_sibling_devs()
3344 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); in reserve_mc_sibling_devs()
3345 edac_dbg(1, "F6: %s\n", pci_name(pvt->F6)); in reserve_mc_sibling_devs()
3351 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3); in reserve_mc_sibling_devs()
3352 if (!pvt->F1) { in reserve_mc_sibling_devs()
3354 return -ENODEV; in reserve_mc_sibling_devs()
3358 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3); in reserve_mc_sibling_devs()
3359 if (!pvt->F2) { in reserve_mc_sibling_devs()
3360 pci_dev_put(pvt->F1); in reserve_mc_sibling_devs()
3361 pvt->F1 = NULL; in reserve_mc_sibling_devs()
3364 return -ENODEV; in reserve_mc_sibling_devs()
3368 pci_ctl_dev = &pvt->F2->dev; in reserve_mc_sibling_devs()
3370 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1)); in reserve_mc_sibling_devs()
3371 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2)); in reserve_mc_sibling_devs()
3372 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); in reserve_mc_sibling_devs()
3379 if (pvt->umc) { in free_mc_sibling_devs()
3380 pci_dev_put(pvt->F0); in free_mc_sibling_devs()
3381 pci_dev_put(pvt->F6); in free_mc_sibling_devs()
3383 pci_dev_put(pvt->F1); in free_mc_sibling_devs()
3384 pci_dev_put(pvt->F2); in free_mc_sibling_devs()
3390 pvt->ecc_sym_sz = 4; in determine_ecc_sym_sz()
3392 if (pvt->umc) { in determine_ecc_sym_sz()
3397 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) { in determine_ecc_sym_sz()
3398 if (pvt->umc[i].ecc_ctrl & BIT(9)) { in determine_ecc_sym_sz()
3399 pvt->ecc_sym_sz = 16; in determine_ecc_sym_sz()
3401 } else if (pvt->umc[i].ecc_ctrl & BIT(7)) { in determine_ecc_sym_sz()
3402 pvt->ecc_sym_sz = 8; in determine_ecc_sym_sz()
3407 } else if (pvt->fam >= 0x10) { in determine_ecc_sym_sz()
3410 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); in determine_ecc_sym_sz()
3412 if (pvt->fam != 0x16) in determine_ecc_sym_sz()
3413 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1); in determine_ecc_sym_sz()
3415 /* F10h, revD and later can do x8 ECC too. */ in determine_ecc_sym_sz()
3416 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25)) in determine_ecc_sym_sz()
3417 pvt->ecc_sym_sz = 8; in determine_ecc_sym_sz()
3426 u8 nid = pvt->mc_node_id; in __read_mc_regs_df()
3434 umc = &pvt->umc[i]; in __read_mc_regs_df()
3436 amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg); in __read_mc_regs_df()
3437 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); in __read_mc_regs_df()
3438 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); in __read_mc_regs_df()
3439 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); in __read_mc_regs_df()
3440 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi); in __read_mc_regs_df()
3455 * those are Read-As-Zero. in read_mc_regs()
3457 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); in read_mc_regs()
3458 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem); in read_mc_regs()
3463 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); in read_mc_regs()
3464 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2); in read_mc_regs()
3469 if (pvt->umc) { in read_mc_regs()
3471 amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar); in read_mc_regs()
3476 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); in read_mc_regs()
3497 (rw & 0x1) ? "R" : "-", in read_mc_regs()
3498 (rw & 0x2) ? "W" : "-", in read_mc_regs()
3503 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar); in read_mc_regs()
3504 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0); in read_mc_regs()
3506 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); in read_mc_regs()
3508 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0); in read_mc_regs()
3509 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0); in read_mc_regs()
3512 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1); in read_mc_regs()
3513 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1); in read_mc_regs()
3521 if (!pvt->umc) in read_mc_regs()
3522 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]); in read_mc_regs()
3531 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
3532 * k8 private pointer to -->
3540 * 0-3 CSROWs 0 and 1
3541 * 4-7 CSROWs 2 and 3
3542 * 8-11 CSROWs 4 and 5
3543 * 12-15 CSROWs 6 and 7
3546 * The meaning of the values depends on CPU revision and dual-channel state,
3563 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; in get_csrow_nr_pages()
3567 if (!pvt->umc) { in get_csrow_nr_pages()
3574 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr); in get_csrow_nr_pages()
3575 nr_pages <<= 20 - PAGE_SHIFT; in get_csrow_nr_pages()
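
The DBAM layout described in the comment above packs one 4-bit value per DIMM, i.e. per pair of csrows, and the size that dbam_to_cs() returns (in MB) is then shifted into EDAC pages. Two small helpers modelling those steps, assuming 4K pages; the helper names are illustrative.

#include <stdint.h>

#define PAGE_SHIFT 12	/* assuming 4K pages */

/* DBAM packs one 4-bit cs_mode nibble per DIMM, i.e. per pair of csrows. */
static unsigned int dbam_dimm(uint32_t dbam, int csrow_nr)
{
	return (dbam >> ((csrow_nr >> 1) * 4)) & 0xF;
}

/* dbam_to_cs() returns a chip-select size in MB; EDAC wants it in pages. */
static uint32_t cs_size_mb_to_pages(uint32_t size_mb)
{
	return size_mb << (20 - PAGE_SHIFT);
}
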
3586 struct amd64_pvt *pvt = mci->pvt_info; in init_csrows_df()
3593 if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) { in init_csrows_df()
3596 } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) { in init_csrows_df()
3599 } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) { in init_csrows_df()
3602 } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) { in init_csrows_df()
3612 dimm = mci->csrows[cs]->channels[umc]->dimm; in init_csrows_df()
3615 pvt->mc_node_id, cs); in init_csrows_df()
3617 dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs); in init_csrows_df()
3618 dimm->mtype = pvt->umc[umc].dram_type; in init_csrows_df()
3619 dimm->edac_mode = edac_mode; in init_csrows_df()
3620 dimm->dtype = dev_type; in init_csrows_df()
3621 dimm->grain = 64; in init_csrows_df()
3634 struct amd64_pvt *pvt = mci->pvt_info; in init_csrows()
3642 if (pvt->umc) in init_csrows()
3645 amd64_read_pci_cfg(pvt->F3, NBCFG, &val); in init_csrows()
3647 pvt->nbcfg = val; in init_csrows()
3650 pvt->mc_node_id, val, in init_csrows()
3660 if (pvt->fam != 0xf) in init_csrows()
3666 csrow = mci->csrows[i]; in init_csrows()
3670 pvt->mc_node_id, i); in init_csrows()
3674 csrow->channels[0]->dimm->nr_pages = nr_pages; in init_csrows()
3678 if (pvt->fam != 0xf && row_dct1) { in init_csrows()
3681 csrow->channels[1]->dimm->nr_pages = row_dct1_pages; in init_csrows()
3687 /* Determine DIMM ECC mode: */ in init_csrows()
3688 if (pvt->nbcfg & NBCFG_ECC_ENABLE) { in init_csrows()
3689 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) in init_csrows()
3694 for (j = 0; j < pvt->channel_count; j++) { in init_csrows()
3695 dimm = csrow->channels[j]->dimm; in init_csrows()
3696 dimm->mtype = pvt->dram_type; in init_csrows()
3697 dimm->edac_mode = edac_mode; in init_csrows()
3698 dimm->grain = 64; in init_csrows()
3733 nbe = reg->l & MSR_MCGCTL_NBE; in nb_mce_bank_enabled_on_node()
3736 cpu, reg->q, in nb_mce_bank_enabled_on_node()
3756 return -ENOMEM; in toggle_ecc_err_reporting()
3768 if (reg->l & MSR_MCGCTL_NBE) in toggle_ecc_err_reporting()
3769 s->flags.nb_mce_enable = 1; in toggle_ecc_err_reporting()
3771 reg->l |= MSR_MCGCTL_NBE; in toggle_ecc_err_reporting()
3776 if (!s->flags.nb_mce_enable) in toggle_ecc_err_reporting()
3777 reg->l &= ~MSR_MCGCTL_NBE; in toggle_ecc_err_reporting()
3794 amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); in enable_ecc_error_reporting()
3800 s->old_nbctl = value & mask; in enable_ecc_error_reporting()
3801 s->nbctl_valid = true; in enable_ecc_error_reporting()
3812 amd64_warn("DRAM ECC disabled on this node, enabling...\n"); in enable_ecc_error_reporting()
3814 s->flags.nb_ecc_prev = 0; in enable_ecc_error_reporting()
3816 /* Attempt to turn on DRAM ECC Enable */ in enable_ecc_error_reporting()
3823 amd64_warn("Hardware rejected DRAM ECC enable," in enable_ecc_error_reporting()
3827 amd64_info("Hardware accepted DRAM ECC Enable\n"); in enable_ecc_error_reporting()
3830 s->flags.nb_ecc_prev = 1; in enable_ecc_error_reporting()
3844 if (!s->nbctl_valid) in restore_ecc_error_reporting()
3849 value |= s->old_nbctl; in restore_ecc_error_reporting()
3853 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ in restore_ecc_error_reporting()
3854 if (!s->flags.nb_ecc_prev) { in restore_ecc_error_reporting()
3867 u16 nid = pvt->mc_node_id; in ecc_enabled()
3877 umc = &pvt->umc[i]; in ecc_enabled()
3880 if (!(umc->sdp_ctrl & UMC_SDP_INIT)) in ecc_enabled()
3885 if (umc->umc_cap_hi & UMC_ECC_ENABLED) in ecc_enabled()
3898 amd64_read_pci_cfg(pvt->F3, NBCFG, &value); in ecc_enabled()
3908 edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled")); in ecc_enabled()
3922 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) { in f17h_determine_edac_ctl_cap()
3923 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED); in f17h_determine_edac_ctl_cap()
3924 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP); in f17h_determine_edac_ctl_cap()
3926 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6)); in f17h_determine_edac_ctl_cap()
3927 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7)); in f17h_determine_edac_ctl_cap()
3931 /* Set chipkill only if ECC is enabled: */ in f17h_determine_edac_ctl_cap()
3933 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; in f17h_determine_edac_ctl_cap()
3939 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; in f17h_determine_edac_ctl_cap()
3941 mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED; in f17h_determine_edac_ctl_cap()
3943 mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED; in f17h_determine_edac_ctl_cap()
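
f17h_determine_edac_ctl_cap() only advertises a capability when every active UMC agrees on it, and picks the chipkill granularity from whether the DIMMs use x4, x16 or x8 devices. A standalone model of that accumulation is sketched below, with stand-in flag values instead of the real EDAC_FLAG_* constants and an invented umc_caps type.

#include <stdbool.h>

/* Stand-in capability flags; the driver uses the EDAC_FLAG_* constants. */
#define FLAG_SECDED	0x1
#define FLAG_S4ECD4ED	0x2
#define FLAG_S8ECD8ED	0x4
#define FLAG_S16ECD16ED	0x8

struct umc_caps { bool active, ecc_en, cpk_en, dev_x4, dev_x16; };

/* A capability is advertised only if every active UMC supports it. */
static unsigned int determine_ctl_cap(const struct umc_caps *umc, int num)
{
	bool ecc_en = true, cpk_en = true, dev_x4 = true, dev_x16 = true;
	unsigned int cap = 0;
	int i;

	for (i = 0; i < num; i++) {
		if (!umc[i].active)
			continue;

		ecc_en  &= umc[i].ecc_en;
		cpk_en  &= umc[i].cpk_en;
		dev_x4  &= umc[i].dev_x4;
		dev_x16 &= umc[i].dev_x16;
	}

	/* Chipkill granularity is reported only when ECC itself is enabled. */
	if (ecc_en) {
		cap |= FLAG_SECDED;
		if (cpk_en)
			cap |= dev_x4 ? FLAG_S4ECD4ED :
			       dev_x16 ? FLAG_S16ECD16ED : FLAG_S8ECD8ED;
	}

	return cap;
}
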
3949 struct amd64_pvt *pvt = mci->pvt_info; in setup_mci_misc_attrs()
3951 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; in setup_mci_misc_attrs()
3952 mci->edac_ctl_cap = EDAC_FLAG_NONE; in setup_mci_misc_attrs()
3954 if (pvt->umc) { in setup_mci_misc_attrs()
3957 if (pvt->nbcap & NBCAP_SECDED) in setup_mci_misc_attrs()
3958 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; in setup_mci_misc_attrs()
3960 if (pvt->nbcap & NBCAP_CHIPKILL) in setup_mci_misc_attrs()
3961 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; in setup_mci_misc_attrs()
3964 mci->edac_cap = determine_edac_cap(pvt); in setup_mci_misc_attrs()
3965 mci->mod_name = EDAC_MOD_STR; in setup_mci_misc_attrs()
3966 mci->ctl_name = fam_type->ctl_name; in setup_mci_misc_attrs()
3967 mci->dev_name = pci_name(pvt->F3); in setup_mci_misc_attrs()
3968 mci->ctl_page_to_phys = NULL; in setup_mci_misc_attrs()
3971 mci->set_sdram_scrub_rate = set_scrub_rate; in setup_mci_misc_attrs()
3972 mci->get_sdram_scrub_rate = get_scrub_rate; in setup_mci_misc_attrs()
3980 pvt->ext_model = boot_cpu_data.x86_model >> 4; in per_family_init()
3981 pvt->stepping = boot_cpu_data.x86_stepping; in per_family_init()
3982 pvt->model = boot_cpu_data.x86_model; in per_family_init()
3983 pvt->fam = boot_cpu_data.x86; in per_family_init()
3985 switch (pvt->fam) { in per_family_init()
3988 pvt->ops = &family_types[K8_CPUS].ops; in per_family_init()
3993 pvt->ops = &family_types[F10_CPUS].ops; in per_family_init()
3997 if (pvt->model == 0x30) { in per_family_init()
3999 pvt->ops = &family_types[F15_M30H_CPUS].ops; in per_family_init()
4001 } else if (pvt->model == 0x60) { in per_family_init()
4003 pvt->ops = &family_types[F15_M60H_CPUS].ops; in per_family_init()
4006 } else if (pvt->model == 0x13) { in per_family_init()
4010 pvt->ops = &family_types[F15_CPUS].ops; in per_family_init()
4015 if (pvt->model == 0x30) { in per_family_init()
4017 pvt->ops = &family_types[F16_M30H_CPUS].ops; in per_family_init()
4021 pvt->ops = &family_types[F16_CPUS].ops; in per_family_init()
4025 if (pvt->model >= 0x10 && pvt->model <= 0x2f) { in per_family_init()
4027 pvt->ops = &family_types[F17_M10H_CPUS].ops; in per_family_init()
4029 } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) { in per_family_init()
4031 pvt->ops = &family_types[F17_M30H_CPUS].ops; in per_family_init()
4033 } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) { in per_family_init()
4035 pvt->ops = &family_types[F17_M60H_CPUS].ops; in per_family_init()
4037 } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) { in per_family_init()
4039 pvt->ops = &family_types[F17_M70H_CPUS].ops; in per_family_init()
4045 pvt->ops = &family_types[F17_CPUS].ops; in per_family_init()
4047 if (pvt->fam == 0x18) in per_family_init()
4052 if (pvt->model >= 0x10 && pvt->model <= 0x1f) { in per_family_init()
4054 pvt->ops = &family_types[F19_M10H_CPUS].ops; in per_family_init()
4056 } else if (pvt->model >= 0x20 && pvt->model <= 0x2f) { in per_family_init()
4058 pvt->ops = &family_types[F17_M70H_CPUS].ops; in per_family_init()
4059 fam_type->ctl_name = "F19h_M20h"; in per_family_init()
4061 } else if (pvt->model >= 0x50 && pvt->model <= 0x5f) { in per_family_init()
4063 pvt->ops = &family_types[F19_M50H_CPUS].ops; in per_family_init()
4064 fam_type->ctl_name = "F19h_M50h"; in per_family_init()
4066 } else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) { in per_family_init()
4068 pvt->ops = &family_types[F19_M10H_CPUS].ops; in per_family_init()
4069 fam_type->ctl_name = "F19h_MA0h"; in per_family_init()
4073 pvt->ops = &family_types[F19_CPUS].ops; in per_family_init()
4098 if (pvt->fam >= 0x17) { in hw_info_get()
4099 pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL); in hw_info_get()
4100 if (!pvt->umc) in hw_info_get()
4101 return -ENOMEM; in hw_info_get()
4103 pci_id1 = fam_type->f0_id; in hw_info_get()
4104 pci_id2 = fam_type->f6_id; in hw_info_get()
4106 pci_id1 = fam_type->f1_id; in hw_info_get()
4107 pci_id2 = fam_type->f2_id; in hw_info_get()
4121 if (pvt->F0 || pvt->F1) in hw_info_put()
4124 kfree(pvt->umc); in hw_info_put()
4131 int ret = -EINVAL; in init_one_instance()
4134 * We need to determine how many memory channels there are. Then use in init_one_instance()
4138 pvt->channel_count = pvt->ops->early_channel_count(pvt); in init_one_instance()
4139 if (pvt->channel_count < 0) in init_one_instance()
4142 ret = -ENOMEM; in init_one_instance()
4144 layers[0].size = pvt->csels[0].b_cnt; in init_one_instance()
4153 layers[1].size = fam_type->max_mcs; in init_one_instance()
4156 mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0); in init_one_instance()
4160 mci->pvt_info = pvt; in init_one_instance()
4161 mci->pdev = &pvt->F3->dev; in init_one_instance()
4166 mci->edac_cap = EDAC_FLAG_NONE; in init_one_instance()
4168 ret = -ENODEV; in init_one_instance()
4183 for (dct = 0; dct < fam_type->max_mcs; dct++) { in instance_has_memory()
4193 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; in probe_one_instance()
4198 ret = -ENOMEM; in probe_one_instance()
4209 pvt->mc_node_id = nid; in probe_one_instance()
4210 pvt->F3 = F3; in probe_one_instance()
4212 ret = -ENODEV; in probe_one_instance()
4228 ret = -ENODEV; in probe_one_instance()
4234 amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS."); in probe_one_instance()
4237 amd64_warn("Forcing ECC on!\n"); in probe_one_instance()
4253 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name, in probe_one_instance()
4254 (pvt->fam == 0xf ? in probe_one_instance()
4255 (pvt->ext_model >= K8_REV_F ? "revF or later " in probe_one_instance()
4257 : ""), pvt->mc_node_id); in probe_one_instance()
4277 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; in remove_one_instance()
4283 mci = edac_mc_del_mc(&F3->dev); in remove_one_instance()
4287 pvt = mci->pvt_info; in remove_one_instance()
4295 mci->pvt_info = NULL; in remove_one_instance()
4329 int err = -ENODEV; in amd64_edac_init()
4334 return -EBUSY; in amd64_edac_init()
4337 return -ENODEV; in amd64_edac_init()
4340 return -ENODEV; in amd64_edac_init()
4344 err = -ENOMEM; in amd64_edac_init()
4357 while (--i >= 0) in amd64_edac_init()
4365 err = -ENODEV; in amd64_edac_init()
4378 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR); in amd64_edac_init()
4429 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "