1 /*
2 * Intel 7300 class Memory Controllers kernel module (Clarksboro)
3 *
4 * This file may be distributed under the terms of the
5 * GNU General Public License version 2 only.
6 *
7 * Copyright (c) 2010 by:
8 * Mauro Carvalho Chehab
9 *
10 * Red Hat Inc. http://www.redhat.com
11 *
12 * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
13 * http://www.intel.com/Assets/PDF/datasheet/318082.pdf
14 *
15 * TODO: The chipset also allows checking for PCI Express errors. Currently,
16 * the driver covers only memory errors.
17 *
18 * This driver uses the "csrows" EDAC attribute to represent the DIMM slot#
19 */
20
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/pci.h>
24 #include <linux/pci_ids.h>
25 #include <linux/slab.h>
26 #include <linux/edac.h>
27 #include <linux/mmzone.h>
28
29 #include "edac_module.h"
30
31 /*
32 * Alter this version for the I7300 module when modifications are made
33 */
34 #define I7300_REVISION " Ver: 1.0.0"
35
36 #define EDAC_MOD_STR "i7300_edac"
37
38 #define i7300_printk(level, fmt, arg...) \
39 edac_printk(level, "i7300", fmt, ##arg)
40
41 #define i7300_mc_printk(mci, level, fmt, arg...) \
42 edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)
43
44 /***********************************************
45 * i7300 limit constants, structs and static vars
46 ***********************************************/
47
48 /*
49 * Memory topology is organized as:
50 * Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0)
51 * Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0)
52 * Each channel can have up to 8 DIMM sets (called SLOTs here)
53 * Slots should generally be filled in pairs,
54 * except in Single Channel mode of operation,
55 * where only slot 0 of channel 0 is filled
56 * In normal operation mode, the two channels on a branch should be
57 * filled together for the same SLOT#
58 * When in mirrored mode, Branch 1 replicates the memory at Branch 0, so the
59 * four channels on both branches should be filled
60 */
61
62 /* Limits for i7300 */
63 #define MAX_SLOTS 8
64 #define MAX_BRANCHES 2
65 #define MAX_CH_PER_BRANCH 2
66 #define MAX_CHANNELS (MAX_CH_PER_BRANCH * MAX_BRANCHES)
67 #define MAX_MIR 3
68
69 #define to_channel(ch, branch) ((((branch)) << 1) | (ch))
70
71 #define to_csrow(slot, ch, branch) \
72 (to_channel(ch, branch) | ((slot) << 2))
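/*
 * A worked example (illustrative only, not used by the driver): slot 3 on
 * channel 1 of branch 0 maps to channel to_channel(1, 0) == (0 << 1) | 1 == 1
 * and to csrow to_csrow(3, 1, 0) == (3 << 2) | 1 == 13.
 */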
73
74 /* Device name and register DID (Device ID) */
75 struct i7300_dev_info {
76 const char *ctl_name; /* name for this device */
77 u16 fsb_mapping_errors; /* DID for the branchmap,control */
78 };
79
80 /* Table of devices attributes supported by this driver */
81 static const struct i7300_dev_info i7300_devs[] = {
82 {
83 .ctl_name = "I7300",
84 .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
85 },
86 };
87
88 struct i7300_dimm_info {
89 int megabytes; /* size, 0 means not present */
90 };
91
92 /* driver private data structure */
93 struct i7300_pvt {
94 struct pci_dev *pci_dev_16_0_fsb_ctlr; /* 16.0 */
95 struct pci_dev *pci_dev_16_1_fsb_addr_map; /* 16.1 */
96 struct pci_dev *pci_dev_16_2_fsb_err_regs; /* 16.2 */
97 struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES]; /* 21.0 and 22.0 */
98
99 u16 tolm; /* top of low memory */
100 u64 ambase; /* AMB BAR */
101
102 u32 mc_settings; /* Report several settings */
103 u32 mc_settings_a;
104
105 u16 mir[MAX_MIR]; /* Memory Interleave Reg */
106
107 u16 mtr[MAX_SLOTS][MAX_BRANCHES]; /* Memory Technology Reg */
108 u16 ambpresent[MAX_CHANNELS]; /* AMB present regs */
109
110 /* DIMM information matrix, allocating architecture maximums */
111 struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];
112
113 /* Temporary buffer for use when preparing error messages */
114 char *tmp_prt_buffer;
115 };
116
117 /* FIXME: Why do we need to have this static? */
118 static struct edac_pci_ctl_info *i7300_pci;
119
120 /***************************************************
121 * i7300 Register definitions for memory enumeration
122 ***************************************************/
123
124 /*
125 * Device 16,
126 * Function 0: System Address (not documented)
127 * Function 1: Memory Branch Map, Control, Errors Register
128 */
129
130 /* OFFSETS for Function 0 */
131 #define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */
132 #define MAXCH 0x56 /* Max Channel Number */
133 #define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */
134
135 /* OFFSETS for Function 1 */
136 #define MC_SETTINGS 0x40
137 #define IS_MIRRORED(mc) ((mc) & (1 << 16))
138 #define IS_ECC_ENABLED(mc) ((mc) & (1 << 5))
139 #define IS_RETRY_ENABLED(mc) ((mc) & (1 << 31))
140 #define IS_SCRBALGO_ENHANCED(mc) ((mc) & (1 << 8))
141
142 #define MC_SETTINGS_A 0x58
143 #define IS_SINGLE_MODE(mca) ((mca) & (1 << 14))
144
145 #define TOLM 0x6C
146
147 #define MIR0 0x80
148 #define MIR1 0x84
149 #define MIR2 0x88
150
151 /*
152 * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
153 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
154 * seems that we cannot use this information directly for the same purpose.
155 * Each memory slot may have up to 2 AMB interfaces, one for the incoming
156 * link and another for the outgoing link to the next slot.
157 * For now, the driver just stores the AMBPRESENT registers, but relies only
158 * on the MTR info to detect memory.
159 * The datasheet is also not clear about how to map each AMBPRESENT register
160 * to one of the 4 available channels.
161 */
162 #define AMBPRESENT_0 0x64
163 #define AMBPRESENT_1 0x66
164
165 static const u16 mtr_regs[MAX_SLOTS] = {
166 0x80, 0x84, 0x88, 0x8c,
167 0x82, 0x86, 0x8a, 0x8e
168 };
169
170 /*
171 * Defines to extract the various fields from the
172 * MTRx - Memory Technology Registers
173 */
174 #define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 8))
175 #define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 7))
176 #define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 6)) ? 8 : 4)
177 #define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 5)) ? 8 : 4)
178 #define MTR_DIMM_RANKS(mtr) (((mtr) & (1 << 4)) ? 1 : 0)
179 #define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
180 #define MTR_DRAM_BANKS_ADDR_BITS 2
181 #define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
182 #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
183 #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
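/*
 * A worked example with a hypothetical MTR value of 0x0105 (illustrative
 * only): MTR_DIMMS_PRESENT() is true (bit 8 set), MTR_DRAM_WIDTH() is x4,
 * MTR_DIMM_RANKS() is 0 (single rank), MTR_DIMM_ROWS_ADDR_BITS() is 14 and
 * MTR_DIMM_COLS_ADDR_BITS() is 11. decode_mtr() below turns these address
 * bits into a DIMM size in megabytes.
 */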
184
185 /************************************************
186 * i7300 Register definitions for error detection
187 ************************************************/
188
189 /*
190 * Device 16.1: FBD Error Registers
191 */
192 #define FERR_FAT_FBD 0x98
193 static const char *ferr_fat_fbd_name[] = {
194 [22] = "Non-Redundant Fast Reset Timeout",
195 [2] = ">Tmid Thermal event with intelligent throttling disabled",
196 [1] = "Memory or FBD configuration CRC read error",
197 [0] = "Memory Write error on non-redundant retry or "
198 "FBD configuration Write error on retry",
199 };
200 #define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3)
201 #define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
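/*
 * Illustrative example with a hypothetical register value: a FERR_FAT_FBD
 * reading of 0x20000004 has bit 2 set (">Tmid Thermal event ...") and
 * GET_FBD_FAT_IDX() == 2, which i7300_process_fbd_error() maps to branch 1.
 */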
202
203 #define FERR_NF_FBD 0xa0
204 static const char *ferr_nf_fbd_name[] = {
205 [24] = "DIMM-Spare Copy Completed",
206 [23] = "DIMM-Spare Copy Initiated",
207 [22] = "Redundant Fast Reset Timeout",
208 [21] = "Memory Write error on redundant retry",
209 [18] = "SPD protocol Error",
210 [17] = "FBD Northbound parity error on FBD Sync Status",
211 [16] = "Correctable Patrol Data ECC",
212 [15] = "Correctable Resilver- or Spare-Copy Data ECC",
213 [14] = "Correctable Mirrored Demand Data ECC",
214 [13] = "Correctable Non-Mirrored Demand Data ECC",
215 [11] = "Memory or FBD configuration CRC read error",
216 [10] = "FBD Configuration Write error on first attempt",
217 [9] = "Memory Write error on first attempt",
218 [8] = "Non-Aliased Uncorrectable Patrol Data ECC",
219 [7] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
220 [6] = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
221 [5] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
222 [4] = "Aliased Uncorrectable Patrol Data ECC",
223 [3] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
224 [2] = "Aliased Uncorrectable Mirrored Demand Data ECC",
225 [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
226 [0] = "Uncorrectable Data ECC on Replay",
227 };
228 #define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3)
229 #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
230 (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
231 (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
232 (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\
233 (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\
234 (1 << 1) | (1 << 0))
235
236 #define EMASK_FBD 0xa8
237 #define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
238 (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
239 (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
240 (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
241 (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\
242 (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\
243 (1 << 1) | (1 << 0))
244
245 /*
246 * Device 16.2: Global Error Registers
247 */
248
249 #define FERR_GLOBAL_HI 0x48
250 static const char *ferr_global_hi_name[] = {
251 [3] = "FSB 3 Fatal Error",
252 [2] = "FSB 2 Fatal Error",
253 [1] = "FSB 1 Fatal Error",
254 [0] = "FSB 0 Fatal Error",
255 };
256 #define ferr_global_hi_is_fatal(errno) 1
257
258 #define FERR_GLOBAL_LO 0x40
259 static const char *ferr_global_lo_name[] = {
260 [31] = "Internal MCH Fatal Error",
261 [30] = "Intel QuickData Technology Device Fatal Error",
262 [29] = "FSB1 Fatal Error",
263 [28] = "FSB0 Fatal Error",
264 [27] = "FBD Channel 3 Fatal Error",
265 [26] = "FBD Channel 2 Fatal Error",
266 [25] = "FBD Channel 1 Fatal Error",
267 [24] = "FBD Channel 0 Fatal Error",
268 [23] = "PCI Express Device 7 Fatal Error",
269 [22] = "PCI Express Device 6 Fatal Error",
270 [21] = "PCI Express Device 5 Fatal Error",
271 [20] = "PCI Express Device 4 Fatal Error",
272 [19] = "PCI Express Device 3 Fatal Error",
273 [18] = "PCI Express Device 2 Fatal Error",
274 [17] = "PCI Express Device 1 Fatal Error",
275 [16] = "ESI Fatal Error",
276 [15] = "Internal MCH Non-Fatal Error",
277 [14] = "Intel QuickData Technology Device Non Fatal Error",
278 [13] = "FSB1 Non-Fatal Error",
279 [12] = "FSB 0 Non-Fatal Error",
280 [11] = "FBD Channel 3 Non-Fatal Error",
281 [10] = "FBD Channel 2 Non-Fatal Error",
282 [9] = "FBD Channel 1 Non-Fatal Error",
283 [8] = "FBD Channel 0 Non-Fatal Error",
284 [7] = "PCI Express Device 7 Non-Fatal Error",
285 [6] = "PCI Express Device 6 Non-Fatal Error",
286 [5] = "PCI Express Device 5 Non-Fatal Error",
287 [4] = "PCI Express Device 4 Non-Fatal Error",
288 [3] = "PCI Express Device 3 Non-Fatal Error",
289 [2] = "PCI Express Device 2 Non-Fatal Error",
290 [1] = "PCI Express Device 1 Non-Fatal Error",
291 [0] = "ESI Non-Fatal Error",
292 };
293 #define ferr_global_lo_is_fatal(errno) ((errno < 16) ? 0 : 1)
294
295 #define NRECMEMA 0xbe
296 #define NRECMEMA_BANK(v) (((v) >> 12) & 7)
297 #define NRECMEMA_RANK(v) (((v) >> 8) & 15)
298
299 #define NRECMEMB 0xc0
300 #define NRECMEMB_IS_WR(v) ((v) & (1 << 31))
301 #define NRECMEMB_CAS(v) (((v) >> 16) & 0x1fff)
302 #define NRECMEMB_RAS(v) ((v) & 0xffff)
303
304 #define REDMEMA 0xdc
305
306 #define REDMEMB 0x7c
307
308 #define RECMEMA 0xe0
309 #define RECMEMA_BANK(v) (((v) >> 12) & 7)
310 #define RECMEMA_RANK(v) (((v) >> 8) & 15)
311
312 #define RECMEMB 0xe4
313 #define RECMEMB_IS_WR(v) ((v) & (1 << 31))
314 #define RECMEMB_CAS(v) (((v) >> 16) & 0x1fff)
315 #define RECMEMB_RAS(v) ((v) & 0xffff)
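/*
 * A worked example with hypothetical register values (illustrative only):
 * a RECMEMA reading of 0x3500 decodes to bank 3 and rank 5, while a RECMEMB
 * reading of 0x80120456 decodes to a write error (bit 31 set) at CAS 0x12
 * and RAS 0x456.
 */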
316
317 /********************************************
318 * i7300 Functions related to error detection
319 ********************************************/
320
321 /**
322 * get_err_from_table() - Gets the error message from a table
323 * @table: table name (array of char *)
324 * @size: number of elements in the table
325 * @pos: position of the element to be returned
326 *
327 * This is a small routine that gets the pos-th element of a table. If the
328 * element doesn't exist (or it is empty), it returns "Reserved".
329 * Instead of calling it directly, it is better to use the GET_ERR_FROM_TABLE()
330 * macro, which automatically checks the table size via the ARRAY_SIZE()
331 * macro.
332 */
333 static const char *get_err_from_table(const char *table[], int size, int pos)
334 {
335 if (unlikely(pos >= size))
336 return "Reserved";
337
338 if (unlikely(!table[pos]))
339 return "Reserved";
340
341 return table[pos];
342 }
343
344 #define GET_ERR_FROM_TABLE(table, pos) \
345 get_err_from_table(table, ARRAY_SIZE(table), pos)
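/*
 * Illustrative usage (not part of the driver flow): if FERR_GLOBAL_LO reads
 * as 0x00000100, find_first_bit() returns 8 and
 * GET_ERR_FROM_TABLE(ferr_global_lo_name, 8) yields
 * "FBD Channel 0 Non-Fatal Error"; an out-of-range or empty entry falls back
 * to "Reserved".
 */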
346
347 /**
348 * i7300_process_error_global() - Retrieve the hardware error information from
349 * the hardware global error registers and
350 * send it to dmesg
351 * @mci: struct mem_ctl_info pointer
352 */
353 static void i7300_process_error_global(struct mem_ctl_info *mci)
354 {
355 struct i7300_pvt *pvt;
356 u32 errnum, error_reg;
357 unsigned long errors;
358 const char *specific;
359 bool is_fatal;
360
361 pvt = mci->pvt_info;
362
363 /* read in the 1st FATAL error register */
364 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
365 FERR_GLOBAL_HI, &error_reg);
366 if (unlikely(error_reg)) {
367 errors = error_reg;
368 errnum = find_first_bit(&errors,
369 ARRAY_SIZE(ferr_global_hi_name));
370 specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
371 is_fatal = ferr_global_hi_is_fatal(errnum);
372
373 /* Clear the error bit */
374 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
375 FERR_GLOBAL_HI, error_reg);
376
377 goto error_global;
378 }
379
380 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
381 FERR_GLOBAL_LO, &error_reg);
382 if (unlikely(error_reg)) {
383 errors = error_reg;
384 errnum = find_first_bit(&errors,
385 ARRAY_SIZE(ferr_global_lo_name));
386 specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
387 is_fatal = ferr_global_lo_is_fatal(errnum);
388
389 /* Clear the error bit */
390 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
391 FERR_GLOBAL_LO, error_reg);
392
393 goto error_global;
394 }
395 return;
396
397 error_global:
398 i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
399 is_fatal ? "Fatal" : "NOT fatal", specific);
400 }
401
402 /**
403 * i7300_process_fbd_error() - Retrieve the hardware error information from
404 * the FBD error registers and send it via
405 * EDAC error API calls
406 * @mci: struct mem_ctl_info pointer
407 */
408 static void i7300_process_fbd_error(struct mem_ctl_info *mci)
409 {
410 struct i7300_pvt *pvt;
411 u32 errnum, value, error_reg;
412 u16 val16;
413 unsigned branch, channel, bank, rank, cas, ras;
414 u32 syndrome;
415
416 unsigned long errors;
417 const char *specific;
418 bool is_wr;
419
420 pvt = mci->pvt_info;
421
422 /* read in the 1st FATAL error register */
423 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
424 FERR_FAT_FBD, &error_reg);
425 if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
426 errors = error_reg & FERR_FAT_FBD_ERR_MASK ;
427 errnum = find_first_bit(&errors,
428 ARRAY_SIZE(ferr_fat_fbd_name));
429 specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
430 branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
431
432 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
433 NRECMEMA, &val16);
434 bank = NRECMEMA_BANK(val16);
435 rank = NRECMEMA_RANK(val16);
436
437 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
438 NRECMEMB, &value);
439 is_wr = NRECMEMB_IS_WR(value);
440 cas = NRECMEMB_CAS(value);
441 ras = NRECMEMB_RAS(value);
442
443 /* Clean the error register */
444 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
445 FERR_FAT_FBD, error_reg);
446
447 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
448 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
449 bank, ras, cas, errors, specific);
450
451 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
452 branch, -1, rank,
453 is_wr ? "Write error" : "Read error",
454 pvt->tmp_prt_buffer);
455
456 }
457
458 /* read in the 1st NON-FATAL error register */
459 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
460 FERR_NF_FBD, &error_reg);
461 if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
462 errors = error_reg & FERR_NF_FBD_ERR_MASK;
463 errnum = find_first_bit(&errors,
464 ARRAY_SIZE(ferr_nf_fbd_name));
465 specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
466 branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
467
468 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
469 REDMEMA, &syndrome);
470
471 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
472 RECMEMA, &val16);
473 bank = RECMEMA_BANK(val16);
474 rank = RECMEMA_RANK(val16);
475
476 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
477 RECMEMB, &value);
478 is_wr = RECMEMB_IS_WR(value);
479 cas = RECMEMB_CAS(value);
480 ras = RECMEMB_RAS(value);
481
482 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
483 REDMEMB, &value);
484 channel = (branch << 1);
485
486 /* Second channel ? */
487 channel += !!(value & BIT(17));
488
489 /* Clear the error bit */
490 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
491 FERR_NF_FBD, error_reg);
492
493 /* Form out message */
494 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
495 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
496 bank, ras, cas, errors, specific);
497
498 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
499 syndrome,
500 branch >> 1, channel % 2, rank,
501 is_wr ? "Write error" : "Read error",
502 pvt->tmp_prt_buffer);
503 }
504 return;
505 }
506
507 /**
508 * i7300_check_error() - Calls the error checking subroutines
509 * @mci: struct mem_ctl_info pointer
510 */
511 static void i7300_check_error(struct mem_ctl_info *mci)
512 {
513 i7300_process_error_global(mci);
514 i7300_process_fbd_error(mci);
515 };
516
517 /**
518 * i7300_clear_error() - Clears the error registers
519 * @mci: struct mem_ctl_info pointer
520 */
521 static void i7300_clear_error(struct mem_ctl_info *mci)
522 {
523 struct i7300_pvt *pvt = mci->pvt_info;
524 u32 value;
525 /*
526 * All error values are RWC - we need to read and write 1 to the
527 * bit that we want to cleanup
528 */
529
530 /* Clear global error registers */
531 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
532 FERR_GLOBAL_HI, &value);
533 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
534 FERR_GLOBAL_HI, value);
535
536 pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
537 FERR_GLOBAL_LO, &value);
538 pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
539 FERR_GLOBAL_LO, value);
540
541 /* Clear FBD error registers */
542 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
543 FERR_FAT_FBD, &value);
544 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
545 FERR_FAT_FBD, value);
546
547 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
548 FERR_NF_FBD, &value);
549 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
550 FERR_NF_FBD, value);
551 }
552
553 /**
554 * i7300_enable_error_reporting() - Enable the memory reporting logic at the
555 * hardware
556 * @mci: struct mem_ctl_info pointer
557 */
558 static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
559 {
560 struct i7300_pvt *pvt = mci->pvt_info;
561 u32 fbd_error_mask;
562
563 /* Read the FBD Error Mask Register */
564 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
565 EMASK_FBD, &fbd_error_mask);
566
567 /* Enable with a '0' */
568 fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);
569
570 pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
571 EMASK_FBD, fbd_error_mask);
572 }
573
574 /************************************************
575 * i7300 Functions related to memory enumeration
576 ************************************************/
577
578 /**
579 * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
580 * @pvt: pointer to the private data struct used by i7300 driver
581 * @slot: DIMM slot (0 to 7)
582 * @ch: Channel number within the branch (0 or 1)
583 * @branch: Branch number (0 or 1)
584 * @dinfo: Pointer to DIMM info where dimm size is stored
585 * @p_csrow: Pointer to the struct csrow_info that corresponds to that element
586 */
587 static int decode_mtr(struct i7300_pvt *pvt,
588 int slot, int ch, int branch,
589 struct i7300_dimm_info *dinfo,
590 struct dimm_info *dimm)
591 {
592 int mtr, ans, addrBits, channel;
593
594 channel = to_channel(ch, branch);
595
596 mtr = pvt->mtr[slot][branch];
597 ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
598
599 edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
600 slot, channel, ans ? "" : "NOT ");
601
602 /* Determine if there is a DIMM present in this DIMM slot */
603 if (!ans)
604 return 0;
605
606 /* Start with the number of bits for a Bank
607 * on the DRAM */
608 addrBits = MTR_DRAM_BANKS_ADDR_BITS;
609 /* Add the number of ROW bits */
610 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
611 /* add the number of COLUMN bits */
612 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
613 /* add the number of RANK bits */
614 addrBits += MTR_DIMM_RANKS(mtr);
615
616 addrBits += 6; /* add 64 bits per DIMM */
617 addrBits -= 20; /* divide by 2^20 to get MB */
618 addrBits -= 3; /* 8 bits per byte */
619
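/*
 * Worked example, assuming the hypothetical MTR value 0x0105: 2 bank bits +
 * 14 row bits + 11 column bits + 0 rank bits = 27; adding 6 for the 64-bit
 * data width and subtracting 20 (MB) and 3 (bits to bytes) gives
 * addrBits = 10, i.e. a 1024 MB DIMM.
 */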
620 dinfo->megabytes = 1 << addrBits;
621
622 edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
623
624 edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
625 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
626
627 edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
628 edac_dbg(2, "\t\tNUMRANK: %s\n",
629 MTR_DIMM_RANKS(mtr) ? "double" : "single");
630 edac_dbg(2, "\t\tNUMROW: %s\n",
631 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
632 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
633 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
634 "65,536 - 16 rows");
635 edac_dbg(2, "\t\tNUMCOL: %s\n",
636 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
637 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
638 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
639 "reserved");
640 edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);
641
642 /*
643 * The type of error detection actually depends on the
644 * mode of operation. When there is just one single memory chip, at
645 * socket 0, channel 0, it uses the 8-byte-over-32-byte SECDED+ code.
646 * In normal or mirrored mode, it uses Lockstep mode,
647 * with the possibility of using an extended algorithm for x8 memories.
648 * See datasheet Sections 7.3.6 to 7.3.8
649 */
650
651 dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
652 dimm->grain = 8;
653 dimm->mtype = MEM_FB_DDR2;
654 if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
655 dimm->edac_mode = EDAC_SECDED;
656 edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
657 } else {
658 edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
659 if (MTR_DRAM_WIDTH(mtr) == 8)
660 dimm->edac_mode = EDAC_S8ECD8ED;
661 else
662 dimm->edac_mode = EDAC_S4ECD4ED;
663 }
664
665 /* ask what device type is on this row */
666 if (MTR_DRAM_WIDTH(mtr) == 8) {
667 edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
668 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
669 "enhanced" : "normal");
670
671 dimm->dtype = DEV_X8;
672 } else
673 dimm->dtype = DEV_X4;
674
675 return mtr;
676 }
677
678 /**
679 * print_dimm_size() - Prints dump of the memory organization
680 * @pvt: pointer to the private data struct used by i7300 driver
681 *
682 * Useful for debugging. If debug is disabled, this routine does nothing
683 */
684 static void print_dimm_size(struct i7300_pvt *pvt)
685 {
686 #ifdef CONFIG_EDAC_DEBUG
687 struct i7300_dimm_info *dinfo;
688 char *p;
689 int space, n;
690 int channel, slot;
691
692 space = PAGE_SIZE;
693 p = pvt->tmp_prt_buffer;
694
695 n = snprintf(p, space, " ");
696 p += n;
697 space -= n;
698 for (channel = 0; channel < MAX_CHANNELS; channel++) {
699 n = snprintf(p, space, "channel %d | ", channel);
700 p += n;
701 space -= n;
702 }
703 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
704 p = pvt->tmp_prt_buffer;
705 space = PAGE_SIZE;
706 n = snprintf(p, space, "-------------------------------"
707 "------------------------------");
708 p += n;
709 space -= n;
710 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
711 p = pvt->tmp_prt_buffer;
712 space = PAGE_SIZE;
713
714 for (slot = 0; slot < MAX_SLOTS; slot++) {
715 n = snprintf(p, space, "csrow/SLOT %d ", slot);
716 p += n;
717 space -= n;
718
719 for (channel = 0; channel < MAX_CHANNELS; channel++) {
720 dinfo = &pvt->dimm_info[slot][channel];
721 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
722 p += n;
723 space -= n;
724 }
725
726 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
727 p = pvt->tmp_prt_buffer;
728 space = PAGE_SIZE;
729 }
730
731 n = snprintf(p, space, "-------------------------------"
732 "------------------------------");
733 p += n;
734 space -= n;
735 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
736 p = pvt->tmp_prt_buffer;
737 space = PAGE_SIZE;
738 #endif
739 }
740
741 /**
742 * i7300_init_csrows() - Initialize the 'csrows' table within
743 * the mci control structure with the
744 * addressing of memory.
745 * @mci: struct mem_ctl_info pointer
746 */
747 static int i7300_init_csrows(struct mem_ctl_info *mci)
748 {
749 struct i7300_pvt *pvt;
750 struct i7300_dimm_info *dinfo;
751 int rc = -ENODEV;
752 int mtr;
753 int ch, branch, slot, channel, max_channel, max_branch;
754 struct dimm_info *dimm;
755
756 pvt = mci->pvt_info;
757
758 edac_dbg(2, "Memory Technology Registers:\n");
759
760 if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
761 max_branch = 1;
762 max_channel = 1;
763 } else {
764 max_branch = MAX_BRANCHES;
765 max_channel = MAX_CH_PER_BRANCH;
766 }
767
768 /* Get the AMB present registers for the four channels */
769 for (branch = 0; branch < max_branch; branch++) {
770 /* Read the AMB present register for the branch's first channel */
771 channel = to_channel(0, branch);
772 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
773 AMBPRESENT_0,
774 &pvt->ambpresent[channel]);
775 edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
776 channel, pvt->ambpresent[channel]);
777
778 if (max_channel == 1)
779 continue;
780
781 channel = to_channel(1, branch);
782 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
783 AMBPRESENT_1,
784 &pvt->ambpresent[channel]);
785 edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
786 channel, pvt->ambpresent[channel]);
787 }
788
789 /* Get the set of MTR[0-7] regs by each branch */
790 for (slot = 0; slot < MAX_SLOTS; slot++) {
791 int where = mtr_regs[slot];
792 for (branch = 0; branch < max_branch; branch++) {
793 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
794 where,
795 &pvt->mtr[slot][branch]);
796 for (ch = 0; ch < max_channel; ch++) {
797 int channel = to_channel(ch, branch);
798
799 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
800 mci->n_layers, branch, ch, slot);
801
802 dinfo = &pvt->dimm_info[slot][channel];
803
804 mtr = decode_mtr(pvt, slot, ch, branch,
805 dinfo, dimm);
806
807 /* if no DIMMS on this row, continue */
808 if (!MTR_DIMMS_PRESENT(mtr))
809 continue;
810
811 rc = 0;
812
813 }
814 }
815 }
816
817 return rc;
818 }
819
820 /**
821 * decode_mir() - Decodes Memory Interleave Register (MIR) info
822 * @mir_no: number of the MIR register to decode
823 * @mir: array with the MIR data cached on the driver
824 */
825 static void decode_mir(int mir_no, u16 mir[MAX_MIR])
826 {
827 if (mir[mir_no] & 3)
828 edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
829 mir_no,
830 (mir[mir_no] >> 4) & 0xfff,
831 (mir[mir_no] & 1) ? "B0" : "",
832 (mir[mir_no] & 2) ? "B1" : "");
833 }
834
835 /**
836 * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
837 * @mci: struct mem_ctl_info pointer
838 *
839 * Data read is cached internally for its usage when needed
840 */
841 static int i7300_get_mc_regs(struct mem_ctl_info *mci)
842 {
843 struct i7300_pvt *pvt;
844 u32 actual_tolm;
845 int i, rc;
846
847 pvt = mci->pvt_info;
848
849 pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
850 (u32 *) &pvt->ambase);
851
852 edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
853
854 /* Get the Branch Map regs */
855 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
856 pvt->tolm >>= 12;
857 edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
858 pvt->tolm, pvt->tolm);
859
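/*
 * Worked example with a hypothetical value: a TOLM of 8 means 8 regions of
 * 256 MB, so actual_tolm becomes (1000 * 8) >> 2 = 2000, printed below as
 * "2.000 GB".
 */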
860 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
861 edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
862 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
863
864 /* Get memory controller settings */
865 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
866 &pvt->mc_settings);
867 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
868 &pvt->mc_settings_a);
869
870 if (IS_SINGLE_MODE(pvt->mc_settings_a))
871 edac_dbg(0, "Memory controller operating on single mode\n");
872 else
873 edac_dbg(0, "Memory controller operating on %smirrored mode\n",
874 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
875
876 edac_dbg(0, "Error detection is %s\n",
877 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
878 edac_dbg(0, "Retry is %s\n",
879 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
880
881 /* Get Memory Interleave Range registers */
882 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
883 &pvt->mir[0]);
884 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
885 &pvt->mir[1]);
886 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
887 &pvt->mir[2]);
888
889 /* Decode the MIR regs */
890 for (i = 0; i < MAX_MIR; i++)
891 decode_mir(i, pvt->mir);
892
893 rc = i7300_init_csrows(mci);
894 if (rc < 0)
895 return rc;
896
897 /* Go and determine the size of each DIMM and place in an
898 * orderly matrix */
899 print_dimm_size(pvt);
900
901 return 0;
902 }
903
904 /*************************************************
905 * i7300 Functions related to device probe/release
906 *************************************************/
907
908 /**
909 * i7300_put_devices() - Release the PCI devices
910 * @mci: struct mem_ctl_info pointer
911 */
912 static void i7300_put_devices(struct mem_ctl_info *mci)
913 {
914 struct i7300_pvt *pvt;
915 int branch;
916
917 pvt = mci->pvt_info;
918
919 /* Decrement usage count for devices */
920 for (branch = 0; branch < MAX_BRANCHES; branch++)
921 pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
922 pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
923 pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
924 }
925
926 /**
927 * i7300_get_devices() - Find and perform 'get' operation on the MCH's
928 * device/functions we want to reference for this driver
929 * @mci: struct mem_ctl_info pointer
930 *
931 * Access and prepare the several devices for usage:
932 * I7300 devices used by this driver:
933 * Device 16, functions 0,1 and 2: PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
934 * Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
935 * Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
936 */
937 static int i7300_get_devices(struct mem_ctl_info *mci)
938 {
939 struct i7300_pvt *pvt;
940 struct pci_dev *pdev;
941
942 pvt = mci->pvt_info;
943
944 /* Attempt to 'get' the MCH register we want */
945 pdev = NULL;
946 while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
947 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
948 pdev))) {
949 /* Store device 16 funcs 1 and 2 */
950 switch (PCI_FUNC(pdev->devfn)) {
951 case 1:
952 if (!pvt->pci_dev_16_1_fsb_addr_map)
953 pvt->pci_dev_16_1_fsb_addr_map =
954 pci_dev_get(pdev);
955 break;
956 case 2:
957 if (!pvt->pci_dev_16_2_fsb_err_regs)
958 pvt->pci_dev_16_2_fsb_err_regs =
959 pci_dev_get(pdev);
960 break;
961 }
962 }
963
964 if (!pvt->pci_dev_16_1_fsb_addr_map ||
965 !pvt->pci_dev_16_2_fsb_err_regs) {
966 /* At least one device was not found */
967 i7300_printk(KERN_ERR,
968 "'system address, Process Bus' device not found: "
969 "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
970 PCI_VENDOR_ID_INTEL,
971 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
972 goto error;
973 }
974
975 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
976 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
977 pvt->pci_dev_16_0_fsb_ctlr->vendor,
978 pvt->pci_dev_16_0_fsb_ctlr->device);
979 edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
980 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
981 pvt->pci_dev_16_1_fsb_addr_map->vendor,
982 pvt->pci_dev_16_1_fsb_addr_map->device);
983 edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
984 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
985 pvt->pci_dev_16_2_fsb_err_regs->vendor,
986 pvt->pci_dev_16_2_fsb_err_regs->device);
987
988 pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
989 PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
990 NULL);
991 if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
992 i7300_printk(KERN_ERR,
993 "MC: 'BRANCH 0' device not found: "
994 "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
995 PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
996 goto error;
997 }
998
999 pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
1000 PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
1001 NULL);
1002 if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
1003 i7300_printk(KERN_ERR,
1004 "MC: 'BRANCH 1' device not found: "
1005 "vendor 0x%x device 0x%x Func 0 "
1006 "(broken BIOS?)\n",
1007 PCI_VENDOR_ID_INTEL,
1008 PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
1009 goto error;
1010 }
1011
1012 return 0;
1013
1014 error:
1015 i7300_put_devices(mci);
1016 return -ENODEV;
1017 }
1018
1019 /**
1020 * i7300_init_one() - Probe for one instance of the device
1021 * @pdev: struct pci_dev pointer
1022 * @id: struct pci_device_id pointer - currently unused
1023 */
1024 static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1025 {
1026 struct mem_ctl_info *mci;
1027 struct edac_mc_layer layers[3];
1028 struct i7300_pvt *pvt;
1029 int rc;
1030
1031 /* wake up device */
1032 rc = pci_enable_device(pdev);
1033 if (rc == -EIO)
1034 return rc;
1035
1036 edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1037 pdev->bus->number,
1038 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1039
1040 /* We are only looking for func 0 of the set */
1041 if (PCI_FUNC(pdev->devfn) != 0)
1042 return -ENODEV;
1043
1044 /* allocate a new MC control structure */
1045 layers[0].type = EDAC_MC_LAYER_BRANCH;
1046 layers[0].size = MAX_BRANCHES;
1047 layers[0].is_virt_csrow = false;
1048 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1049 layers[1].size = MAX_CH_PER_BRANCH;
1050 layers[1].is_virt_csrow = true;
1051 layers[2].type = EDAC_MC_LAYER_SLOT;
1052 layers[2].size = MAX_SLOTS;
1053 layers[2].is_virt_csrow = true;
1054 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1055 if (mci == NULL)
1056 return -ENOMEM;
1057
1058 edac_dbg(0, "MC: mci = %p\n", mci);
1059
1060 mci->pdev = &pdev->dev; /* record ptr to the generic device */
1061
1062 pvt = mci->pvt_info;
1063 pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */
1064
1065 pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1066 if (!pvt->tmp_prt_buffer) {
1067 edac_mc_free(mci);
1068 return -ENOMEM;
1069 }
1070
1071 /* 'get' the pci devices we want to reserve for our use */
1072 if (i7300_get_devices(mci))
1073 goto fail0;
1074
1075 mci->mc_idx = 0;
1076 mci->mtype_cap = MEM_FLAG_FB_DDR2;
1077 mci->edac_ctl_cap = EDAC_FLAG_NONE;
1078 mci->edac_cap = EDAC_FLAG_NONE;
1079 mci->mod_name = "i7300_edac.c";
1080 mci->ctl_name = i7300_devs[0].ctl_name;
1081 mci->dev_name = pci_name(pdev);
1082 mci->ctl_page_to_phys = NULL;
1083
1084 /* Set the function pointer to an actual operation function */
1085 mci->edac_check = i7300_check_error;
1086
1087 /* initialize the MC control structure 'csrows' table
1088 * with the mapping and control information */
1089 if (i7300_get_mc_regs(mci)) {
1090 edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
1091 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
1092 } else {
1093 edac_dbg(1, "MC: Enable error reporting now\n");
1094 i7300_enable_error_reporting(mci);
1095 }
1096
1097 /* add this new MC control structure to EDAC's list of MCs */
1098 if (edac_mc_add_mc(mci)) {
1099 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1100 /* FIXME: perhaps some code should go here that disables error
1101 * reporting if we just enabled it
1102 */
1103 goto fail1;
1104 }
1105
1106 i7300_clear_error(mci);
1107
1108 /* allocating generic PCI control info */
1109 i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1110 if (!i7300_pci) {
1111 printk(KERN_WARNING
1112 "%s(): Unable to create PCI control\n",
1113 __func__);
1114 printk(KERN_WARNING
1115 "%s(): PCI error report via EDAC not setup\n",
1116 __func__);
1117 }
1118
1119 return 0;
1120
1121 /* Error exit unwinding stack */
1122 fail1:
1123
1124 i7300_put_devices(mci);
1125
1126 fail0:
1127 kfree(pvt->tmp_prt_buffer);
1128 edac_mc_free(mci);
1129 return -ENODEV;
1130 }
1131
1132 /**
1133 * i7300_remove_one() - Remove the driver
1134 * @pdev: struct pci_dev pointer
1135 */
1136 static void i7300_remove_one(struct pci_dev *pdev)
1137 {
1138 struct mem_ctl_info *mci;
1139 char *tmp;
1140
1141 edac_dbg(0, "\n");
1142
1143 if (i7300_pci)
1144 edac_pci_release_generic_ctl(i7300_pci);
1145
1146 mci = edac_mc_del_mc(&pdev->dev);
1147 if (!mci)
1148 return;
1149
1150 tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;
1151
1152 /* retrieve references to resources, and free those resources */
1153 i7300_put_devices(mci);
1154
1155 kfree(tmp);
1156 edac_mc_free(mci);
1157 }
1158
1159 /*
1160 * pci_device_id: table for which devices we are looking for
1161 *
1162 * Has only 8086:360c PCI ID
1163 */
1164 static const struct pci_device_id i7300_pci_tbl[] = {
1165 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
1166 {0,} /* 0 terminated list. */
1167 };
1168
1169 MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
1170
1171 /*
1172 * i7300_driver: pci_driver structure for this module
1173 */
1174 static struct pci_driver i7300_driver = {
1175 .name = "i7300_edac",
1176 .probe = i7300_init_one,
1177 .remove = i7300_remove_one,
1178 .id_table = i7300_pci_tbl,
1179 };
1180
1181 /**
1182 * i7300_init() - Registers the driver
1183 */
1184 static int __init i7300_init(void)
1185 {
1186 int pci_rc;
1187
1188 edac_dbg(2, "\n");
1189
1190 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1191 opstate_init();
1192
1193 pci_rc = pci_register_driver(&i7300_driver);
1194
1195 return (pci_rc < 0) ? pci_rc : 0;
1196 }
1197
1198 /**
1199 * i7300_exit() - Unregisters the driver
1200 */
1201 static void __exit i7300_exit(void)
1202 {
1203 edac_dbg(2, "\n");
1204 pci_unregister_driver(&i7300_driver);
1205 }
1206
1207 module_init(i7300_init);
1208 module_exit(i7300_exit);
1209
1210 MODULE_LICENSE("GPL");
1211 MODULE_AUTHOR("Mauro Carvalho Chehab");
1212 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
1213 MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
1214 I7300_REVISION);
1215
1216 module_param(edac_op_state, int, 0444);
1217 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1218