// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

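/*
 * Summary of each 32-bit half of the K6 UWCCR MSR, as used by the code
 * below (derived from the accesses in this file, not a full datasheet
 * description):
 *
 *	31:17	region base address (128K aligned)
 *	16:2	inverted mask of 128K blocks giving the region size
 *	1	write-combining enable
 *	0	uncacheable enable
 */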
static void
amd_get_mtrr(unsigned int reg, unsigned long *base,
	     unsigned long *size, mtrr_type *type)
{
	unsigned long low, high;

	rdmsr(MSR_K6_UWCCR, low, high);
	/* Upper dword is region 1, lower is region 0 */
	if (reg == 1)
		low = high;
	/* The base sits in bits 31:17 and is 128K aligned */
	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
	*type = 0;
	if (low & 1)
		*type = MTRR_TYPE_UNCACHABLE;
	if (low & 2)
		*type = MTRR_TYPE_WRCOMB;
	if (!(low & 3)) {
		*size = 0;
		return;
	}
	/*
	 * This needs a little explaining. The size is stored as an
	 * inverted mask of 128K-granularity blocks: a 15-bit field
	 * starting at bit 2.
	 *
	 * So to get a size we invert the mask and add 1 to the lowest
	 * mask bit (i.e. add 4, as the field sits 2 bits in). This gives
	 * us a value which the shift below converts into pages.
	 *
	 * eg 111 1111 1111 1100 is 512K
	 *
	 * invert 000 0000 0000 0011
	 * +1     000 0000 0000 0100
	 * *128K  ...
	 */
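	/*
	 * Worked example (illustrative, with 4K pages): for a 512K
	 * region the mask field reads 111 1111 1111 1100, so
	 * low & 0x1FFFC == 0x1FFF0.  Then ~low & 0x1FFFC == 0xC,
	 * + 4 == 0x10, and << (15 - PAGE_SHIFT) gives 0x80 pages,
	 * i.e. 512K.
	 */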
	low = (~low) & 0x1FFFC;
	*size = (low + 4) << (15 - PAGE_SHIFT);
}

/**
 * amd_set_mtrr - Set variable MTRR register on the local CPU.
 *
 * @reg:	The register to set.
 * @base:	The base address of the region.
 * @size:	The size of the region. If this is 0 the region is disabled.
 * @type:	The type of the region.
 *
 * Returns nothing.
 */
static void
amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	u32 regs[2];

	/*
	 * Low is MTRR0, High MTRR 1
	 */
	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
	/*
	 * Blank to disable
	 */
	if (size == 0) {
		regs[reg] = 0;
	} else {
		/*
		 * Set the register to the base, the type (off by one) and an
		 * inverted bitmask of the size. The size is the only odd
		 * bit: we are fed, say, 512K; inverting that gives 111 1111
		 * 1111 1011, but if you subtract one and then invert you get
		 * the desired 111 1111 1111 1100 mask.
		 *
		 * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!
		 */
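		/*
		 * Worked example (illustrative, with 4K pages): a 512K
		 * write-combining region at 2G comes in as base = 0x80000
		 * pages, size = 0x80 pages, type = MTRR_TYPE_WRCOMB.
		 * -size >> 3 & 0x1FFFC == 0x1FFF0, base << 12 == 0x80000000
		 * and type + 1 == 2, so the register becomes 0x8001FFF2.
		 */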
		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
		    | (base << PAGE_SHIFT) | (type + 1);
	}

	/*
	 * The writeback rule is quite specific. See the manual. It is:
	 * disable local interrupts, write back the cache, set the MTRR.
	 */
	wbinvd();
	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
}

static int
amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	/*
	 * Apply the K6 block alignment and size rules
	 * In order:
	 * o Uncached or gathering only
	 * o 128K or bigger block
	 * o Power of 2 block
	 * o base suitably aligned to the power
	 */
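	/*
	 * Note on the check below: size & ~(size - 1) isolates the lowest
	 * set bit, which equals size only when size is a power of two.
	 * E.g. for size == 0x60 it yields 0x20, so the subtraction is
	 * non-zero and the request is rejected.
	 */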
	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
	    || (size & ~(size - 1)) - size || (base & (size - 1)))
		return -EINVAL;
	return 0;
}

static const struct mtrr_ops amd_mtrr_ops = {
	.vendor            = X86_VENDOR_AMD,
	.set               = amd_set_mtrr,
	.get               = amd_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.validate_add_page = amd_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};

int __init amd_init_mtrr(void)
{
	set_mtrr_ops(&amd_mtrr_ops);
	return 0;
}