// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)
#define HYGON_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

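/*
 * Per-policy driver data.  @resume forces a register write on the next
 * frequency transition even if the cached P-state already matches (set
 * after resume, or when the BIOS changed the frequency behind our back).
 * @cpu_feature selects MSR- vs. IO-port-based access, @acpi_perf_cpu is
 * the CPU whose ACPI performance data backs this policy, and
 * @freqdomain_cpus is exposed through the freqdomain_cpus sysfs attribute.
 */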
struct acpi_cpufreq_data {
	unsigned int resume;
	unsigned int cpu_feature;
	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}

static struct cpufreq_driver acpi_cpufreq_driver;

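/*
 * When non-zero, every frequency transition is verified by re-reading the
 * hardware state (see check_freqs()), fast switching is disabled and the
 * CPPC boost ratio is ignored.  Exposed as a module parameter; for example
 * (hypothetical invocation, not part of the original source):
 *
 *	modprobe acpi-cpufreq acpi_pstate_strict=1
 */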
static unsigned int acpi_pstate_strict;

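/*
 * Report whether boost is currently enabled on @cpu.  Intel-style parts
 * keep a turbo-disable bit in MSR_IA32_MISC_ENABLE, while AMD and Hygon
 * keep a core-performance-boost-disable bit in MSR_K7_HWCR, so the result
 * is the negation of the respective bit.
 */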
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

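/*
 * Set or clear the vendor-specific boost-disable bit on the current CPU;
 * callers fan this out to other CPUs via IPIs or CPU hotplug callbacks.
 */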
static int boost_set_msr(bool enable)
{
	u32 msr_addr;
	u64 msr_mask, val;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return -EINVAL;
	}

	rdmsrl(msr_addr, val);

	if (enable)
		val &= ~msr_mask;
	else
		val |= msr_mask;

	wrmsrl(msr_addr, val);
	return 0;
}

static void boost_set_msr_each(void *p_en)
{
	bool enable = (bool) p_en;

	boost_set_msr(enable);
}

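/* Toggle boost on every CPU in the policy by running boost_set_msr() there. */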
static int set_boost(struct cpufreq_policy *policy, int val)
{
	on_each_cpu_mask(policy->cpus, boost_set_msr_each,
			 (void *)(long)val, 1);
	pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
		 cpumask_pr_args(policy->cpus), val ? "en" : "dis");

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	if (unlikely(!data))
		return -ENODEV;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	int ret;
	unsigned int val = 0;

	if (!acpi_cpufreq_driver.set_boost)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &val);
	if (ret || val > 1)
		return -EINVAL;

	cpus_read_lock();
	set_boost(policy, val);
	cpus_read_unlock();

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	int i;

	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return policy->freq_table[i].frequency;
	}
	return 0;
}

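/*
 * Only a vendor-specific low range of the MSR value encodes the current
 * P-state, so mask it off before comparing against the ACPI _PSS status
 * values.  Falls back to the first (highest) table entry on no match.
 */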
static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		msr &= HYGON_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, policy->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return policy->freq_table[0].frequency;
}

static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(policy, val);
	case SYSTEM_IO_CAPABLE:
		return extract_io(policy, val);
	default:
		return 0;
	}
}

static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_IA32_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PERF_CTL, lo, hi);
	lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}

static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_AMD_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
	wrmsr(MSR_AMD_PERF_CTL, val, 0);
}

static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
	u32 val;

	acpi_os_read_port(reg->address, &val, reg->bit_width);
	return val;
}

static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
	acpi_os_write_port(reg->address, val, reg->bit_width);
}

struct drv_cmd {
	struct acpi_pct_register *reg;
	u32 val;
	union {
		void (*write)(struct acpi_pct_register *reg, u32 val);
		u32 (*read)(struct acpi_pct_register *reg);
	} func;
};

/* Called via smp_call_function_any(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->val = cmd->func.read(cmd->reg);
}

static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}

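/*
 * Write the new control value on every CPU in @mask.  The local CPU is
 * handled explicitly because smp_call_function_many() skips the caller.
 */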
static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("%s = %u\n", __func__, val);

	return val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("%s (%d)\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

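/*
 * Used when acpi_pstate_strict is set: poll the hardware (up to 100 times,
 * 10 us apart) until it reports the requested frequency.
 */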
static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

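/*
 * cpufreq ->target_index() callback: @index refers to policy->freq_table,
 * whose driver_data field holds the matching ACPI P-state index.  The
 * hardware write is skipped when the CPU is already in the target state,
 * unless the resume flag forces a write.
 */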
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	const struct cpumask *mask;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(!data))
		return -ENODEV;

	perf = to_perf_data(data);
	next_perf_state = policy->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				 next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				 next_perf_state);
			return 0;
		}
	}

	/*
	 * The core won't allow CPUs to go away until the governor has been
	 * stopped, so we can rely on the stability of policy->cpus.
	 */
	mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
		cpumask_of(policy->cpu) : policy->cpus;

	drv_write(data, mask, perf->states[next_perf_state].control);

	if (acpi_pstate_strict) {
		if (!check_freqs(policy, mask,
				 policy->freq_table[index].frequency)) {
			pr_debug("%s (%d)\n", __func__, policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}

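/*
 * cpufreq ->fast_switch() callback: invoked by governors on the fast
 * frequency switching path, where cross-CPU calls and strict result
 * checking are not possible (see the fast_switch_possible setup in
 * acpi_cpufreq_cpu_init()), so the control register is written directly
 * on the local CPU.
 */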
static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *entry;
	unsigned int next_perf_state, next_freq, index;

	/*
	 * Find the closest frequency above target_freq.
	 */
	if (policy->cached_target_freq == target_freq)
		index = policy->cached_resolved_idx;
	else
		index = cpufreq_table_find_index_dl(policy, target_freq,
						    false);

	entry = &policy->freq_table[index];
	next_freq = entry->frequency;
	next_perf_state = entry->driver_data;

	perf = to_perf_data(data);
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume))
			data->resume = 0;
		else
			return next_freq;
	}

	data->cpu_freq_write(&perf->control_register,
			     perf->states[next_perf_state].control);
	perf->state = next_perf_state;
	return next_freq;
}

static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int cpufreq_boost_online(unsigned int cpu)
{
	/*
	 * On the CPU_UP path we simply keep the boost-disable flag
	 * in sync with the current global state.
	 */
	return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
}

static int cpufreq_boost_down_prep(unsigned int cpu)
{
	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting.
	 */
	return boost_set_msr(1);
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;

	pr_debug("%s\n", __func__);

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either setting it up
 * in hardware or handling it in firmware, without informing the OS.
 * If this goes undetected, the CPU may run at a different speed than
 * the OS intended. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_stepping == 8)) {
			pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

#ifdef CONFIG_ACPI_CPPC_LIB
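/*
 * Return the ratio of the CPPC "highest" to "nominal" performance level,
 * shifted left by SCHED_CAPACITY_SHIFT, or 0 if it cannot be determined.
 * Used to scale cpuinfo.max_freq above the nominal frequency from _PSS.
 */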
static u64 get_max_boost_ratio(unsigned int cpu)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	int ret;

	if (acpi_pstate_strict)
		return 0;

	ret = cppc_get_perf_caps(cpu, &perf_caps);
	if (ret) {
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
			 cpu, ret);
		return 0;
	}

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		highest_perf = amd_get_highest_perf();
	else
		highest_perf = perf_caps.highest_perf;

	nominal_perf = perf_caps.nominal_perf;

	if (!highest_perf || !nominal_perf) {
		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
		return 0;
	}

	if (highest_perf < nominal_perf) {
		pr_debug("CPU%d: nominal performance above highest\n", cpu);
		return 0;
	}

	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
#else
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
#endif

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct acpi_processor_performance *perf;
	struct acpi_cpufreq_data *data;
	unsigned int cpu = policy->cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned int valid_states = 0;
	unsigned int result = 0;
	u64 max_boost_ratio;
	unsigned int i;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("%s\n", __func__);

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
	    !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		data->cpu_freq_read = cpu_freq_read_io;
		data->cpu_freq_write = cpu_freq_write_io;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_intel;
			data->cpu_freq_write = cpu_freq_write_intel;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_amd;
			data->cpu_freq_write = cpu_freq_write_amd;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			 (u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
			     GFP_KERNEL);
	if (!freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
				perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		pr_info_once("P-state transition latency capped at 20 uS\n");
	}

	/*
	 * Table init: copy the _PSS states into the frequency table, skipping
	 * entries that would not keep the frequencies strictly descending
	 * (i.e. duplicates and out-of-order states).
	 */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    freq_table[valid_states-1].frequency / 1000)
			continue;

		freq_table[valid_states].driver_data = i;
		freq_table[valid_states].frequency =
			perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;

	max_boost_ratio = get_max_boost_ratio(cpu);
	if (max_boost_ratio) {
		unsigned int freq = freq_table[0].frequency;

		/*
		 * Because the loop above sorts the freq_table entries in the
		 * descending order, freq is the maximum frequency in the table.
		 * Assume that it corresponds to the CPPC nominal frequency and
		 * use it to set cpuinfo.max_freq.
		 */
		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
	} else {
		/*
		 * If the maximum "boost" frequency is unknown, ask the arch
		 * scale-invariance code to use the "nominal" performance for
		 * CPU utilization scaling so as to prevent the schedutil
		 * governor from selecting inadequate CPU frequencies.
		 */
		arch_set_max_freq_ratio(true);
	}

	policy->freq_table = freq_table;
	perf->state = 0;

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			 (i == perf->state ? '*' : ' '), i,
			 (u32) perf->states[i].core_frequency,
			 (u32) perf->states[i].power,
			 (u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

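	/*
	 * Fast switching writes the control register directly on the local
	 * CPU, so it cannot be used with strict checking or when multiple
	 * CPUs must be coordinated in software (any shared policy whose
	 * shared_type is not CPUFREQ_SHARED_TYPE_ANY).
	 */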
	policy->fast_switch_possible = !acpi_pstate_strict &&
		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);

	if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
		pr_warn(FW_WARN "P-state 0 is not max freq\n");

	return result;

err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	policy->fast_switch_possible = false;
	policy->driver_data = NULL;
	acpi_processor_unregister_performance(data->acpi_perf_cpu);
	free_cpumask_var(data->freqdomain_cpus);
	kfree(policy->freq_table);
	kfree(data);

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
#endif
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.fast_switch	= acpi_cpufreq_fast_switch,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};

static enum cpuhp_state acpi_cpufreq_online;

static void __init acpi_cpufreq_boost_init(void)
{
	int ret;

	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
		pr_debug("Boost capabilities not present in the processor\n");
		return;
	}

	acpi_cpufreq_driver.set_boost = set_boost;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);

	/*
	 * This calls the online callback on all online CPUs and forces all
	 * MSRs to the same value.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
				cpufreq_boost_online, cpufreq_boost_down_prep);
	if (ret < 0) {
		pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
		return;
	}
	acpi_cpufreq_online = ret;
}

static void acpi_cpufreq_boost_exit(void)
{
	if (acpi_cpufreq_online > 0)
		cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}

static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	pr_debug("%s\n", __func__);

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* This is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret) {
		free_acpi_perf_data();
		acpi_cpufreq_boost_exit();
	}
	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("%s\n", __func__);

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id __maybe_unused acpi_cpufreq_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

MODULE_ALIAS("acpi");