Lines matching: lock, latency, ns

1 // SPDX-License-Identifier: GPL-2.0-only
15 * P-state scale which is tied to CPU frequency only. In brief, the basic
18 * - OS makes a CPU performance request. (Can provide min and max bounds)
20 * - Platform (such as BMC) is free to optimize request within requested bounds
23 * - Platform conveys its decision back to OS
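The three-step handshake above is exposed to consumers through the CPPC API in <acpi/cppc_acpi.h>. A minimal sketch of how a cpufreq driver might issue step one, assuming only the exported cppc_set_perf() helper (the wrapper name is hypothetical):

        #include <acpi/cppc_acpi.h>

        /* Hypothetical wrapper: convey a desired performance level to the
         * platform. cppc_set_perf() only submits the request; per the flow
         * above, the platform may settle on any value within the bounds it
         * advertised and reports its decision back over the PCC channel.
         */
        static int example_request_perf(int cpu, u32 desired)
        {
                struct cppc_perf_ctrls ctrls = {
                        .desired_perf = desired,
                };

                return cppc_set_perf(cpu, &ctrls);
        }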
57 * Lock to provide controlled access to the PCC channel.
67 * For non-performance-critical use cases (init)
95 #define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
99 #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
100 (cpc)->cpc_entry.reg.space_id == \
104 #define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
105 (reg)->address == 0 && \
106 (reg)->bit_width == 0 && \
107 (reg)->bit_offset == 0 && \
108 (reg)->access_width == 0)
111 #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
112 !!(cpc)->cpc_entry.int_value : \
113 !IS_NULL_REG(&(cpc)->cpc_entry.reg))
135 ret = access_fn(cpc_ptr->cpu_id, &st_name); \
161 ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); in show_feedback_ctrs()
193 pcc_ss_data->pcc_comm_addr; in check_pcc_chan()
195 if (!pcc_ss_data->platform_owns_pcc) in check_pcc_chan()
202 ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status, in check_pcc_chan()
204 pcc_ss_data->deadline_us); in check_pcc_chan()
207 pcc_ss_data->platform_owns_pcc = false; in check_pcc_chan()
209 ret = -EIO; in check_pcc_chan()
225 int ret = -EIO, i; in send_pcc_cmd()
228 pcc_ss_data->pcc_comm_addr; in send_pcc_cmd()
241 if (pcc_ss_data->pending_pcc_write_cmd) in send_pcc_cmd()
248 pcc_ss_data->pending_pcc_write_cmd = FALSE; in send_pcc_cmd()
255 if (pcc_ss_data->pcc_mrtt) { in send_pcc_cmd()
257 pcc_ss_data->last_cmd_cmpl_time); in send_pcc_cmd()
258 if (pcc_ss_data->pcc_mrtt > time_delta) in send_pcc_cmd()
259 udelay(pcc_ss_data->pcc_mrtt - time_delta); in send_pcc_cmd()
263 * Handle the non-zero Maximum Periodic Access Rate (MPAR) in send_pcc_cmd()
273 if (pcc_ss_data->pcc_mpar) { in send_pcc_cmd()
274 if (pcc_ss_data->mpar_count == 0) { in send_pcc_cmd()
276 pcc_ss_data->last_mpar_reset); in send_pcc_cmd()
277 if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) { in send_pcc_cmd()
280 ret = -EIO; in send_pcc_cmd()
283 pcc_ss_data->last_mpar_reset = ktime_get(); in send_pcc_cmd()
284 pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar; in send_pcc_cmd()
286 pcc_ss_data->mpar_count--; in send_pcc_cmd()
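The MPAR logic above implements a fixed-window rate limiter: the platform advertises a command budget per minute, mpar_count tracks what remains, and once it hits zero further writes fail with -EIO until a full 60-second window has elapsed. A self-contained sketch of the same policy, with hypothetical names:

        #include <stdbool.h>
        #include <stdint.h>

        /* Hypothetical fixed-window limiter mirroring the MPAR handling:
         * at most 'mpar' commands per 60-second window.
         */
        struct mpar_limiter {
                unsigned int mpar;        /* budget per window (0 = unlimited) */
                unsigned int count;       /* commands left in current window */
                uint64_t window_start_ms; /* 0 until the first window opens */
        };

        static bool mpar_try_send(struct mpar_limiter *l, uint64_t now_ms)
        {
                if (!l->mpar)
                        return true;            /* no rate limit advertised */

                if (l->count == 0) {
                        /* Budget spent: reject unless a full window passed. */
                        if (l->window_start_ms &&
                            now_ms - l->window_start_ms < 60 * 1000)
                                return false;   /* caller should retry later */
                        l->window_start_ms = now_ms;  /* open a new window */
                        l->count = l->mpar;
                }
                l->count--;
                return true;
        }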
290 writew_relaxed(cmd, &generic_comm_base->command); in send_pcc_cmd()
293 writew_relaxed(0, &generic_comm_base->status); in send_pcc_cmd()
295 pcc_ss_data->platform_owns_pcc = true; in send_pcc_cmd()
298 ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd); in send_pcc_cmd()
308 if (pcc_ss_data->pcc_mrtt) in send_pcc_cmd()
309 pcc_ss_data->last_cmd_cmpl_time = ktime_get(); in send_pcc_cmd()
311 if (pcc_ss_data->pcc_channel->mbox->txdone_irq) in send_pcc_cmd()
312 mbox_chan_txdone(pcc_ss_data->pcc_channel, ret); in send_pcc_cmd()
314 mbox_client_txdone(pcc_ss_data->pcc_channel, ret); in send_pcc_cmd()
325 if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt) in send_pcc_cmd()
326 desc->write_cmd_status = ret; in send_pcc_cmd()
329 pcc_ss_data->pcc_write_cnt++; in send_pcc_cmd()
330 wake_up_all(&pcc_ss_data->pcc_write_wait_q); in send_pcc_cmd()
353 int result = -EFAULT; in acpi_get_psd()
366 return -ENODEV; in acpi_get_psd()
369 if (!psd || psd->package.count != 1) { in acpi_get_psd()
374 pdomain = &(cpc_ptr->domain_info); in acpi_get_psd()
379 status = acpi_extract_package(&(psd->package.elements[0]), in acpi_get_psd()
382 pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id); in acpi_get_psd()
386 if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) { in acpi_get_psd()
387 pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id); in acpi_get_psd()
391 if (pdomain->revision != ACPI_PSD_REV0_REVISION) { in acpi_get_psd()
392 pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id); in acpi_get_psd()
396 if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL && in acpi_get_psd()
397 pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY && in acpi_get_psd()
398 pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) { in acpi_get_psd()
399 pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id); in acpi_get_psd()
425 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
439 * Now that we have _PSD data from all CPUs, let's setup P-state in acpi_get_psd_map()
444 return -EFAULT; in acpi_get_psd_map()
446 pdomain = &(cpc_ptr->domain_info); in acpi_get_psd_map()
447 cpumask_set_cpu(cpu, cpu_data->shared_cpu_map); in acpi_get_psd_map()
448 if (pdomain->num_processors <= 1) in acpi_get_psd_map()
452 count_target = pdomain->num_processors; in acpi_get_psd_map()
453 if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) in acpi_get_psd_map()
454 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL; in acpi_get_psd_map()
455 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) in acpi_get_psd_map()
456 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW; in acpi_get_psd_map()
457 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) in acpi_get_psd_map()
458 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY; in acpi_get_psd_map()
468 match_pdomain = &(match_cpc_ptr->domain_info); in acpi_get_psd_map()
469 if (match_pdomain->domain != pdomain->domain) in acpi_get_psd_map()
473 if (match_pdomain->num_processors != count_target) in acpi_get_psd_map()
476 if (pdomain->coord_type != match_pdomain->coord_type) in acpi_get_psd_map()
479 cpumask_set_cpu(i, cpu_data->shared_cpu_map); in acpi_get_psd_map()
486 cpumask_clear(cpu_data->shared_cpu_map); in acpi_get_psd_map()
487 cpumask_set_cpu(cpu, cpu_data->shared_cpu_map); in acpi_get_psd_map()
488 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE; in acpi_get_psd_map()
490 return -EFAULT; in acpi_get_psd_map()
500 pcc_data[pcc_ss_idx]->pcc_channel = in register_pcc_channel()
503 if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) { in register_pcc_channel()
506 return -ENODEV; in register_pcc_channel()
515 cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv; in register_pcc_channel()
520 return -ENODEV; in register_pcc_channel()
524 * cppc_ss->latency is just a nominal value. In reality in register_pcc_channel()
528 usecs_lat = NUM_RETRIES * cppc_ss->latency; in register_pcc_channel()
529 pcc_data[pcc_ss_idx]->deadline_us = usecs_lat; in register_pcc_channel()
530 pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time; in register_pcc_channel()
531 pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate; in register_pcc_channel()
532 pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency; in register_pcc_channel()
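Because cppc_ss->latency is only a nominal per-command figure, the polling deadline is padded by a retry factor. A worked example, assuming NUM_RETRIES is 500 in this file (an assumption based on the surrounding driver, not shown in these matches):

        /* Illustrative numbers only:
         *
         *   cppc_ss->latency (PCCT nominal latency) = 100 us
         *   deadline_us = NUM_RETRIES * latency = 500 * 100 us = 50000 us
         *
         * check_pcc_chan() therefore polls the shared-memory status field
         * for up to 50 ms before timing out with -EIO.
         */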
534 pcc_data[pcc_ss_idx]->pcc_comm_addr = in register_pcc_channel()
535 acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length); in register_pcc_channel()
536 if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) { in register_pcc_channel()
539 return -ENOMEM; in register_pcc_channel()
543 pcc_data[pcc_ss_idx]->pcc_channel_acquired = true; in register_pcc_channel()
550 * cpc_ffh_supported() - check if FFH reading supported
563 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
568 * with hardware multi-threading support.
575 return -EINVAL; in pcc_data_alloc()
578 pcc_data[pcc_ss_id]->refcount++; in pcc_data_alloc()
583 return -ENOMEM; in pcc_data_alloc()
584 pcc_data[pcc_ss_id]->refcount++; in pcc_data_alloc()
668 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
680 acpi_handle handle = pr->handle; in acpi_cppc_processor_probe()
682 int pcc_subspace_id = -1; in acpi_cppc_processor_probe()
684 int ret = -EFAULT; in acpi_cppc_processor_probe()
690 ret = -ENODEV; in acpi_cppc_processor_probe()
698 ret = -ENOMEM; in acpi_cppc_processor_probe()
703 cpc_obj = &out_obj->package.elements[0]; in acpi_cppc_processor_probe()
704 if (cpc_obj->type == ACPI_TYPE_INTEGER) { in acpi_cppc_processor_probe()
705 num_ent = cpc_obj->integer.value; in acpi_cppc_processor_probe()
708 cpc_obj->type); in acpi_cppc_processor_probe()
711 cpc_ptr->num_entries = num_ent; in acpi_cppc_processor_probe()
714 cpc_obj = &out_obj->package.elements[1]; in acpi_cppc_processor_probe()
715 if (cpc_obj->type == ACPI_TYPE_INTEGER) { in acpi_cppc_processor_probe()
716 cpc_rev = cpc_obj->integer.value; in acpi_cppc_processor_probe()
719 cpc_obj->type); in acpi_cppc_processor_probe()
722 cpc_ptr->version = cpc_rev; in acpi_cppc_processor_probe()
729 cpc_obj = &out_obj->package.elements[i]; in acpi_cppc_processor_probe()
731 if (cpc_obj->type == ACPI_TYPE_INTEGER) { in acpi_cppc_processor_probe()
732 cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER; in acpi_cppc_processor_probe()
733 cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value; in acpi_cppc_processor_probe()
734 } else if (cpc_obj->type == ACPI_TYPE_BUFFER) { in acpi_cppc_processor_probe()
736 cpc_obj->buffer.pointer; in acpi_cppc_processor_probe()
744 if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { in acpi_cppc_processor_probe()
746 pcc_subspace_id = gas_t->access_width; in acpi_cppc_processor_probe()
749 } else if (pcc_subspace_id != gas_t->access_width) { in acpi_cppc_processor_probe()
753 } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { in acpi_cppc_processor_probe()
754 if (gas_t->address) { in acpi_cppc_processor_probe()
757 addr = ioremap(gas_t->address, gas_t->bit_width/8); in acpi_cppc_processor_probe()
760 cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr; in acpi_cppc_processor_probe()
763 if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { in acpi_cppc_processor_probe()
765 pr_debug("Unsupported register type: %d\n", gas_t->space_id); in acpi_cppc_processor_probe()
770 cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER; in acpi_cppc_processor_probe()
771 memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t)); in acpi_cppc_processor_probe()
773 pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id); in acpi_cppc_processor_probe()
777 per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id; in acpi_cppc_processor_probe()
784 for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) { in acpi_cppc_processor_probe()
785 cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER; in acpi_cppc_processor_probe()
786 cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0; in acpi_cppc_processor_probe()
791 cpc_ptr->cpu_id = pr->id; in acpi_cppc_processor_probe()
799 if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) { in acpi_cppc_processor_probe()
804 init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock); in acpi_cppc_processor_probe()
805 init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q); in acpi_cppc_processor_probe()
809 pr_debug("Parsed CPC struct for CPU: %d\n", pr->id); in acpi_cppc_processor_probe()
812 cpu_dev = get_cpu_device(pr->id); in acpi_cppc_processor_probe()
814 ret = -EINVAL; in acpi_cppc_processor_probe()
819 per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr; in acpi_cppc_processor_probe()
821 ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj, in acpi_cppc_processor_probe()
824 per_cpu(cpc_desc_ptr, pr->id) = NULL; in acpi_cppc_processor_probe()
825 kobject_put(&cpc_ptr->kobj); in acpi_cppc_processor_probe()
836 for (i = 2; i < cpc_ptr->num_entries; i++) { in acpi_cppc_processor_probe()
837 void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; in acpi_cppc_processor_probe()
851 * acpi_cppc_processor_exit - Cleanup CPC structs.
861 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id); in acpi_cppc_processor_exit()
864 if (pcc_data[pcc_ss_id]->pcc_channel_acquired) { in acpi_cppc_processor_exit()
865 pcc_data[pcc_ss_id]->refcount--; in acpi_cppc_processor_exit()
866 if (!pcc_data[pcc_ss_id]->refcount) { in acpi_cppc_processor_exit()
867 pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel); in acpi_cppc_processor_exit()
874 cpc_ptr = per_cpu(cpc_desc_ptr, pr->id); in acpi_cppc_processor_exit()
879 for (i = 2; i < cpc_ptr->num_entries; i++) { in acpi_cppc_processor_exit()
880 addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; in acpi_cppc_processor_exit()
885 kobject_put(&cpc_ptr->kobj); in acpi_cppc_processor_exit()
891 * cpc_read_ffh() - Read FFH register
902 return -ENOTSUPP; in cpc_read_ffh()
906 * cpc_write_ffh() - Write FFH register
917 return -ENOTSUPP; in cpc_write_ffh()
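Both stubs return -ENOTSUPP so that platforms without Functional Fixed Hardware support fall through cleanly; in the kernel they are weak symbols that architecture code can override. A sketch of what an x86 override might look like, where an FFH register maps to an MSR; EXAMPLE_FFH_MSR and the address-to-MSR mapping are invented for illustration:

        /* Hypothetical arch-side override of the weak cpc_read_ffh() stub.
         * The signature matches the stub above; the register decoding is
         * invented, and a real implementation derives it from reg->address.
         */
        int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
        {
                if (reg->address != EXAMPLE_FFH_MSR)  /* hypothetical constant */
                        return -ENOTSUPP;

                return rdmsrl_safe_on_cpu(cpunum, EXAMPLE_FFH_MSR, val);
        }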
931 struct cpc_reg *reg = &reg_res->cpc_entry.reg; in cpc_read()
933 if (reg_res->type == ACPI_TYPE_INTEGER) { in cpc_read()
934 *val = reg_res->cpc_entry.int_value; in cpc_read()
939 if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) in cpc_read()
940 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); in cpc_read()
941 else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) in cpc_read()
942 vaddr = reg_res->sys_mem_vaddr; in cpc_read()
943 else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) in cpc_read()
946 return acpi_os_read_memory((acpi_physical_address)reg->address, in cpc_read()
947 val, reg->bit_width); in cpc_read()
949 switch (reg->bit_width) { in cpc_read()
964 reg->bit_width, pcc_ss_id); in cpc_read()
965 ret_val = -EFAULT; in cpc_read()
976 struct cpc_reg *reg = &reg_res->cpc_entry.reg; in cpc_write()
978 if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) in cpc_write()
979 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); in cpc_write()
980 else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) in cpc_write()
981 vaddr = reg_res->sys_mem_vaddr; in cpc_write()
982 else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) in cpc_write()
985 return acpi_os_write_memory((acpi_physical_address)reg->address, in cpc_write()
986 val, reg->bit_width); in cpc_write()
988 switch (reg->bit_width) { in cpc_write()
1003 reg->bit_width, pcc_ss_id); in cpc_write()
1004 ret_val = -EFAULT; in cpc_write()
1014 struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx]; in cppc_get_perf()
1022 return -EIO; in cppc_get_perf()
1026 down_write(&pcc_ss_data->pcc_lock); in cppc_get_perf()
1031 ret = -EIO; in cppc_get_perf()
1033 up_write(&pcc_ss_data->pcc_lock); in cppc_get_perf()
1044 * cppc_get_desired_perf - Get the desired performance register value.
1048 * Return: 0 for success, -EIO otherwise.
1057 * cppc_get_nominal_perf - Get the nominal performance register value.
1061 * Return: 0 for success, -EIO otherwise.
1069 * cppc_get_perf_caps - Get a CPU's performance capabilities.
1073 * Return: 0 for success with perf_caps populated else -ERRNO.
1088 return -ENODEV; in cppc_get_perf_caps()
1091 highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF]; in cppc_get_perf_caps()
1092 lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF]; in cppc_get_perf_caps()
1093 lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF]; in cppc_get_perf_caps()
1094 nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; in cppc_get_perf_caps()
1095 low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ]; in cppc_get_perf_caps()
1096 nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ]; in cppc_get_perf_caps()
1097 guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF]; in cppc_get_perf_caps()
1105 return -ENODEV; in cppc_get_perf_caps()
1109 down_write(&pcc_ss_data->pcc_lock); in cppc_get_perf_caps()
1112 ret = -EIO; in cppc_get_perf_caps()
1118 perf_caps->highest_perf = high; in cppc_get_perf_caps()
1121 perf_caps->lowest_perf = low; in cppc_get_perf_caps()
1124 perf_caps->nominal_perf = nom; in cppc_get_perf_caps()
1126 if (guaranteed_reg->type != ACPI_TYPE_BUFFER || in cppc_get_perf_caps()
1127 IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) { in cppc_get_perf_caps()
1128 perf_caps->guaranteed_perf = 0; in cppc_get_perf_caps()
1131 perf_caps->guaranteed_perf = guaranteed; in cppc_get_perf_caps()
1135 perf_caps->lowest_nonlinear_perf = min_nonlinear; in cppc_get_perf_caps()
1138 ret = -EFAULT; in cppc_get_perf_caps()
1147 perf_caps->lowest_freq = low_f; in cppc_get_perf_caps()
1148 perf_caps->nominal_freq = nom_f; in cppc_get_perf_caps()
1153 up_write(&pcc_ss_data->pcc_lock); in cppc_get_perf_caps()
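For callers, the API above reduces to filling in a struct cppc_perf_caps. A short illustrative consumer (the field names match those populated above; the function itself is hypothetical):

        #include <acpi/cppc_acpi.h>

        /* Illustrative caller: fetch and log one CPU's performance envelope. */
        static int example_show_caps(int cpu)
        {
                struct cppc_perf_caps caps;
                int ret;

                ret = cppc_get_perf_caps(cpu, &caps);
                if (ret)
                        return ret;     /* -ENODEV, -EIO or -EFAULT, as above */

                pr_info("CPU%d: lowest=%u nominal=%u highest=%u\n",
                        cpu, caps.lowest_perf, caps.nominal_perf,
                        caps.highest_perf);
                return 0;
        }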
1159 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1163 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1177 return -ENODEV; in cppc_get_perf_ctrs()
1180 delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR]; in cppc_get_perf_ctrs()
1181 reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR]; in cppc_get_perf_ctrs()
1182 ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; in cppc_get_perf_ctrs()
1183 ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME]; in cppc_get_perf_ctrs()
1190 ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; in cppc_get_perf_ctrs()
1197 return -ENODEV; in cppc_get_perf_ctrs()
1200 down_write(&pcc_ss_data->pcc_lock); in cppc_get_perf_ctrs()
1204 ret = -EIO; in cppc_get_perf_ctrs()
1223 ret = -EFAULT; in cppc_get_perf_ctrs()
1227 perf_fb_ctrs->delivered = delivered; in cppc_get_perf_ctrs()
1228 perf_fb_ctrs->reference = reference; in cppc_get_perf_ctrs()
1229 perf_fb_ctrs->reference_perf = ref_perf; in cppc_get_perf_ctrs()
1230 perf_fb_ctrs->wraparound_time = ctr_wrap_time; in cppc_get_perf_ctrs()
1233 up_write(&pcc_ss_data->pcc_lock); in cppc_get_perf_ctrs()
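Consumers usually sample these counters twice and compute delivered performance from the deltas, in the manner of the cppc_cpufreq driver. A hedged sketch of that derivation (field names follow struct cppc_perf_fb_ctrs as populated above):

        #include <linux/math64.h>

        /* Sketch: delivered_perf = reference_perf * d(delivered) / d(reference),
         * taken between two samples t0 and t1 of the feedback counters.
         */
        static u64 example_delivered_perf(struct cppc_perf_fb_ctrs *t0,
                                          struct cppc_perf_fb_ctrs *t1)
        {
                u64 delta_ref = t1->reference - t0->reference;
                u64 delta_del = t1->delivered - t0->delivered;

                if (!delta_ref)         /* counters did not advance */
                        return 0;

                return div64_u64(t1->reference_perf * delta_del, delta_ref);
        }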
1239 * cppc_set_perf - Set a CPU's performance controls.
1243 * Return: 0 for success, -ERRNO otherwise.
1255 return -ENODEV; in cppc_set_perf()
1258 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; in cppc_set_perf()
1261 * This is Phase-I where we want to write to CPC registers in cppc_set_perf()
1262 * -> We want all CPUs to be able to execute this phase in parallel in cppc_set_perf()
1270 return -ENODEV; in cppc_set_perf()
1273 down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */ in cppc_set_perf()
1274 if (pcc_ss_data->platform_owns_pcc) { in cppc_set_perf()
1277 up_read(&pcc_ss_data->pcc_lock); in cppc_set_perf()
1283 * arrive and steal the channel during the switch to write lock in cppc_set_perf()
1285 pcc_ss_data->pending_pcc_write_cmd = true; in cppc_set_perf()
1286 cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt; in cppc_set_perf()
1287 cpc_desc->write_cmd_status = 0; in cppc_set_perf()
1294 cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); in cppc_set_perf()
1297 up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */ in cppc_set_perf()
1299 * This is Phase-II where we transfer the ownership of PCC to Platform in cppc_set_perf()
1303 * come out of Phase-I will enter Phase-II and ring the doorbell. in cppc_set_perf()
1305 * We have the following requirements for Phase-II: in cppc_set_perf()
1306 * 1. We want to execute Phase-II only when there are no CPUs in cppc_set_perf()
1307 * currently executing in Phase-I in cppc_set_perf()
1308 * 2. Once we start Phase-II we want to avoid all other CPUs from in cppc_set_perf()
1309 * entering Phase-I. in cppc_set_perf()
1310 * 3. We want only one CPU among all those who went through Phase-I in cppc_set_perf()
1311 * to run Phase-II in cppc_set_perf()
1313 * If write_trylock fails to get the lock and doesn't transfer the in cppc_set_perf()
1315 * 1. There is at least one CPU in Phase-I which will later execute in cppc_set_perf()
1316 * write_trylock, so the CPUs in Phase-I will be responsible for in cppc_set_perf()
1317 * executing the Phase-II. in cppc_set_perf()
1321 * before this CPU's Phase-I as we held the read_lock. in cppc_set_perf()
1332 * Phase-I and Phase-II. Before it is scheduled back on, another CPU in cppc_set_perf()
1339 * there was a pcc CMD_READ waiting on down_write and it steals the lock in cppc_set_perf()
1345 if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */ in cppc_set_perf()
1347 if (pcc_ss_data->pending_pcc_write_cmd) in cppc_set_perf()
1349 up_write(&pcc_ss_data->pcc_lock); /* END Phase-II */ in cppc_set_perf()
1352 wait_event(pcc_ss_data->pcc_write_wait_q, in cppc_set_perf()
1353 cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt); in cppc_set_perf()
1356 ret = cpc_desc->write_cmd_status; in cppc_set_perf()
1363 * cppc_get_transition_latency - returns frequency transition latency in ns
1366 * transition latency for performance change requests. The closest we have
1373 * Expected transition latency is based on the PCCT timing values in cppc_get_transition_latency()
1375 pcc_nominal - Expected latency to process a command, in microseconds in cppc_get_transition_latency()
1376 * pcc_mpar - The maximum number of periodic requests that the subspace in cppc_get_transition_latency()
1379 * pcc_mrtt - The minimum amount of time that OSPM must wait after the in cppc_get_transition_latency()
1393 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; in cppc_get_transition_latency()
1401 if (pcc_ss_data->pcc_mpar) in cppc_get_transition_latency()
1402 latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar); in cppc_get_transition_latency()
1404 latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000); in cppc_get_transition_latency()
1405 latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000); in cppc_get_transition_latency()
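A worked example of the computation above, with illustrative PCCT values:

        /* Illustrative values only:
         *
         *   pcc_mpar    = 6000 commands/min -> 60 * (1e9 / 6000) = 10,000,000 ns
         *   pcc_nominal = 5000 us           -> 5000 * 1000       =  5,000,000 ns
         *   pcc_mrtt    = 20000 us          -> 20000 * 1000      = 20,000,000 ns
         *
         * The reported transition latency is the maximum of the three: 20 ms.
         */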