// SPDX-License-Identifier: GPL-2.0-only
/*
 * HSM extension and cpu_ops implementation.
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */
7
8 #include <linux/init.h>
9 #include <linux/mm.h>
10 #include <linux/sched/task_stack.h>
11 #include <asm/cpu_ops.h>
12 #include <asm/cpu_ops_sbi.h>
13 #include <asm/sbi.h>
14 #include <asm/smp.h>
15
16 extern char secondary_start_sbi[];
17 const struct cpu_operations cpu_ops_sbi;
18
19 /*
20 * Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can
21 * be invoked from multiple threads in parallel. Define a per cpu data
22 * to handle that.
23 */
24 static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
25
sbi_hsm_hart_start(unsigned long hartid,unsigned long saddr,unsigned long priv)26 static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
27 unsigned long priv)
28 {
29 struct sbiret ret;
30
31 ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
32 hartid, saddr, priv, 0, 0, 0);
33 if (ret.error)
34 return sbi_err_map_linux_errno(ret.error);
35 else
36 return 0;
37 }
38
39 #ifdef CONFIG_HOTPLUG_CPU
sbi_hsm_hart_stop(void)40 static int sbi_hsm_hart_stop(void)
41 {
42 struct sbiret ret;
43
44 ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STOP, 0, 0, 0, 0, 0, 0);
45
46 if (ret.error)
47 return sbi_err_map_linux_errno(ret.error);
48 else
49 return 0;
50 }
51
sbi_hsm_hart_get_status(unsigned long hartid)52 static int sbi_hsm_hart_get_status(unsigned long hartid)
53 {
54 struct sbiret ret;
55
56 ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STATUS,
57 hartid, 0, 0, 0, 0, 0);
58 if (ret.error)
59 return sbi_err_map_linux_errno(ret.error);
60 else
61 return ret.value;
62 }
63 #endif
64
sbi_cpu_start(unsigned int cpuid,struct task_struct * tidle)65 static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
66 {
67 unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
68 unsigned long hartid = cpuid_to_hartid_map(cpuid);
69 unsigned long hsm_data;
70 struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid);
71
72 /* Make sure tidle is updated */
73 smp_mb();
74 bdata->task_ptr = tidle;
75 bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
76 /* Make sure boot data is updated */
77 smp_mb();
78 hsm_data = __pa(bdata);
79 return sbi_hsm_hart_start(hartid, boot_addr, hsm_data);
80 }
81
sbi_cpu_prepare(unsigned int cpuid)82 static int sbi_cpu_prepare(unsigned int cpuid)
83 {
84 if (!cpu_ops_sbi.cpu_start) {
85 pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
86 return -ENODEV;
87 }
88 return 0;
89 }
90
91 #ifdef CONFIG_HOTPLUG_CPU
sbi_cpu_disable(unsigned int cpuid)92 static int sbi_cpu_disable(unsigned int cpuid)
93 {
94 if (!cpu_ops_sbi.cpu_stop)
95 return -EOPNOTSUPP;
96 return 0;
97 }
98
/*
 * Stop the calling CPU via the HSM extension.  On success the hart never
 * returns here, so reaching the pr_crit() below means the stop failed.
 */
static void sbi_cpu_stop(void)
{
	int err = sbi_hsm_hart_stop();

	pr_crit("Unable to stop the cpu %u (%d)\n", smp_processor_id(), err);
}
106
sbi_cpu_is_stopped(unsigned int cpuid)107 static int sbi_cpu_is_stopped(unsigned int cpuid)
108 {
109 int rc;
110 unsigned long hartid = cpuid_to_hartid_map(cpuid);
111
112 rc = sbi_hsm_hart_get_status(hartid);
113
114 if (rc == SBI_HSM_STATE_STOPPED)
115 return 0;
116 return rc;
117 }
118 #endif
119
120 const struct cpu_operations cpu_ops_sbi = {
121 .name = "sbi",
122 .cpu_prepare = sbi_cpu_prepare,
123 .cpu_start = sbi_cpu_start,
124 #ifdef CONFIG_HOTPLUG_CPU
125 .cpu_disable = sbi_cpu_disable,
126 .cpu_stop = sbi_cpu_stop,
127 .cpu_is_stopped = sbi_cpu_is_stopped,
128 #endif
129 };
130