// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Initial setup-routines for HP 9000 based hardware.
 *
 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
 * Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
 * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
 * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
 * Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
 *
 * Initial PA-RISC Version: 04-23-1999 by Helge Deller
 */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/topology.h>
#include <asm/param.h>
#include <asm/cache.h>
#include <asm/hardware.h>	/* for register_parisc_driver() stuff */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/smp.h>
#include <asm/pdcpat.h>
#include <asm/irq.h>		/* for struct irq_region */
#include <asm/parisc-device.h>

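/*
 * boot_cpu_data holds the system-wide processor information collected
 * from PDC firmware at boot; per-CPU runtime state lives in the
 * cpu_data per-CPU variable below.
 */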
struct system_cpuinfo_parisc boot_cpu_data __ro_after_init;
EXPORT_SYMBOL(boot_cpu_data);
#ifdef CONFIG_PA8X00
int _parisc_requires_coherency __ro_after_init;
EXPORT_SYMBOL(_parisc_requires_coherency);
#endif

DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);

/*
** PARISC CPU driver - claim "device" and initialize CPU data structures.
**
** Consolidate per CPU initialization into (mostly) one module.
** Monarch CPU will initialize boot_cpu_data which shouldn't
** change once the system has booted.
**
** The callback *should* do per-instance initialization of
** everything including the monarch. "Per CPU" init code in
** setup.c:start_parisc() has migrated here and start_parisc()
** will call register_parisc_driver(&cpu_driver) before calling do_inventory().
**
** The goal of consolidating CPU initialization into one place is
** to make sure all CPUs get initialized the same way.
** The code path not shared is how PDC hands control of the CPU to the OS.
** The initialization of OS data structures is the same (done below).
*/

/**
 * init_percpu_prof - enable/setup per cpu profiling hooks.
 * @cpunum: The processor instance.
 *
 * FIXME: doesn't do much yet...
 */
static void
init_percpu_prof(unsigned long cpunum)
{
}


/**
 * processor_probe - Determine if the processor driver should claim this device.
 * @dev: The device which has been found.
 *
 * Determine if the processor driver should claim this chip (return 0) or not
 * (return 1). If so, initialize the chip and tell other partners in crime
 * they have work to do.
 */
static int __init processor_probe(struct parisc_device *dev)
{
	unsigned long txn_addr;
	unsigned long cpuid;
	struct cpuinfo_parisc *p;
	struct pdc_pat_cpu_num cpu_info = { };

#ifdef CONFIG_SMP
	if (num_online_cpus() >= nr_cpu_ids) {
		printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
		return 1;
	}
#else
	if (boot_cpu_data.cpu_count > 0) {
		printk(KERN_INFO "CONFIG_SMP=n ignoring additional CPUs\n");
		return 1;
	}
#endif

	/* Set the logical CPU ID and update the global counter.
	 * May get overwritten by PAT code.
	 */
	cpuid = boot_cpu_data.cpu_count;
	txn_addr = dev->hpa.start;	/* for legacy PDC */
	cpu_info.cpu_num = cpu_info.cpu_loc = cpuid;

#ifdef CONFIG_64BIT
	if (is_pdc_pat()) {
		ulong status;
		unsigned long bytecnt;
		pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;

		pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
		if (!pa_pdc_cell)
			panic("couldn't allocate memory for PDC_PAT_CELL!");

		status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
			dev->mod_index, PA_VIEW, pa_pdc_cell);

		BUG_ON(PDC_OK != status);

		/* verify it's the same as what do_pat_inventory() found */
		BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
		BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);

		txn_addr = pa_pdc_cell->mod[0];	/* id_eid for IO sapic */

		kfree(pa_pdc_cell);

		/* get the cpu number */
		status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
		BUG_ON(PDC_OK != status);

		pr_info("Logical CPU #%lu is physical cpu #%lu at location "
			"0x%lx with hpa %pa\n",
			cpuid, cpu_info.cpu_num, cpu_info.cpu_loc,
			&dev->hpa.start);

#undef USE_PAT_CPUID
#ifdef USE_PAT_CPUID
/* We need contiguous numbers for cpuid. Firmware's notion
 * of cpuid is for physical CPUs and we just don't care yet.
 * We'll care when we need to query PAT PDC about a CPU *after*
 * boot time (ie shutdown a CPU from an OS perspective).
 */
		if (cpu_info.cpu_num >= NR_CPUS) {
			printk(KERN_WARNING "IGNORING CPU at %pa,"
				" cpu_slot_id > NR_CPUS"
				" (%ld > %d)\n",
				&dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
			/* Ignore CPU since it will only crash */
			boot_cpu_data.cpu_count--;
			return 1;
		} else {
			cpuid = cpu_info.cpu_num;
		}
#endif
	}
#endif

	p = &per_cpu(cpu_data, cpuid);
	boot_cpu_data.cpu_count++;

	/* initialize counters - CPU 0 gets it_value set in time_init() */
	if (cpuid)
		memset(p, 0, sizeof(struct cpuinfo_parisc));

	p->dev = dev;			/* Save IODC data in case we need it */
	p->hpa = dev->hpa.start;	/* save CPU hpa */
	p->cpuid = cpuid;		/* save CPU id */
	p->txn_addr = txn_addr;		/* save CPU IRQ address */
	p->cpu_num = cpu_info.cpu_num;
	p->cpu_loc = cpu_info.cpu_loc;

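	/*
	 * Mark this logical CPU as possible and record its package/core
	 * topology for the scheduler and topology sysfs.
	 */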
	set_cpu_possible(cpuid, true);
	store_cpu_topology(cpuid);

#ifdef CONFIG_SMP
	/*
	** FIXME: review if any other initialization is clobbered
	** for boot_cpu by the above memset().
	*/
	init_percpu_prof(cpuid);
#endif

	/*
	** CONFIG_SMP: init_smp_config() will attempt to get CPUs into
	** OS control. RENDEZVOUS is the default state - see the memset above.
	** p->state = STATE_RENDEZVOUS;
	*/

#if 0
	/* CPU 0 IRQ table is statically allocated/initialized */
	if (cpuid) {
		struct irqaction actions[];

		/*
		** itimer and ipi IRQ handlers are statically initialized in
		** arch/parisc/kernel/irq.c. ie Don't need to register them.
		*/
		actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC);
		if (!actions) {
			/* not getting its own table, share with monarch */
			actions = cpu_irq_actions[0];
		}

		cpu_irq_actions[cpuid] = actions;
	}
#endif

	/*
	 * Bring this CPU up now! (ignore bootstrap cpuid == 0)
	 */
#ifdef CONFIG_SMP
	if (cpuid) {
		set_cpu_present(cpuid, true);
		add_cpu(cpuid);
	}
#endif

	return 0;
}

/**
 * collect_boot_cpu_data - Fill the boot_cpu_data structure.
 *
 * This function collects and stores the generic processor information
 * in the boot_cpu_data structure.
 */
void __init collect_boot_cpu_data(void)
{
	unsigned long cr16_seed;
	char orig_prod_num[64], current_prod_num[64], serial_no[64];

	memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));

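	/*
	 * get_cycles() reads the CR16 interval timer; its value at this
	 * point differs from boot to boot, so feed it to the entropy pool.
	 */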
	cr16_seed = get_cycles();
	add_device_randomness(&cr16_seed, sizeof(cr16_seed));

	boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */

	/* get CPU-Model Information... */
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
	if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) {
		printk(KERN_INFO
			"model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
			p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);

		add_device_randomness(&boot_cpu_data.pdc.model,
			sizeof(boot_cpu_data.pdc.model));
	}
#undef p

	if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK) {
		printk(KERN_INFO "vers %08lx\n",
			boot_cpu_data.pdc.versions);

		add_device_randomness(&boot_cpu_data.pdc.versions,
			sizeof(boot_cpu_data.pdc.versions));
	}

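	/*
	 * The PDC cpuid word packs the revision into the low 5 bits and
	 * the version into the following 7 bits; unpack both for display.
	 */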
	if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK) {
		printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
			(boot_cpu_data.pdc.cpuid >> 5) & 127,
			boot_cpu_data.pdc.cpuid & 31,
			boot_cpu_data.pdc.cpuid);

		add_device_randomness(&boot_cpu_data.pdc.cpuid,
			sizeof(boot_cpu_data.pdc.cpuid));
	}

	if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
		printk(KERN_INFO "capabilities 0x%lx\n",
			boot_cpu_data.pdc.capabilities);

	if (pdc_model_sysmodel(OS_ID_HPUX, boot_cpu_data.pdc.sys_model_name) == PDC_OK)
		pr_info("HP-UX model name: %s\n",
			boot_cpu_data.pdc.sys_model_name);

	serial_no[0] = 0;
	if (pdc_model_sysmodel(OS_ID_MPEXL, serial_no) == PDC_OK &&
		serial_no[0])
		pr_info("MPE/iX model name: %s\n", serial_no);

	dump_stack_set_arch_desc("%s", boot_cpu_data.pdc.sys_model_name);

	boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion;
	boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion;

	boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion);
	boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0];
	boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1];

#ifdef CONFIG_PA8X00
	_parisc_requires_coherency = (boot_cpu_data.cpu_type == mako) ||
				(boot_cpu_data.cpu_type == mako2);
#endif

	if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) {
		printk(KERN_INFO "product %s, original product %s, S/N: %s\n",
			current_prod_num[0] ? current_prod_num : "n/a",
			orig_prod_num, serial_no);
		add_device_randomness(orig_prod_num, strlen(orig_prod_num));
		add_device_randomness(current_prod_num, strlen(current_prod_num));
		add_device_randomness(serial_no, strlen(serial_no));
	}
}


/**
 * init_per_cpu - Handle individual processor initializations.
 * @cpunum: logical processor number.
 *
 * This function handles initialization for *every* CPU
 * in the system:
 *
 * o Set "default" CPU width for trap handlers
 *
 * o Enable FP coprocessor
 *   REVISIT: this could be done in the "code 22" trap handler.
 *   (frowand's idea - that way we know which processes need FP
 *   registers saved on the interrupt stack.)
 *   NEWS FLASH: wide kernels need the FP coprocessor enabled to handle
 *   formatted printing of %lx for example (double divides I think)
 *
 * o Enable CPU profiling hooks.
 */
int init_per_cpu(int cpunum)
{
	int ret;
	struct pdc_coproc_cfg coproc_cfg;

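	/*
	 * Set the default CPU width, then query PDC for the coprocessor
	 * configuration so the FP unit can be enabled in the CCR below.
	 */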
	set_firmware_width();
	ret = pdc_coproc_cfg(&coproc_cfg);

	if (ret >= 0 && coproc_cfg.ccr_functional) {
		mtctl(coproc_cfg.ccr_functional, 10);	/* 10 == Coprocessor Control Reg */

		/* FWIW, FP rev/model is a more accurate way to determine
		** CPU type. CPU rev/model has some ambiguous cases.
		*/
		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;

		if (cpunum == 0)
			printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
				cpunum, coproc_cfg.revision, coproc_cfg.model);

		/*
		** store status register to stack (hopefully aligned)
		** and clear the T-bit.
		*/
		asm volatile ("fstd %fr0,8(%sp)");

	} else {
		printk(KERN_WARNING "WARNING: No FP CoProcessor?!"
			" (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
#ifdef CONFIG_64BIT
			"Halting Machine - FP required\n"
#endif
			, coproc_cfg.ccr_functional);
#ifdef CONFIG_64BIT
		mdelay(100);	/* previous chars get pushed to console */
		panic("FP CoProc not reported");
#endif
	}

	/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
	init_percpu_prof(cpunum);

	btlb_init_per_cpu();

	return ret;
}

/*
 * Display CPU info for all CPUs.
 */
int
show_cpuinfo (struct seq_file *m, void *v)
{
	unsigned long cpu;
	char cpu_name[60], *p;

	/* strip PA path from CPU name to not confuse lscpu */
	strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
	p = strrchr(cpu_name, '[');
	if (p)
		*(--p) = 0;

	for_each_online_cpu(cpu) {
#ifdef CONFIG_SMP
		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

		if (0 == cpuinfo->hpa)
			continue;
#endif
		seq_printf(m, "processor\t: %lu\n"
				"cpu family\t: PA-RISC %s\n",
				cpu, boot_cpu_data.family_name);

		seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name);

		/* cpu MHz */
		seq_printf(m, "cpu MHz\t\t: %d.%06d\n",
				boot_cpu_data.cpu_hz / 1000000,
				boot_cpu_data.cpu_hz % 1000000);

#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
		seq_printf(m, "physical id\t: %d\n",
			topology_physical_package_id(cpu));
		seq_printf(m, "siblings\t: %d\n",
			cpumask_weight(topology_core_cpumask(cpu)));
		seq_printf(m, "core id\t\t: %d\n", topology_core_id(cpu));
#endif

		seq_printf(m, "capabilities\t:");
		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
			seq_puts(m, " os32");
		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
			seq_puts(m, " os64");
		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)
			seq_puts(m, " iopdir_fdc");
		switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) {
		case PDC_MODEL_NVA_SUPPORTED:
			seq_puts(m, " nva_supported");
			break;
		case PDC_MODEL_NVA_SLOW:
			seq_puts(m, " nva_slow");
			break;
		case PDC_MODEL_NVA_UNSUPPORTED:
			seq_puts(m, " needs_equivalent_aliasing");
			break;
		}
		seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);

		seq_printf(m, "model\t\t: %s - %s\n",
				boot_cpu_data.pdc.sys_model_name,
				cpu_name);

		seq_printf(m, "hversion\t: 0x%08x\n"
				"sversion\t: 0x%08x\n",
				boot_cpu_data.hversion,
				boot_cpu_data.sversion);

		/* print cachesize info */
		show_cache_info(m);

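		/*
		 * BogoMIPS = loops_per_jiffy * HZ / 500000, printed as an
		 * integer part plus a two-digit fractional part.
		 */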
		seq_printf(m, "bogomips\t: %lu.%02lu\n",
				loops_per_jiffy / (500000 / HZ),
				loops_per_jiffy / (5000 / HZ) % 100);

		seq_printf(m, "software id\t: %ld\n\n",
				boot_cpu_data.pdc.model.sw_id);
	}
	return 0;
}

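/*
 * Match any native processor module (HPHW_NPROC) regardless of its
 * hversion/sversion, so every CPU found during inventory gets probed.
 */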
static const struct parisc_device_id processor_tbl[] __initconst = {
	{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
	{ 0, }
};

static struct parisc_driver cpu_driver __refdata = {
	.name		= "CPU",
	.id_table	= processor_tbl,
	.probe		= processor_probe
};

/**
 * processor_init - Processor initialization procedure.
 *
 * Register this driver.
 */
void __init processor_init(void)
{
	unsigned int cpu;

	reset_cpu_topology();

	/* reset possible mask. We will mark those which are possible. */
	for_each_possible_cpu(cpu)
		set_cpu_possible(cpu, false);

	register_parisc_driver(&cpu_driver);
}