/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>

	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm

	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to be read from the CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU. If there's no CPU reset function,
	 *	specify CPU_NO_RESET_FUNC
	 * _extra1:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
		\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
		\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	  /*
	   * Place errata reported flag, and the spinlock to arbitrate access to
	   * it in the data section.
	   */
	  .pushsection .data
	  define_asm_spinlock \_name\()_errata_lock
	  \_name\()_errata_reported:
	  .word	0
	  .popsection

	  /* Place CPU string in rodata */
	  .pushsection .rodata
	  \_name\()_cpu_str:
	  .asciz "\_name"
	  .popsection
	.endif

	/*
	 * Mandatory errata status printing function for CPUs of
	 * this class.
	 */
	.quad \_name\()_errata_report
	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm

	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, \
			\_power_down_ops
	.endm

	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			0, 0, 0, \_e_handler, \_power_down_ops
	.endm

	.macro declare_cpu_ops_wa _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, 0, \_power_down_ops
	.endm
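
	/*
	 * For illustration, a typical invocation looks like the following.
	 * The CPU name, MIDR constant and function names here are
	 * hypothetical, not taken from an actual cpu library file:
	 *
	 *	declare_cpu_ops cortex_foo, CORTEX_FOO_MIDR, \
	 *		cortex_foo_reset_func, \
	 *		cortex_foo_core_pwr_dwn, \
	 *		cortex_foo_cluster_pwr_dwn
	 */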

/* TODO can be deleted once all CPUs have been converted */
#if REPORT_ERRATA
	/*
	 * Print status of a CPU errata
	 *
	 * _chosen:
	 *	Identifier indicating whether or not a CPU errata has been
	 *	compiled in.
	 * _cpu:
	 *	Name of the CPU
	 * _id:
	 *	Errata identifier
	 * _rev_var:
	 *	Register containing the combined value of CPU revision and
	 *	variant - typically the return value of cpu_get_rev_var
	 */
	.macro report_errata _chosen, _cpu, _id, _rev_var=x8
	/* Stash a string with errata ID */
	.pushsection .rodata
	\_cpu\()_errata_\_id\()_str:
	.asciz	"\_id"
	.popsection

	/* Check whether errata applies */
	mov	x0, \_rev_var
	/* Shall clobber: x0-x7 */
	bl	check_errata_\_id

	.ifeq \_chosen
	/*
	 * Errata workaround has not been compiled in. If the errata would have
	 * applied had it been compiled in, print its status as missing.
	 */
	cbz	x0, 900f
	mov	x0, #ERRATA_MISSING
	.endif
900:
	adr	x1, \_cpu\()_cpu_str
	adr	x2, \_cpu\()_errata_\_id\()_str
	bl	errata_print_msg
	.endm
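
	/*
	 * For illustration only - reporting a made-up erratum for a
	 * hypothetical CPU would look like:
	 *
	 *	report_errata ERRATA_FOO_123456, cortex_foo, 123456
	 */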
#endif

	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 */
	.macro	cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm
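
	/*
	 * Illustrative use (register choice and label are arbitrary): skip
	 * the CVE-2017-5715 mitigation when CSV2 is implemented.
	 *
	 *	cpu_check_csv2 x0, 1f
	 *	... apply mitigation here ...
	 * 1:
	 */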

	/*
	 * Helper macro that reads the part number of the current
	 * CPU and jumps to the given label if it matches the CPU
	 * MIDR provided.
	 *
	 * Clobbers x0.
	 */
	.macro  jump_if_cpu_midr _cpu_midr, _label
	mrs	x0, midr_el1
	ubfx	x0, x0, MIDR_PN_SHIFT, #12
	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
	b.eq	\_label
	.endm
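
	/*
	 * Illustrative use (the MIDR constant is hypothetical):
	 *
	 *	jump_if_cpu_midr CORTEX_FOO_MIDR, 1f
	 *	b	2f
	 * 1:	... CPU-specific handling ...
	 * 2:
	 */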

/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _apply_at_reset:
 *	Whether the erratum should be automatically applied at reset
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	.pushsection .rodata.errata_entries
		.align	3
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		/* check if unused and compile out if no references */
		.if \_apply_at_reset && \_chosen
			.quad	erratum_\_cpu\()_\_id\()_wa
		.else
			.quad	0
		.endif
		/* TODO(errata ABI): this prevents all checker functions from
		 * being optimised away. Can be done away with unless the ABI
		 * needs them */
		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 characters in the ID field */
		.word	\_id
		.hword	\_cve
		.byte	\_chosen
		/* TODO(errata ABI): mitigated field for known but unmitigated
		 * errata */
		.byte	0x1
	.popsection
.endm
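
/*
 * Illustrative invocation (CPU name and numbers are made up). Most errata use
 * the workaround_*_start wrappers below, which call this macro internally;
 * direct use for a runtime-only erratum would look like:
 *
 *	add_erratum_entry cortex_foo, ERRATUM(123456), ERRATA_FOO_123456, 0
 */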

.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset

	func erratum_\_cpu\()_\_id\()_wa
		mov	x8, x30

		/* save rev_var for workarounds that might need it but don't
		 * restore to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		cbz	x0, erratum_\_cpu\()_\_id\()_skip
.endm

.macro _workaround_end _cpu:req, _id:req
	erratum_\_cpu\()_\_id\()_skip:
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
.endm

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip

		1:
	.endif
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	_workaround_end \_cpu, \_id
.endm

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to issue the isb for
	 * them, so missing the isb here could be very problematic. It is also
	 * an easy mistake to make, as runtime errata tend to be scattered in
	 * generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	_workaround_end \_cpu, \_id
.endm
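
/*
 * For illustration, a reset-time workaround for a made-up CPU and erratum
 * number would typically be wrapped as follows (the sysreg and bit names are
 * hypothetical too):
 *
 *	workaround_reset_start cortex_foo, ERRATUM(123456), ERRATA_FOO_123456
 *		sysreg_bit_set CORTEX_FOO_CPUACTLR_EL1, BIT(12)
 *	workaround_reset_end cortex_foo, ERRATUM(123456)
 *
 * A runtime workaround uses workaround_runtime_start/end instead and is
 * invoked from generic code via apply_erratum (see below).
 */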

/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm

.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm

/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
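
/*
 * Illustrative uses of the helpers above (register, bit and field values are
 * made up):
 *
 *	sysreg_bit_set CORTEX_FOO_CPUACTLR_EL1, BIT(22)
 *	sysreg_bit_clear CORTEX_FOO_CPUECTLR_EL1, BIT(3)
 *	sysreg_bitfield_insert CORTEX_FOO_CPUECTLR_EL1, 0x3, 0, 2
 */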

/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU
 *	revision fetching procedure. Stores the result of this in the
 *	temporary register x10.
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen & \_get_rev)
		mov	x9, x30
		bl	cpu_get_rev_var
		mov	x10, x0
	.elseif (\_chosen)
		mov	x9, x30
		mov	x0, x10
	.endif

	.if \_chosen
		bl	erratum_\_cpu\()_\_id\()_wa
		mov	x30, x9
	.endif
.endm
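
/*
 * For illustration, generic code could invoke a runtime workaround for a
 * made-up CPU and erratum number as:
 *
 *	apply_erratum cortex_foo, ERRATUM(123456), ERRATA_FOO_123456
 *
 * which fetches the CPU revision, calls erratum_cortex_foo_123456_wa and
 * restores the link register afterwards.
 */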

/*
 * Helpers to select which revisions errata apply to. These set up the
 * arguments and tail-call the cpu_rev_var_*** helpers, which issue the ret
 * themselves, so no link register needs to be saved.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x4
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num
		b	cpu_rev_var_ls
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num
		b	cpu_rev_var_hs
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num_lo
		mov	x2, #\_rev_num_hi
		b	cpu_rev_var_range
	endfunc check_erratum_\_cpu\()_\_id
.endm
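
/*
 * For illustration, an erratum that applies to revisions up to and including
 * r1p1 of a made-up CPU could generate its checker function with:
 *
 *	check_erratum_ls cortex_foo, ERRATUM(123456), CPU_REV(1, 1)
 *
 * where CPU_REV() packs the variant and revision in the format returned by
 * cpu_get_rev_var.
 */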

.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

/* provide a shorthand for the name format for annoying errata */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc check_erratum_\_cpu\()_\_id
.endm

/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	func \_cpu\()_reset_func
		mov	x15, x30
		bl	cpu_get_rev_var
		mov	x14, x0

		/* short circuit the location to avoid searching the list */
		adrp	x12, \_cpu\()_errata_list_start
		add	x12, x12, :lo12:\_cpu\()_errata_list_start
		adrp	x13, \_cpu\()_errata_list_end
		add	x13, x13, :lo12:\_cpu\()_errata_list_end

	errata_begin:
		/* if head catches up with end of list, exit */
		cmp	x12, x13
		b.eq	errata_end

		ldr	x10, [x12, #ERRATUM_WA_FUNC]
		/* TODO(errata ABI): check mitigated and checker function fields
		 * for 0 */
		ldrb	w11, [x12, #ERRATUM_CHOSEN]

		/* skip if not chosen */
		cbz	x11, 1f
		/* skip if runtime erratum */
		cbz	x10, 1f

		/* put cpu revision in x0 and call workaround */
		mov	x0, x14
		blr	x10
	1:
		add	x12, x12, #ERRATUM_ENTRY_SIZE
		b	errata_begin
	errata_end:
.endm

.macro cpu_reset_func_end _cpu:req
		isb
		ret	x15
	endfunc \_cpu\()_reset_func
.endm
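
/*
 * For illustration, a minimal CPU reset function for a made-up CPU would be
 * written as:
 *
 *	cpu_reset_func_start cortex_foo
 *		... extra reset-time setup, x14 holds cpu_rev_var ...
 *	cpu_reset_func_end cortex_foo
 *
 * The wrapper loops over the CPU's errata list, calls every chosen reset-time
 * workaround and finishes with an isb.
 */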

/*
 * Maintain compatibility with the old scheme where each cpu has its own
 * reporting. TODO: remove entirely once all cpus have been converted. This
 * includes the cpu_ops entry, as print_errata_status can call this directly
 * for all cpus
 */
.macro errata_report_shim _cpu:req
	#if REPORT_ERRATA
	func \_cpu\()_errata_report
		/* normal stack frame for pretty debugging */
		stp	x29, x30, [sp, #-16]!
		mov	x29, sp

		bl	generic_errata_report

		ldp	x29, x30, [sp], #16
		ret
	endfunc \_cpu\()_errata_report
	#endif
.endm
#endif /* CPU_MACROS_S */