// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI (Extensible Firmware Interface) support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 * Copyright (C) 2013 SuSE Labs
 *	Borislav Petkov <bp@suse.de> - runtime services VA mapping
 *
 * Copied from efi_32.c to eliminate the duplicated code between EFI
 * 32/64 support code. --ying 2007-10-26
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version. --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls. --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>

#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>

static unsigned long efi_systab_phys __initdata;
static unsigned long prop_phys = EFI_INVALID_TABLE_ADDR;
static unsigned long uga_phys = EFI_INVALID_TABLE_ADDR;
static unsigned long efi_runtime, efi_nr_tables;

unsigned long efi_fw_vendor, efi_config_table;

static const efi_config_table_type_t arch_tables[] __initconst = {
	{EFI_PROPERTIES_TABLE_GUID,	&prop_phys,		"PROP"		},
	{UGA_IO_PROTOCOL_GUID,		&uga_phys,		"UGA"		},
#ifdef CONFIG_X86_UV
	{UV_SYSTEM_TABLE_GUID,		&uv_systab_phys,	"UVsystab"	},
#endif
	{},
};

static const unsigned long * const efi_tables[] = {
	&efi.acpi,
	&efi.acpi20,
	&efi.smbios,
	&efi.smbios3,
	&uga_phys,
#ifdef CONFIG_X86_UV
	&uv_systab_phys,
#endif
	&efi_fw_vendor,
	&efi_runtime,
	&efi_config_table,
	&efi.esrt,
	&prop_phys,
	&efi_mem_attr_table,
#ifdef CONFIG_EFI_RCI2_TABLE
	&rci2_table_phys,
#endif
	&efi.tpm_log,
	&efi.tpm_final_log,
	&efi_rng_seed,
#ifdef CONFIG_LOAD_UEFI_KEYS
	&efi.mokvar_table,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	&efi.coco_secret,
#endif
};

u64 efi_setup;		/* efi setup_data physical address */

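/*
 * Set when "add_efi_memmap" is passed on the kernel command line: import
 * the full EFI memory map into e820 instead of relying solely on the
 * (at most 128) e820 entries passed in via the zeropage.
 */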
static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
	add_efi_memmap = 1;
	return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);

/*
 * Tell the kernel about the EFI memory map. This might include
 * more than the max 128 entries that can fit in the passed in e820
 * legacy (zeropage) memory map, but the kernel's e820 table can hold
 * E820_MAX_ENTRIES.
 */

static void __init do_add_efi_memmap(void)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
		int e820_type;

		switch (md->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
			if (efi_soft_reserve_enabled()
			    && (md->attribute & EFI_MEMORY_SP))
				e820_type = E820_TYPE_SOFT_RESERVED;
			else if (md->attribute & EFI_MEMORY_WB)
				e820_type = E820_TYPE_RAM;
			else
				e820_type = E820_TYPE_RESERVED;
			break;
		case EFI_ACPI_RECLAIM_MEMORY:
			e820_type = E820_TYPE_ACPI;
			break;
		case EFI_ACPI_MEMORY_NVS:
			e820_type = E820_TYPE_NVS;
			break;
		case EFI_UNUSABLE_MEMORY:
			e820_type = E820_TYPE_UNUSABLE;
			break;
		case EFI_PERSISTENT_MEMORY:
			e820_type = E820_TYPE_PMEM;
			break;
		default:
			/*
			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
			 */
			e820_type = E820_TYPE_RESERVED;
			break;
		}

		e820__range_add(start, size, e820_type);
	}
	e820__update_table(e820_table);
}

/*
 * Given add_efi_memmap defaults to 0 and there is no alternative
 * e820 mechanism for soft-reserved memory, import the full EFI memory
 * map if soft reservations are present and enabled. Otherwise, the
 * mechanism to disable the kernel's consideration of EFI_MEMORY_SP is
 * the efi=nosoftreserve option.
 */
static bool do_efi_soft_reserve(void)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return false;

	if (!efi_soft_reserve_enabled())
		return false;

	for_each_efi_memory_desc(md)
		if (md->type == EFI_CONVENTIONAL_MEMORY &&
		    (md->attribute & EFI_MEMORY_SP))
			return true;
	return false;
}

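/*
 * Pick up the EFI memory map passed by the boot loader via boot_params,
 * register it with the early memmap code and reserve it in memblock so
 * it is not clobbered before efi_init() runs.
 */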
int __init efi_memblock_x86_reserve_range(void)
{
	struct efi_info *e = &boot_params.efi_info;
	struct efi_memory_map_data data;
	phys_addr_t pmap;
	int rv;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	/* Can't handle firmware tables above 4GB on i386 */
	if (IS_ENABLED(CONFIG_X86_32) && e->efi_memmap_hi > 0) {
		pr_err("Memory map is above 4GB, disabling EFI.\n");
		return -EINVAL;
	}
	pmap = (phys_addr_t)(e->efi_memmap | ((u64)e->efi_memmap_hi << 32));

	data.phys_map = pmap;
	data.size = e->efi_memmap_size;
	data.desc_size = e->efi_memdesc_size;
	data.desc_version = e->efi_memdesc_version;

	rv = efi_memmap_init_early(&data);
	if (rv)
		return rv;

	if (add_efi_memmap || do_efi_soft_reserve())
		do_add_efi_memmap();

	efi_fake_memmap_early();

	WARN(efi.memmap.desc_version != 1,
	     "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
	     efi.memmap.desc_version);

	memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
	set_bit(EFI_PRESERVE_BS_REGIONS, &efi.flags);

	return 0;
}

#define OVERFLOW_ADDR_SHIFT	(64 - EFI_PAGE_SHIFT)
#define OVERFLOW_ADDR_MASK	(U64_MAX << OVERFLOW_ADDR_SHIFT)
#define U64_HIGH_BIT		(~(U64_MAX >> 1))

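/*
 * Sanity-check a single EFI memory descriptor: reject empty regions and
 * regions whose page count would wrap past the end of the 64-bit physical
 * address space, logging the offending entry as a firmware bug.
 */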
static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
{
	u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
	u64 end_hi = 0;
	char buf[64];

	if (md->num_pages == 0) {
		end = 0;
	} else if (md->num_pages > EFI_PAGES_MAX ||
		   EFI_PAGES_MAX - md->num_pages <
		   (md->phys_addr >> EFI_PAGE_SHIFT)) {
		end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
			>> OVERFLOW_ADDR_SHIFT;

		if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
			end_hi += 1;
	} else {
		return true;
	}

	pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");

	if (end_hi) {
		pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr, end_hi, end);
	} else {
		pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr, end);
	}
	return false;
}

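/*
 * Compact the EFI memory map in place, dropping any descriptors that fail
 * efi_memmap_entry_valid(), and install the trimmed map if anything was
 * removed.
 */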
static void __init efi_clean_memmap(void)
{
	efi_memory_desc_t *out = efi.memmap.map;
	const efi_memory_desc_t *in = out;
	const efi_memory_desc_t *end = efi.memmap.map_end;
	int i, n_removal;

	for (i = n_removal = 0; in < end; i++) {
		if (efi_memmap_entry_valid(in, i)) {
			if (out != in)
				memcpy(out, in, efi.memmap.desc_size);
			out = (void *)out + efi.memmap.desc_size;
		} else {
			n_removal++;
		}
		in = (void *)in + efi.memmap.desc_size;
	}

	if (n_removal > 0) {
		struct efi_memory_map_data data = {
			.phys_map = efi.memmap.phys_map,
			.desc_version = efi.memmap.desc_version,
			.desc_size = efi.memmap.desc_size,
			.size = efi.memmap.desc_size * (efi.memmap.nr_map - n_removal),
			.flags = 0,
		};

		pr_warn("Removing %d invalid memory map entries.\n", n_removal);
		efi_memmap_install(&data);
	}
}

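/* Log the EFI memory map; called when EFI_DBG ("efi=debug") is enabled. */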
void __init efi_print_memmap(void)
{
	efi_memory_desc_t *md;
	int i = 0;

	for_each_efi_memory_desc(md) {
		char buf[64];

		pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
			i++, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr,
			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
	}
}

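/*
 * Map the EFI system table at @phys, validate its header and record the
 * firmware vendor, runtime services and configuration table pointers
 * (honouring kexec-provided setup_data overrides on 64-bit).
 */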
static int __init efi_systab_init(unsigned long phys)
{
	int size = efi_enabled(EFI_64BIT) ? sizeof(efi_system_table_64_t)
					  : sizeof(efi_system_table_32_t);
	const efi_table_hdr_t *hdr;
	bool over4g = false;
	void *p;
	int ret;

	hdr = p = early_memremap_ro(phys, size);
	if (p == NULL) {
		pr_err("Couldn't map the system table!\n");
		return -ENOMEM;
	}

	ret = efi_systab_check_header(hdr, 1);
	if (ret) {
		early_memunmap(p, size);
		return ret;
	}

	if (efi_enabled(EFI_64BIT)) {
		const efi_system_table_64_t *systab64 = p;

		efi_runtime = systab64->runtime;
		over4g = systab64->runtime > U32_MAX;

		if (efi_setup) {
			struct efi_setup_data *data;

			data = early_memremap_ro(efi_setup, sizeof(*data));
			if (!data) {
				early_memunmap(p, size);
				return -ENOMEM;
			}

			efi_fw_vendor = (unsigned long)data->fw_vendor;
			efi_config_table = (unsigned long)data->tables;

			over4g |= data->fw_vendor > U32_MAX ||
				  data->tables > U32_MAX;

			early_memunmap(data, sizeof(*data));
		} else {
			efi_fw_vendor = systab64->fw_vendor;
			efi_config_table = systab64->tables;

			over4g |= systab64->fw_vendor > U32_MAX ||
				  systab64->tables > U32_MAX;
		}
		efi_nr_tables = systab64->nr_tables;
	} else {
		const efi_system_table_32_t *systab32 = p;

		efi_fw_vendor = systab32->fw_vendor;
		efi_runtime = systab32->runtime;
		efi_config_table = systab32->tables;
		efi_nr_tables = systab32->nr_tables;
	}

	efi.runtime_version = hdr->revision;

	efi_systab_report_header(hdr, efi_fw_vendor);
	early_memunmap(p, size);

	if (IS_ENABLED(CONFIG_X86_32) && over4g) {
		pr_err("EFI data located above 4GB, disabling EFI.\n");
		return -EINVAL;
	}

	return 0;
}

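/*
 * Map the firmware's configuration table array and hand it to the generic
 * parser together with the x86-specific entries in @arch_tables.
 */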
static int __init efi_config_init(const efi_config_table_type_t *arch_tables)
{
	void *config_tables;
	int sz, ret;

	if (efi_nr_tables == 0)
		return 0;

	if (efi_enabled(EFI_64BIT))
		sz = sizeof(efi_config_table_64_t);
	else
		sz = sizeof(efi_config_table_32_t);

	/*
	 * Let's see what config tables the firmware passed to us.
	 */
	config_tables = early_memremap(efi_config_table, efi_nr_tables * sz);
	if (config_tables == NULL) {
		pr_err("Could not map Configuration table!\n");
		return -ENOMEM;
	}

	ret = efi_config_parse_tables(config_tables, efi_nr_tables,
				      arch_tables);

	early_memunmap(config_tables, efi_nr_tables * sz);
	return ret;
}

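/*
 * Locate the EFI system and configuration tables handed over by the boot
 * loader/EFI stub and decide whether runtime services can be used at all.
 */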
void __init efi_init(void)
{
	if (IS_ENABLED(CONFIG_X86_32) &&
	    (boot_params.efi_info.efi_systab_hi ||
	     boot_params.efi_info.efi_memmap_hi)) {
		pr_info("Table located above 4GB, disabling EFI.\n");
		return;
	}

	efi_systab_phys = boot_params.efi_info.efi_systab |
			  ((__u64)boot_params.efi_info.efi_systab_hi << 32);

	if (efi_systab_init(efi_systab_phys))
		return;

	if (efi_reuse_config(efi_config_table, efi_nr_tables))
		return;

	if (efi_config_init(arch_tables))
		return;

	/*
	 * Note: We currently don't support runtime services on an EFI
	 * that doesn't match the kernel 32/64-bit mode.
	 */

	if (!efi_runtime_supported())
		pr_err("No EFI runtime due to 32/64-bit mismatch with kernel\n");

	if (!efi_runtime_supported() || efi_runtime_disabled()) {
		efi_memmap_unmap();
		return;
	}

	/* Parse the EFI Properties table if it exists */
	if (prop_phys != EFI_INVALID_TABLE_ADDR) {
		efi_properties_table_t *tbl;

		tbl = early_memremap_ro(prop_phys, sizeof(*tbl));
		if (tbl == NULL) {
			pr_err("Could not map Properties table!\n");
		} else {
			if (tbl->memory_protection_attribute &
			    EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA)
				set_bit(EFI_NX_PE_DATA, &efi.flags);

			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
	efi_clean_memmap();

	if (efi_enabled(EFI_DBG))
		efi_print_memmap();
}

/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
	efi_memory_desc_t *md, *prev_md = NULL;

	for_each_efi_memory_desc(md) {
		u64 prev_size;

		if (!prev_md) {
			prev_md = md;
			continue;
		}

		if (prev_md->type != md->type ||
		    prev_md->attribute != md->attribute) {
			prev_md = md;
			continue;
		}

		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;

		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
			prev_md->num_pages += md->num_pages;
			md->type = EFI_RESERVED_TYPE;
			md->attribute = 0;
			continue;
		}
		prev_md = md;
	}
}

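/*
 * Double the allocation backing the runtime memory map copy, preserving its
 * contents. The old buffer is freed in both the success and the failure
 * case; the very first call passes old_memmap == NULL.
 */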
static void *realloc_pages(void *old_memmap, int old_shift)
{
	void *ret;

	ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
	if (!ret)
		goto out;

	/*
	 * A first-time allocation doesn't have anything to copy.
	 */
	if (!old_memmap)
		return ret;

	memcpy(ret, old_memmap, PAGE_SIZE << old_shift);

out:
	free_pages((unsigned long)old_memmap, old_shift);
	return ret;
}

/*
 * Iterate the EFI memory map in reverse order because the regions
 * will be mapped top-down. The end result is the same as if we had
 * mapped things forward, but doesn't require us to change the
 * existing implementation of efi_map_region().
 */
static inline void *efi_map_next_entry_reverse(void *entry)
{
	/* Initial call */
	if (!entry)
		return efi.memmap.map_end - efi.memmap.desc_size;

	entry -= efi.memmap.desc_size;
	if (entry < efi.memmap.map)
		return NULL;

	return entry;
}

/*
 * efi_map_next_entry - Return the next EFI memory map descriptor
 * @entry: Previous EFI memory map descriptor
 *
 * This is a helper function to iterate over the EFI memory map, which
 * we do in different orders depending on the current configuration.
 *
 * To begin traversing the memory map @entry must be %NULL.
 *
 * Returns %NULL when we reach the end of the memory map.
 */
static void *efi_map_next_entry(void *entry)
{
	if (efi_enabled(EFI_64BIT)) {
		/*
		 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
		 * config table feature requires us to map all entries
		 * in the same order as they appear in the EFI memory
		 * map. That is to say, entry N must have a lower
		 * virtual address than entry N+1. This is because the
		 * firmware toolchain leaves relative references in
		 * the code/data sections, which are split and become
		 * separate EFI memory regions. Mapping things
		 * out-of-order leads to the firmware accessing
		 * unmapped addresses.
		 *
		 * Since we need to map things this way whether or not
		 * the kernel actually makes use of
		 * EFI_PROPERTIES_TABLE, let's just switch to this
		 * scheme by default for 64-bit.
		 */
		return efi_map_next_entry_reverse(entry);
	}

	/* Initial call */
	if (!entry)
		return efi.memmap.map;

	entry += efi.memmap.desc_size;
	if (entry >= efi.memmap.map_end)
		return NULL;

	return entry;
}

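/* Decide whether a memory descriptor needs a mapping in the EFI page tables. */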
static bool should_map_region(efi_memory_desc_t *md)
{
	/*
	 * Runtime regions always require runtime mappings (obviously).
	 */
	if (md->attribute & EFI_MEMORY_RUNTIME)
		return true;

	/*
	 * 32-bit EFI doesn't suffer from the bug that requires us to
	 * reserve boot services regions, and mixed mode support
	 * doesn't exist for 32-bit kernels.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	/*
	 * EFI specific purpose memory may be reserved by default
	 * depending on kernel config and boot options.
	 */
	if (md->type == EFI_CONVENTIONAL_MEMORY &&
	    efi_soft_reserve_enabled() &&
	    (md->attribute & EFI_MEMORY_SP))
		return false;

	/*
	 * Map all of RAM so that we can access arguments in the 1:1
	 * mapping when making EFI runtime calls.
	 */
	if (efi_is_mixed()) {
		if (md->type == EFI_CONVENTIONAL_MEMORY ||
		    md->type == EFI_LOADER_DATA ||
		    md->type == EFI_LOADER_CODE)
			return true;
	}

	/*
	 * Map boot services regions as a workaround for buggy
	 * firmware that accesses them even when they shouldn't.
	 *
	 * See efi_{reserve,free}_boot_services().
	 */
	if (md->type == EFI_BOOT_SERVICES_CODE ||
	    md->type == EFI_BOOT_SERVICES_DATA)
		return true;

	return false;
}

/*
 * Map the efi memory ranges of the runtime services and update new_mmap with
 * virtual addresses.
 */
static void * __init efi_map_regions(int *count, int *pg_shift)
{
	void *p, *new_memmap = NULL;
	unsigned long left = 0;
	unsigned long desc_size;
	efi_memory_desc_t *md;

	desc_size = efi.memmap.desc_size;

	p = NULL;
	while ((p = efi_map_next_entry(p))) {
		md = p;

		if (!should_map_region(md))
			continue;

		efi_map_region(md);

		if (left < desc_size) {
			new_memmap = realloc_pages(new_memmap, *pg_shift);
			if (!new_memmap)
				return NULL;

			left += PAGE_SIZE << *pg_shift;
			(*pg_shift)++;
		}

		memcpy(new_memmap + (*count * desc_size), md, desc_size);

		left -= desc_size;
		(*count)++;
	}

	return new_memmap;
}

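/*
 * kexec path: the first kernel already switched the firmware to virtual
 * mode, so rather than calling SetVirtualAddressMap() again, recreate the
 * fixed virtual mappings that were passed along via setup_data.
 */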
static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC_CORE
	efi_memory_desc_t *md;
	unsigned int num_pages;

	/*
	 * We don't do virtual mode, since we don't do runtime services, on
	 * non-native EFI.
	 */
	if (efi_is_mixed()) {
		efi_memmap_unmap();
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	/*
	 * Map efi regions which were passed via setup_data. The virt_addr is a
	 * fixed addr which was used in the first kernel of a kexec boot.
	 */
	for_each_efi_memory_desc(md)
		efi_map_region_fixed(md); /* FIXME: add error handling */

	/*
	 * Unregister the early EFI memmap from efi_init() and install
	 * the new EFI memory map.
	 */
	efi_memmap_unmap();

	if (efi_memmap_init_late(efi.memmap.phys_map,
				 efi.memmap.desc_size * efi.memmap.nr_map)) {
		pr_err("Failed to remap late EFI memory map\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
	num_pages >>= PAGE_SHIFT;

	if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_sync_low_kernel_mappings();
	efi_native_runtime_setup();
#endif
}

/*
 * This function will switch the EFI runtime services to virtual mode.
 * Essentially, we look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor into the
 * efi_pgd page table.
 *
 * The new method does a pagetable switch in a preemption-safe manner
 * so that we're in a different address space when calling a runtime
 * function. For function arguments passing we do copy the PUDs of the
 * kernel page table into efi_pgd prior to each call.
 *
 * Specifically for kexec boot, EFI runtime maps from the previous kernel
 * should be passed in via setup_data. In that case runtime ranges will be
 * mapped to the same virtual addresses as in the first kernel, see
 * kexec_enter_virtual_mode().
 */
static void __init __efi_enter_virtual_mode(void)
{
	int count = 0, pg_shift = 0;
	void *new_memmap = NULL;
	efi_status_t status;
	unsigned long pa;

	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		goto err;
	}

	efi_merge_regions();
	new_memmap = efi_map_regions(&count, &pg_shift);
	if (!new_memmap) {
		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
		goto err;
	}

	pa = __pa(new_memmap);

	/*
	 * Unregister the early EFI memmap from efi_init() and install
	 * the new EFI memory map that we are about to pass to the
	 * firmware via SetVirtualAddressMap().
	 */
	efi_memmap_unmap();

	if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
		pr_err("Failed to remap late EFI memory map\n");
		goto err;
	}

	if (efi_enabled(EFI_DBG)) {
		pr_info("EFI runtime memory map:\n");
		efi_print_memmap();
	}

	if (efi_setup_page_tables(pa, 1 << pg_shift))
		goto err;

	efi_sync_low_kernel_mappings();

	status = efi_set_virtual_address_map(efi.memmap.desc_size * count,
					     efi.memmap.desc_size,
					     efi.memmap.desc_version,
					     (efi_memory_desc_t *)pa,
					     efi_systab_phys);
	if (status != EFI_SUCCESS) {
		pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
		       status);
		goto err;
	}

	efi_check_for_embedded_firmwares();
	efi_free_boot_services();

	if (!efi_is_mixed())
		efi_native_runtime_setup();
	else
		efi_thunk_runtime_setup();

	/*
	 * Apply more restrictive page table mapping attributes now that
	 * SVAM() has been called and the firmware has performed all
	 * necessary relocation fixups for the new virtual addresses.
	 */
	efi_runtime_update_mappings();

	/* clean DUMMY object */
	efi_delete_dummy_variable();
	return;

err:
	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}

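/*
 * Entry point for switching to virtual mode: take the kexec path when EFI
 * setup_data was provided, otherwise do the full SetVirtualAddressMap()
 * handover.
 */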
void __init efi_enter_virtual_mode(void)
{
	if (efi_enabled(EFI_PARAVIRT))
		return;

	efi.runtime = (efi_runtime_services_t *)efi_runtime;

	if (efi_setup)
		kexec_enter_virtual_mode();
	else
		__efi_enter_virtual_mode();

	efi_dump_pagetable();
}

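/*
 * Return true if @phys_addr is the address of one of the firmware tables
 * tracked in efi_tables[].
 */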
bool efi_is_table_address(unsigned long phys_addr)
{
	unsigned int i;

	if (phys_addr == EFI_INVALID_TABLE_ADDR)
		return false;

	for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
		if (*(efi_tables[i]) == phys_addr)
			return true;

	return false;
}

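/*
 * Arch-specific additions to the /sys/firmware/efi/systab output
 * (currently only the UGA table address).
 */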
char *efi_systab_show_arch(char *str)
{
	if (uga_phys != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "UGA=0x%lx\n", uga_phys);
	return str;
}

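/* sysfs show() helpers for the fw_vendor, runtime and config_table attributes. */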
#define EFI_FIELD(var) efi_ ## var

#define EFI_ATTR_SHOW(name) \
static ssize_t name##_show(struct kobject *kobj, \
			   struct kobj_attribute *attr, char *buf) \
{ \
	return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \
}

EFI_ATTR_SHOW(fw_vendor);
EFI_ATTR_SHOW(runtime);
EFI_ATTR_SHOW(config_table);

struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);

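/*
 * Hide a sysfs attribute when the corresponding table address was not
 * provided by the firmware (or when running on paravirtualized EFI).
 */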
umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
	if (attr == &efi_attr_fw_vendor.attr) {
		if (efi_enabled(EFI_PARAVIRT) ||
		    efi_fw_vendor == EFI_INVALID_TABLE_ADDR)
			return 0;
	} else if (attr == &efi_attr_runtime.attr) {
		if (efi_runtime == EFI_INVALID_TABLE_ADDR)
			return 0;
	} else if (attr == &efi_attr_config_table.attr) {
		if (efi_config_table == EFI_INVALID_TABLE_ADDR)
			return 0;
	}
	return attr->mode;
}