1#!/usr/bin/env python3
2#
3# Copyright (c) 2020 Intel Corporation
4#
5# SPDX-License-Identifier: Apache-2.0
6
7"""Create the kernel's page tables for x86 CPUs.
8
9For additional detail on paging and x86 memory management, please
10consult the IA Architecture SW Developer Manual, volume 3a, chapter 4.
11
12This script produces the initial page tables installed into the CPU
13at early boot. These pages will have an identity mapping of the kernel
14image. The script takes the 'zephyr_prebuilt.elf' as input to obtain region
15sizes, certain memory addresses, and configuration values.
16
17If CONFIG_SRAM_REGION_PERMISSIONS is not enabled, the kernel image will be
18mapped with the Present and Write bits set. The linker scripts shouldn't
19add page alignment padding between sections.
20
21If CONFIG_SRAM_REGION_PERMISSIONS is enabled, the access permissions
22vary:
23  - By default, the Present, Write, and Execute Disable bits are
24    set.
25  - The __text_region region will have Present and User bits set
26  - The __rodata_region region will have Present, User, and Execute
27    Disable bits set
28  - On x86_64, the _locore region will have Present set and
29    the _lorodata region will have Present and Execute Disable set.
30
31This script will establish a dual mapping at the address defined by
32CONFIG_KERNEL_VM_BASE if it is not the same as CONFIG_SRAM_BASE_ADDRESS.
33
34  - The double-mapping is used to transition the
35    instruction pointer from a physical address at early boot to the
36    virtual address where the kernel is actually linked.
37
38  - The mapping is always double-mapped at the top-level paging structure
39    and the physical/virtual base addresses must have the same alignment
40    with respect to the scope of top-level paging structure entries.
41    This allows the same second-level paging structure(s) to be used for
42    both memory bases.
43
44  - The double-mapping is needed so that we can still fetch instructions
45    from identity-mapped physical addresses after we program this table
46    into the MMU, then jump to the equivalent virtual address.
47    The kernel then unlinks the identity mapping before continuing,
48    the address space is purely virtual after that.
49
50Because the set of page tables are linked together by physical address,
51we must know a priori the physical address of each table. The linker
52script must define a z_x86_pagetables_start symbol where the page
53tables will be placed, and this memory address must not shift between
54prebuilt and final ELF builds. This script will not work on systems
55where the physical load address of the kernel is unknown at build time.
56
5764-bit systems will always build IA-32e page tables. 32-bit systems
58build PAE page tables if CONFIG_X86_PAE is set, otherwise standard
5932-bit page tables are built.
60
61The kernel will expect to find the top-level structure of the produced
62page tables at the physical address corresponding to the symbol
63z_x86_kernel_ptables. The linker script will need to set that symbol
64to the end of the binary produced by this script, minus the size of the
65top-level paging structure as it is written out last.
66"""
67
68import sys
69import array
70import argparse
71import ctypes
72import os
73import struct
74import re
75import textwrap
76
77from packaging import version
78
79import elftools
80from elftools.elf.elffile import ELFFile
81from elftools.elf.sections import SymbolTableSection
82
# Bail out early if pyelftools is too old to provide the iteration APIs
# (iter_sections/iter_symbols) this script depends on
if version.parse(elftools.__version__) < version.parse('0.24'):
    sys.exit("pyelftools is out of date, need version 0.24 or later")
85
86
def bit(pos):
    """Return an integer with only the bit at position pos set."""
    return 1 << pos
90
91
# Page table entry flags (bit positions per the Intel SDM vol 3a)
FLAG_P = bit(0)    # Present
FLAG_RW = bit(1)   # Read/write allowed
FLAG_US = bit(2)   # User/supervisor: accessible from user mode
FLAG_CD = bit(4)   # Cache disable
FLAG_SZ = bit(7)   # Page size: large page at directory level
FLAG_G = bit(8)    # Global
FLAG_XD = bit(63)  # Execute disable (64-bit entry formats only)

# Bits 9-11 are ignored by the MMU and free for software use
FLAG_IGNORED0 = bit(9)
FLAG_IGNORED1 = bit(10)
FLAG_IGNORED2 = bit(11)

# Each ENTRY_* pairs the architectural permission bit with a software
# "ignored" bit, so the intended permission is recorded in the entry
# even when the architectural bit is later cleared/absent
ENTRY_RW = FLAG_RW | FLAG_IGNORED0
ENTRY_US = FLAG_US | FLAG_IGNORED1
ENTRY_XD = FLAG_XD | FLAG_IGNORED2

# PD_LEVEL and PT_LEVEL are used as list index to PtableSet.levels[]
# to get table from back of list.
PD_LEVEL = -2
PT_LEVEL = -1
113
114
def debug(text):
    """Print a debug message, but only when --verbose was given."""
    if args.verbose:
        prog = os.path.basename(sys.argv[0])
        sys.stdout.write(prog + ": " + text + "\n")
120
121
def verbose(text):
    """Print a message only at verbosity level 2 or more (-v -v)."""
    if not args.verbose or args.verbose <= 1:
        return
    prog = os.path.basename(sys.argv[0])
    sys.stdout.write(prog + ": " + text + "\n")
126
127
def error(text):
    """Print an error message prefixed with the program name and exit
    with a non-zero status."""
    prog = os.path.basename(sys.argv[0])
    sys.exit(prog + ": " + text)
131
132
def align_check(base, size, scope=4096):
    """Abort the program unless base and size are both multiples of scope
    (by default, page-aligned)."""
    if base % scope:
        error("unaligned base address %x" % base)
    if size % scope:
        error("Unaligned region size 0x%x for base %x" % (size, base))
139
140
def dump_flags(flags):
    """Render a set of page table entry flags as a human-readable string"""
    # Order matches the historical output: P RW US G XD SZ CD
    labels = ((FLAG_P, "P"), (FLAG_RW, "RW"), (FLAG_US, "US"),
              (FLAG_G, "G"), (FLAG_XD, "XD"), (FLAG_SZ, "SZ"),
              (FLAG_CD, "CD"))

    return " ".join(name for mask, name in labels if flags & mask)
167
168
def round_up(val, align):
    """Round val up to the next multiple of align (align must be a
    power of two)."""
    mask = align - 1
    return (val + mask) & ~mask
172
173
def round_down(val, align):
    """Round val down to the previous multiple of align (align must be a
    power of two)."""
    return val - (val & (align - 1))
177
178
# Hard-coded flags for intermediate paging levels. Permissive, we only control
# access or set caching properties at leaf levels.
# (Present | Writable | User-accessible)
INT_FLAGS = FLAG_P | FLAG_RW | FLAG_US
182
class MMUTable():
    """Represents a particular table in a set of page tables, at any level.

    Concrete subclasses supply the class attributes declared as abstract
    properties below (entry size, count, shifts and supported flags)."""

    def __init__(self):
        # All entries start zeroed, i.e. not present
        self.entries = array.array(self.type_code, [0] * self.num_entries)

    def get_binary(self):
        """Return a bytearray representation of this table"""
        # Pack every entry in one call; '<' forces little-endian layout
        fmt = "<%d%s" % (self.num_entries, self.type_code)
        return bytearray(struct.pack(fmt, *self.entries))

    @property
    def supported_flags(self):
        """Class property indicating what flag bits are supported"""
        raise NotImplementedError()

    @property
    def addr_shift(self):
        """Class property for how much to shift virtual addresses to obtain
        the appropriate index in the table for it"""
        raise NotImplementedError()

    @property
    def addr_mask(self):
        """Mask to apply to an individual entry to get the physical address
        mapping"""
        raise NotImplementedError()

    @property
    def type_code(self):
        """Struct packing letter code for table entries. Either I for
        32-bit entries, or Q for PAE/IA-32e"""
        raise NotImplementedError()

    @property
    def num_entries(self):
        """Number of entries in the table. Varies by table type and paging
        mode"""
        raise NotImplementedError()

    def entry_index(self, virt_addr):
        """Get the index of the entry in this table that corresponds to the
        provided virtual address"""
        return (virt_addr >> self.addr_shift) & (self.num_entries - 1)

    def has_entry(self, virt_addr):
        """Indicate whether an entry is present in this table for the provided
        virtual address"""
        entry = self.entries[self.entry_index(virt_addr)]

        return bool(entry & FLAG_P)

    def lookup(self, virt_addr):
        """Look up the physical mapping for a virtual address.

        If this is a leaf table, this is the physical address mapping. If not,
        this is the physical address of the next level table"""
        entry = self.entries[self.entry_index(virt_addr)]

        return entry & self.addr_mask

    def map(self, virt_addr, phys_addr, entry_flags):
        """For the table entry corresponding to the provided virtual address,
        set the corresponding physical entry in the table. Unsupported flags
        will be filtered out.

        If this is a leaf table, this is the physical address mapping. If not,
        this is the physical address of the next level table"""
        verbose("%s: mapping 0x%x to 0x%x : %s" %
                (self.__class__.__name__,
                 phys_addr, virt_addr, dump_flags(entry_flags)))

        value = ((phys_addr & self.addr_mask) |
                 (entry_flags & self.supported_flags))
        self.entries[self.entry_index(virt_addr)] = value

    def set_perms(self, virt_addr, entry_flags):
        """For the table entry corresponding to the provided virtual address,
        update just the flags, leaving the physical mapping alone.
        Unsupported flags will be filtered out."""
        verbose("%s: changing perm at 0x%x : %s" %
                (self.__class__.__name__,
                 virt_addr, dump_flags(entry_flags)))

        index = self.entry_index(virt_addr)
        preserved_addr = self.entries[index] & self.addr_mask
        self.entries[index] = preserved_addr | (entry_flags &
                                                self.supported_flags)
279
280
281# Specific supported table types
class Pml4(MMUTable):
    """Page mapping level 4 for IA-32e (top-level table)"""
    addr_shift = 39                # Each entry spans 512 GB of address space
    addr_mask = 0x7FFFFFFFFFFFF000
    type_code = 'Q'                # 64-bit entries
    num_entries = 512
    supported_flags = INT_FLAGS    # Intermediate level: permissive flags
289
class Pdpt(MMUTable):
    """Page directory pointer table for IA-32e"""
    addr_shift = 30                # Each entry spans 1 GB of address space
    addr_mask = 0x7FFFFFFFFFFFF000
    type_code = 'Q'                # 64-bit entries
    num_entries = 512
    # SZ allowed: an entry here may directly map a large page
    supported_flags = INT_FLAGS | FLAG_SZ | FLAG_CD
297
class PdptPAE(Pdpt):
    """Page directory pointer table for PAE"""
    # PAE's top-level table has just 4 entries (not a full page);
    # see the special handling in PtableSet.write_output()
    num_entries = 4
301
class Pd(MMUTable):
    """Page directory for 32-bit"""
    addr_shift = 22                # Each entry spans 4 MB of address space
    addr_mask = 0xFFFFF000
    type_code = 'I'                # 32-bit entries
    num_entries = 1024
    # SZ allowed: an entry here may directly map a 4 MB large page
    supported_flags = INT_FLAGS | FLAG_SZ | FLAG_CD
309
class PdXd(Pd):
    """Page directory for either PAE or IA-32e"""
    addr_shift = 21                # Each entry spans 2 MB of address space
    addr_mask = 0x7FFFFFFFFFFFF000
    num_entries = 512
    type_code = 'Q'                # 64-bit entries
316
class Pt(MMUTable):
    """Page table for 32-bit"""
    addr_shift = 12                # Each entry maps one 4 KB page
    addr_mask = 0xFFFFF000
    type_code = 'I'                # 32-bit entries
    num_entries = 1024
    # Leaf level: no SZ; no XD either, as 32-bit entries have no bit 63
    supported_flags = (FLAG_P | FLAG_RW | FLAG_US | FLAG_G | FLAG_CD |
                       FLAG_IGNORED0 | FLAG_IGNORED1)
325
class PtXd(Pt):
    """Page table for either PAE or IA-32e"""
    addr_mask = 0x07FFFFFFFFFFF000
    type_code = 'Q'                # 64-bit entries, so XD is available
    num_entries = 512
    supported_flags = (FLAG_P | FLAG_RW | FLAG_US | FLAG_G | FLAG_XD | FLAG_CD |
                       FLAG_IGNORED0 | FLAG_IGNORED1 | FLAG_IGNORED2)
333
334
class PtableSet():
    """Represents a complete set of page tables for any paging mode"""

    def __init__(self, pages_start):
        """Instantiate a set of page tables which will be located in the
        image starting at the provided physical memory location"""
        self.toplevel = self.levels[0]()
        self.page_pos = pages_start

        debug("%s starting at physical address 0x%x" %
              (self.__class__.__name__, self.page_pos))

        # Database of page table pages. Maps physical memory address to
        # MMUTable objects, excluding the top-level table which is tracked
        # separately. Starts out empty as we haven't mapped anything and
        # the top-level table is tracked separately.
        self.tables = {}

    def get_new_mmutable_addr(self):
        """If we need to instantiate a new MMUTable, return a physical
        address location for it"""
        ret = self.page_pos
        self.page_pos += 4096
        return ret

    @property
    def levels(self):
        """Class hierarchy of paging levels, with the first entry being
        the toplevel table class, and the last entry always being
        some kind of leaf page table class (Pt or PtXd)"""
        raise NotImplementedError()

    def is_mapped(self, virt_addr, level):
        """
        Return True if virt_addr has already been mapped.

        level == PT_LEVEL (-1) checks leaf-level page table entries.
        level == PD_LEVEL (-2) checks page directory entries instead
        (i.e. large-page or reserved-PT mappings).
        """
        table = self.toplevel
        num_levels = len(self.levels) + level + 1
        has_mapping = False

        # Walk down the hierarchy. Note the range is 1..num_levels
        # inclusive: when depth == num_levels, 'table' is the table at
        # the requested level and we test its entry instead of
        # descending further. Descending at the last level would index
        # self.tables with a physical page frame address (not a table
        # address) and raise KeyError.
        for depth in range(1, num_levels + 1):
            if table.has_entry(virt_addr):
                if depth == num_levels:
                    has_mapping = True
                else:
                    table = self.tables[table.lookup(virt_addr)]

            if has_mapping:
                # pylint doesn't like break in the above if-block
                break

        return has_mapping

    def is_region_mapped(self, virt_base, size, level=PT_LEVEL):
        """Find out if any page in the region has already been mapped"""
        align_check(virt_base, size)
        for vaddr in range(virt_base, virt_base + size, 4096):
            if self.is_mapped(vaddr, level):
                return True

        return False

    def new_child_table(self, table, virt_addr, depth):
        """Create a new child table at the given depth and link it into the
        parent 'table' entry that covers virt_addr"""
        new_table_addr = self.get_new_mmutable_addr()
        new_table = self.levels[depth]()
        debug("new %s at physical addr 0x%x"
                      % (self.levels[depth].__name__, new_table_addr))
        self.tables[new_table_addr] = new_table
        table.map(virt_addr, new_table_addr, INT_FLAGS)

        return new_table

    def map_page(self, virt_addr, phys_addr, flags, reserve, level=PT_LEVEL):
        """Map a virtual address to a physical address in the page tables,
        with provided access flags. If 'reserve' is True, only instantiate
        the paging structures without writing the final entry."""
        table = self.toplevel

        num_levels = len(self.levels) + level + 1

        # Create and link up intermediate tables if necessary
        for depth in range(1, num_levels):
            # Create child table if needed
            if not table.has_entry(virt_addr):
                table = self.new_child_table(table, virt_addr, depth)
            else:
                table = self.tables[table.lookup(virt_addr)]

        # Set up entry in leaf page table
        if not reserve:
            table.map(virt_addr, phys_addr, flags)

    def reserve(self, virt_base, size, to_level=PT_LEVEL):
        """Reserve page table space with already aligned virt_base and size"""
        debug("Reserving paging structures for 0x%x (0x%x)" %
              (virt_base, size))

        align_check(virt_base, size)

        # How much memory is covered by leaf page table
        scope = 1 << self.levels[PD_LEVEL].addr_shift

        if virt_base % scope != 0:
            error("misaligned virtual address space, 0x%x not a multiple of 0x%x" %
                  (virt_base, scope))

        for addr in range(virt_base, virt_base + size, scope):
            self.map_page(addr, 0, 0, True, to_level)

    def reserve_unaligned(self, virt_base, size, to_level=PT_LEVEL):
        """Reserve page table space, widening virt_base and size to the
        required alignment first"""
        # How much memory is covered by leaf page table
        scope = 1 << self.levels[PD_LEVEL].addr_shift

        mem_start = round_down(virt_base, scope)
        mem_end = round_up(virt_base + size, scope)
        mem_size = mem_end - mem_start

        self.reserve(mem_start, mem_size, to_level)

    def map(self, phys_base, virt_base, size, flags, level=PT_LEVEL):
        """Map an address range in the page tables with the provided access
        flags. If virt_base is None, identity mapping using phys_base is done.
        """
        is_identity_map = virt_base is None or virt_base == phys_base

        if virt_base is None:
            virt_base = phys_base

        # Size of the region covered by one entry at the requested level
        scope = 1 << self.levels[level].addr_shift

        debug("Mapping 0x%x (0x%x) to 0x%x: %s" %
                (phys_base, size, virt_base, dump_flags(flags)))

        align_check(phys_base, size, scope)
        align_check(virt_base, size, scope)
        for paddr in range(phys_base, phys_base + size, scope):
            if is_identity_map and paddr == 0 and level == PT_LEVEL:
                # Never map the NULL page at page table level.
                continue

            vaddr = virt_base + (paddr - phys_base)

            self.map_page(vaddr, paddr, flags, False, level)

    def identity_map_unaligned(self, phys_base, size, flags, level=PT_LEVEL):
        """Identity map a region of memory, widening its bounds to the
        alignment required at the given level"""
        scope = 1 << self.levels[level].addr_shift

        phys_aligned_base = round_down(phys_base, scope)
        phys_aligned_end = round_up(phys_base + size, scope)
        phys_aligned_size = phys_aligned_end - phys_aligned_base

        self.map(phys_aligned_base, None, phys_aligned_size, flags, level)

    def map_region(self, name, flags, virt_to_phys_offset, level=PT_LEVEL):
        """Map a named region, with bounds taken from the <name>_start and
        <name>_end symbols. virt_to_phys_offset (if not None) is added to
        the virtual start to obtain the physical base."""
        if not isdef(name + "_start"):
            # Region may not exist
            return

        region_start = syms[name + "_start"]
        region_end = syms[name + "_end"]
        region_size = region_end - region_start

        region_start_phys = region_start

        if virt_to_phys_offset is not None:
            region_start_phys += virt_to_phys_offset

        self.map(region_start_phys, region_start, region_size, flags, level)

    def set_region_perms(self, name, flags, level=PT_LEVEL):
        """Set access permissions for a named region that is already mapped

        The bounds of the region will be looked up in the symbol table
        with _start and _size suffixes (falling back to _end if no _size
        symbol exists). The physical address mapping is unchanged and this
        will not disturb any double-mapping."""
        if not isdef(name + "_start"):
            # Region may not exist
            return

        # Doesn't matter if this is a virtual address, we have a
        # either dual mapping or it's the same as physical
        base = syms[name + "_start"]

        if isdef(name + "_size"):
            size = syms[name + "_size"]
        else:
            region_end = syms[name + "_end"]
            size = region_end - base

        if size == 0:
            return

        debug("change flags for %s at 0x%x (0x%x): %s" %
              (name, base, size, dump_flags(flags)))

        num_levels = len(self.levels) + level + 1
        scope = 1 << self.levels[level].addr_shift

        align_check(base, size, scope)

        try:
            for addr in range(base, base + size, scope):
                # Never map the NULL page
                if addr == 0:
                    continue

                # Walk down to the table at the requested level
                table = self.toplevel
                for _ in range(1, num_levels):
                    table = self.tables[table.lookup(addr)]
                table.set_perms(addr, flags)
        except KeyError:
            error("no mapping for %s region 0x%x (size 0x%x)" %
                  (name, base, size))

    def write_output(self, filename):
        """Write the page tables to the output file in binary format.
        Returns the number of bytes written."""
        written_size = 0

        with open(filename, "wb") as output_fp:
            # Tables are emitted in ascending physical address order
            for addr in sorted(self.tables):
                mmu_table = self.tables[addr]
                mmu_table_bin = mmu_table.get_binary()
                output_fp.write(mmu_table_bin)
                written_size += len(mmu_table_bin)

            # We always have the top-level table be last. This is because
            # in PAE, the top-level PDPT has only 4 entries and is not a
            # full page in size. We do not put it in the tables dictionary
            # and treat it as a special case.
            debug("top-level %s at physical addr 0x%x" %
                  (self.toplevel.__class__.__name__,
                   self.get_new_mmutable_addr()))
            top_level_bin = self.toplevel.get_binary()
            output_fp.write(top_level_bin)
            written_size += len(top_level_bin)

        return written_size
581
582# Paging mode classes, we'll use one depending on configuration
class Ptables32bit(PtableSet):
    """32-bit Page Tables (two levels: page directory -> page table)"""
    levels = [Pd, Pt]
586
class PtablesPAE(PtableSet):
    """PAE Page Tables (three levels, 4-entry top-level PDPT)"""
    levels = [PdptPAE, PdXd, PtXd]
590
class PtablesIA32e(PtableSet):
    """Page Tables under IA32e mode (four levels, PML4 on top)"""
    levels = [Pml4, Pdpt, PdXd, PtXd]
594
595
def parse_args():
    """Parse command line arguments into the global 'args' namespace"""
    global args

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter, allow_abbrev=False)

    parser.add_argument("-k", "--kernel", required=True,
                        help="path to prebuilt kernel ELF binary")
    parser.add_argument("-o", "--output", required=True,
                        help="output file")
    parser.add_argument("--map", action='append',
                        help=textwrap.dedent('''\
                            Map extra memory:
                            <physical address>,<size>[,<flags:LUWXD>[,<virtual address>]]
                            where flags can be empty or combination of:
                                L - Large page (2MB or 4MB),
                                U - Userspace accessible,
                                W - Writable,
                                X - Executable,
                                D - Cache disabled.
                            Default is
                                small (4KB) page,
                                supervisor only,
                                read only,
                                and execution disabled.
                            '''))
    parser.add_argument("-v", "--verbose", action="count",
                        help="Print extra debugging information")
    args = parser.parse_args()
    # Treat the build system's VERBOSE environment variable like a single -v.
    # NOTE(review): this fires even when VERBOSE is set to an empty value,
    # and overrides a higher -vv given on the command line — confirm intended.
    if "VERBOSE" in os.environ:
        args.verbose = 1
629
630
def get_symbols(elf_obj):
    """Return a dict mapping every symbol name in the ELF file to its value.

    Raises LookupError if the file has no symbol table."""
    for section in elf_obj.iter_sections():
        if not isinstance(section, SymbolTableSection):
            continue
        # Use the first symbol table section found
        return {sym.name: sym.entry.st_value
                for sym in section.iter_symbols()}

    raise LookupError("Could not find symbol table")
639
def isdef(sym_name):
    """True if symbol is defined in ELF file"""
    # 'syms' is the global name -> value dict built by get_symbols()
    return sym_name in syms
643
644
def find_symbol(obj, name):
    """Return the symbol object with the given name from the ELF file,
    or None if no such symbol exists."""
    for section in obj.iter_sections():
        if not isinstance(section, SymbolTableSection):
            continue
        for sym in section.iter_symbols():
            if sym.name == name:
                return sym

    return None
654
655
def map_extra_regions(pt):
    """Map extra memory regions given via --map on the command line.

    Each --map argument has the form:
        <physical address>,<size>[,<flags:LUWXD>[,<virtual address>]]
    If no virtual address is given, the region is identity mapped."""
    # Parse the command line arguments into mapping descriptors
    mappings = []

    for entry in args.map:
        elements = entry.split(',')

        if len(elements) < 2:
            error("Not enough arguments for --map %s" % entry)

        one_map = {}

        one_map['cmdline'] = entry
        # Base 0 honors 0x/0o/0b prefixes and defaults to decimal
        one_map['phys'] = int(elements[0], 0)
        one_map['size'] = int(elements[1], 0)
        one_map['large_page'] = False

        # Default: present, supervisor-only, read-only, no-execute
        flags = FLAG_P | ENTRY_XD
        if len(elements) > 2:
            map_flags = elements[2]

            # Check for allowed flags
            if not re.match('^[LUWXD]*$', map_flags):
                error("Unrecognized flags: %s" % map_flags)

            if 'W' in map_flags:
                flags |= ENTRY_RW
            if 'X' in map_flags:
                flags &= ~ENTRY_XD
            if 'U' in map_flags:
                flags |= ENTRY_US
            if 'L' in map_flags:
                flags |= FLAG_SZ
                one_map['large_page'] = True
            if 'D' in map_flags:
                flags |= FLAG_CD

        one_map['flags'] = flags

        if len(elements) > 3:
            # Parse with base 0 for consistency with the physical address
            # and size fields above (previously this forced base 16, so a
            # decimal virtual address would be misinterpreted)
            one_map['virt'] = int(elements[3], 0)
        else:
            # No virtual address given: identity map
            one_map['virt'] = one_map['phys']

        mappings.append(one_map)

    # Map the regions
    for one_map in mappings:
        phys = one_map['phys']
        size = one_map['size']
        flags = one_map['flags']
        virt = one_map['virt']
        # Large pages are installed at page directory level
        level = PD_LEVEL if one_map['large_page'] else PT_LEVEL

        # Check if addresses have already been mapped.
        # Error out if so as they could override kernel mappings.
        if pt.is_region_mapped(virt, size, level):
            error(("Region 0x%x (%d) already been mapped "
                   "for --map %s" % (virt, size, one_map['cmdline'])))

        # Reserve space in page table, and map the region
        pt.reserve_unaligned(virt, size, level)
        pt.map(phys, virt, size, flags, level)
721
722
def main():
    """Main program: read the prebuilt kernel ELF, build the initial page
    tables describing its mappings, and write them out as a flat binary"""
    global syms
    parse_args()

    # Pull symbol values (region addresses/sizes and kconfig settings)
    # out of the prebuilt kernel image
    with open(args.kernel, "rb") as elf_fp:
        kernel = ELFFile(elf_fp)
        syms = get_symbols(kernel)

        # Size of the linker-reserved area the generated tables must fit in
        sym_dummy_pagetables = find_symbol(kernel, "dummy_pagetables")
        if sym_dummy_pagetables:
            reserved_pt_size = sym_dummy_pagetables['st_size']
        else:
            reserved_pt_size = None

    # Select the paging mode from kconfig symbols baked into the ELF
    if isdef("CONFIG_X86_64"):
        pclass = PtablesIA32e
    elif isdef("CONFIG_X86_PAE"):
        pclass = PtablesPAE
    else:
        pclass = Ptables32bit

    debug("building %s" % pclass.__name__)

    vm_base = syms["CONFIG_KERNEL_VM_BASE"]
    vm_size = syms["CONFIG_KERNEL_VM_SIZE"]
    vm_offset = syms["CONFIG_KERNEL_VM_OFFSET"]

    sram_base = syms["CONFIG_SRAM_BASE_ADDRESS"]
    sram_size = syms["CONFIG_SRAM_SIZE"] * 1024

    mapped_kernel_base = syms["z_mapped_start"]
    mapped_kernel_size = syms["z_mapped_size"]

    if isdef("CONFIG_SRAM_OFFSET"):
        sram_offset = syms["CONFIG_SRAM_OFFSET"]
    else:
        sram_offset = 0

    # Figure out if there is any need to do virtual-to-physical
    # address translation
    virt_to_phys_offset = (sram_base + sram_offset) - (vm_base + vm_offset)

    if isdef("CONFIG_ARCH_MAPS_ALL_RAM"):
        image_base = sram_base
        image_size = sram_size
    else:
        image_base = mapped_kernel_base
        image_size = mapped_kernel_size

    image_base_phys = image_base + virt_to_phys_offset

    # Physical location where the generated tables will reside
    ptables_phys = syms["z_x86_pagetables_start"] + virt_to_phys_offset

    debug("Address space: 0x%x - 0x%x size 0x%x" %
          (vm_base, vm_base + vm_size - 1, vm_size))

    debug("Zephyr image: 0x%x - 0x%x size 0x%x" %
          (image_base, image_base + image_size - 1, image_size))

    if virt_to_phys_offset != 0:
        debug("Physical address space: 0x%x - 0x%x size 0x%x" %
              (sram_base, sram_base + sram_size - 1, sram_size))

    is_perm_regions = isdef("CONFIG_SRAM_REGION_PERMISSIONS")

    # Are pages in non-boot, non-pinned sections present at boot.
    is_generic_section_present = isdef("CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT")

    if image_size >= vm_size:
        error("VM size is too small (have 0x%x need more than 0x%x)" % (vm_size, image_size))

    map_flags = 0

    if is_perm_regions:
        # Don't allow execution by default for any pages. We'll adjust this
        # in later calls to pt.set_region_perms()
        map_flags = ENTRY_XD

    pt = pclass(ptables_phys)
    # Instantiate all the paging structures for the address space
    pt.reserve(vm_base, vm_size)
    # Map the zephyr image
    if is_generic_section_present:
        map_flags = map_flags | FLAG_P
        pt.map(image_base_phys, image_base, image_size, map_flags | ENTRY_RW)
    else:
        # When generic linker sections are not present in physical memory,
        # the corresponding virtual pages should not be mapped to non-existent
        # physical pages. So simply identity map them to create the page table
        # entries but without the present bit set.
        # Boot and pinned sections (if configured) will be mapped to
        # physical memory below.
        pt.map(image_base, image_base, image_size, map_flags | ENTRY_RW)

    if virt_to_phys_offset != 0:
        # Need to identity map the physical address space
        # as it is needed during early boot process.
        # This will be unmapped once z_x86_mmu_init()
        # is called.
        # Note that this only does the identity mapping
        # at the page directory level to minimize wasted space.
        pt.reserve_unaligned(image_base_phys, image_size, to_level=PD_LEVEL)
        pt.identity_map_unaligned(image_base_phys, image_size,
                                  FLAG_P | FLAG_RW | FLAG_SZ, level=PD_LEVEL)

    if isdef("CONFIG_X86_64"):
        # 64-bit has a special region in the first 64K to bootstrap other CPUs
        # from real mode
        locore_base = syms["_locore_start"]
        locore_size = syms["_lodata_end"] - locore_base
        debug("Base addresses: physical 0x%x size 0x%x" % (locore_base,
                                                         locore_size))
        pt.map(locore_base, None, locore_size, map_flags | FLAG_P | ENTRY_RW)

    if isdef("CONFIG_XIP"):
        # Additionally identity-map all ROM as read-only
        pt.map(syms["CONFIG_FLASH_BASE_ADDRESS"], None,
               syms["CONFIG_FLASH_SIZE"] * 1024, map_flags | FLAG_P)

    if isdef("CONFIG_LINKER_USE_BOOT_SECTION"):
        pt.map_region("lnkr_boot", map_flags | FLAG_P | ENTRY_RW, virt_to_phys_offset)

    if isdef("CONFIG_LINKER_USE_PINNED_SECTION"):
        pt.map_region("lnkr_pinned", map_flags | FLAG_P | ENTRY_RW, virt_to_phys_offset)

    # Process extra mapping requests
    if args.map:
        map_extra_regions(pt)

    # Adjust mapped region permissions if configured
    if is_perm_regions:
        # Need to accomplish the following things:
        # - Text regions need the XD flag cleared and RW flag removed
        #   if not built with gdbstub support
        # - Rodata regions need the RW flag cleared
        # - User mode needs access as we currently do not separate application
        #   text/rodata from kernel text/rodata
        if isdef("CONFIG_GDBSTUB"):
            flags = ENTRY_US | ENTRY_RW
        else:
            flags = ENTRY_US

        if is_generic_section_present:
            flags = flags | FLAG_P

        pt.set_region_perms("__text_region", flags)

        if isdef("CONFIG_LINKER_USE_BOOT_SECTION"):
            pt.set_region_perms("lnkr_boot_text", flags | FLAG_P)

        if isdef("CONFIG_LINKER_USE_PINNED_SECTION"):
            pt.set_region_perms("lnkr_pinned_text", flags | FLAG_P)

        flags = ENTRY_US | ENTRY_XD
        if is_generic_section_present:
            flags = flags | FLAG_P

        pt.set_region_perms("__rodata_region", flags)

        if isdef("CONFIG_LINKER_USE_BOOT_SECTION"):
            pt.set_region_perms("lnkr_boot_rodata", flags | FLAG_P)

        if isdef("CONFIG_LINKER_USE_PINNED_SECTION"):
            pt.set_region_perms("lnkr_pinned_rodata", flags | FLAG_P)

        if isdef("CONFIG_COVERAGE_GCOV") and isdef("CONFIG_USERSPACE"):
            # If GCOV is enabled, user mode must be able to write to its
            # common data area
            pt.set_region_perms("__gcov_bss",
                                FLAG_P | ENTRY_RW | ENTRY_US | ENTRY_XD)

        if isdef("CONFIG_X86_64"):
            # Set appropriate permissions for locore areas much like we did
            # with the main text/rodata regions

            if isdef("CONFIG_X86_KPTI"):
                # Set the User bit for the read-only locore/lorodata areas.
                # This ensures they get mapped into the User page tables if
                # KPTI is turned on. There is no sensitive data in them, and
                # they contain text/data needed to take an exception or
                # interrupt.
                flag_user = ENTRY_US
            else:
                flag_user = 0

            pt.set_region_perms("_locore", FLAG_P | flag_user)
            pt.set_region_perms("_lorodata", FLAG_P | ENTRY_XD | flag_user)

    written_size = pt.write_output(args.output)
    debug("Written %d bytes to %s" % (written_size, args.output))

    # Warn if reserved page table is not of correct size
    if reserved_pt_size and written_size != reserved_pt_size:
        # Figure out how many extra pages needed
        size_diff = written_size - reserved_pt_size
        page_size = syms["CONFIG_MMU_PAGE_SIZE"]
        extra_pages_needed = int(round_up(size_diff, page_size) / page_size)

        if isdef("CONFIG_X86_EXTRA_PAGE_TABLE_PAGES"):
            extra_pages_kconfig = syms["CONFIG_X86_EXTRA_PAGE_TABLE_PAGES"]
            # Reinterpret the symbol value as a signed integer of the
            # native word width so a negative kconfig adjustment works
            if isdef("CONFIG_X86_64"):
                extra_pages_needed += ctypes.c_int64(extra_pages_kconfig).value
            else:
                extra_pages_needed += ctypes.c_int32(extra_pages_kconfig).value

        reason = "big" if reserved_pt_size > written_size else "small"

        error(("Reserved space for page table is too %s."
               " Set CONFIG_X86_EXTRA_PAGE_TABLE_PAGES=%d") %
               (reason, extra_pages_needed))
934
935
# Script entry point
if __name__ == "__main__":
    main()
938