Lines Matching +full:addr +full:- +full:range

4  * SPDX-License-Identifier: Apache-2.0
16 #include <zephyr/linker/linker-defs.h>
21 #include <xtensa/config/core-matmap.h>
22 #include <xtensa/config/core-isa.h>
45 * - Each MPU region is described by TWO entries:
51 * - The last entry is a special case, as there is no more "next" entry to supply its end address.
56 * - The current implementation has the following limitations:
57 * - All enabled entries are grouped towards the end of the map.
58 * - Except the last entry, which can be disabled. This is
62 * - No disabled MPU entries allowed in between.
66 * - The start addresses must always be in non-descending order.
67 * - The access rights and memory type fields must contain valid values.
68 * - The segment field needs to be correct for each entry.
69 * - MBZ fields must contain only zeroes.
70 * - Although the start address occupies 27 bits of the register,
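
The comment block above describes the map encoding; a minimal sketch of the idea follows, using illustrative names rather than the driver's real AS/AT register types: entry i carries a region's start address and attributes, and entry i+1's start address doubles as that region's end.

    /* Illustrative only -- not the driver's actual structures. */
    struct mpu_entry_sketch {
            uint32_t start;         /* region start, MPU-aligned        */
            uint8_t  access_rights; /* e.g. kernel RW, user no access   */
            uint8_t  memory_type;   /* cacheability/ordering attributes */
            bool     enabled;
    };

    /* Region i spans [entries[i].start, entries[i+1].start) and uses
     * the access rights and memory type stored in entries[i].
     */
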
103 * cacheable, read/write and non-executable
128 /* Mark the rodata segment cacheable, read-only and non-executable */
139 * Return the pointer to the entry encompassing @a addr out of an array of MPU entries.
141 * Returns the entry where @a addr is greater than or equal to the entry's start address,
142 * and where @a addr is less than the starting address of the next entry.
145 * @param[in] addr Address to be matched to one background entry.
153 * @return Pointer to the map entry encompassing @a addr, or NULL if no such entry found.
157 uintptr_t addr, uint8_t first_enabled_idx, in check_addr_in_mpu_entries() argument
168 if (addr < xtensa_mpu_entry_start_address_get(&entries[first_enabled_idx])) { in check_addr_in_mpu_entries()
174 for (idx = first_enabled_idx; idx < (XTENSA_MPU_NUM_ENTRIES - 1); idx++) { in check_addr_in_mpu_entries()
178 if ((addr >= s_addr) && (addr < e_addr)) { in check_addr_in_mpu_entries()
184 idx = XTENSA_MPU_NUM_ENTRIES - 1; in check_addr_in_mpu_entries()
186 if (addr >= s_addr) { in check_addr_in_mpu_entries()
194 if (addr == s_addr) { in check_addr_in_mpu_entries()
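
A hypothetical call to check_addr_in_mpu_entries(), stitched together from the fragments above; the out-parameters are assumptions, since the matches only show the entries/addr/first_enabled_idx arguments:

    /* Hypothetical usage; 'exact' and 'idx' are assumed out-parameters. */
    bool exact;
    uint8_t idx;
    const struct xtensa_mpu_entry *e =
            check_addr_in_mpu_entries(map->entries, addr,
                                      first_enabled_idx, &exact, &idx);

    if (e == NULL) {
            /* addr lies below the first enabled entry, so no
             * foreground entry encompasses it.
             */
    }
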
241 * @retval -1 First address is less than second address.
254 return -1; in compare_entries()
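
The compare_entries() fragments suggest a conventional three-way comparison on start addresses; a sketch under that assumption:

    /* Sketch: three-way compare of two start addresses,
     * returning -1, 0 or 1 as qsort-style comparators do.
     */
    static int compare_addrs(uintptr_t a, uintptr_t b)
    {
            if (a < b) {
                    return -1;
            }
            return (a > b) ? 1 : 0;
    }
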
322 * The MBZ field of the AS register is re-purposed to indicate that in consolidate_entries()
329 entry_1->as.p.mbz = 1U; in consolidate_entries()
338 entry_0->as.p.mbz = 1U; in consolidate_entries()
346 uint8_t read_idx = XTENSA_MPU_NUM_ENTRIES - 1; in consolidate_entries()
353 if (entry_rd->as.p.mbz != 1U) { in consolidate_entries()
356 write_idx--; in consolidate_entries()
360 entry_wr->at.p.segment = write_idx; in consolidate_entries()
363 read_idx--; in consolidate_entries()
373 e->as.raw = 0U; in consolidate_entries()
376 e->at.p.segment = idx_0; in consolidate_entries()
379 e->at.p.access_rights = XTENSA_MPU_ACCESS_P_NA_U_NA; in consolidate_entries()
382 e->at.p.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE; in consolidate_entries()
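
Pieced together from the fragments, consolidation appears to run in three steps: mark redundant entries via the normally-must-be-zero MBZ bit, compact the survivors toward the end of the map, then reset the freed low-index entries to a disabled default. A simplified sketch (the reset values match the fragments; the loop structure is a reconstruction):

    /* Step 2: compact. Walk from the end, copying every entry
     * not marked with as.p.mbz == 1 toward the back, fixing the
     * segment field to the slot it now occupies.
     */
    int write_idx = XTENSA_MPU_NUM_ENTRIES;

    for (int read_idx = XTENSA_MPU_NUM_ENTRIES - 1; read_idx >= 0; read_idx--) {
            struct xtensa_mpu_entry *rd = &entries[read_idx];

            if (rd->as.p.mbz != 1U) {
                    write_idx--;
                    entries[write_idx] = *rd;
                    entries[write_idx].at.p.segment = write_idx;
            }
    }

    /* Step 3: reset the freed entries below write_idx. */
    for (int idx = 0; idx < write_idx; idx++) {
            struct xtensa_mpu_entry *e = &entries[idx];

            e->as.raw = 0U;
            e->at.p.segment = idx;
            e->at.p.access_rights = XTENSA_MPU_ACCESS_P_NA_U_NA;
            e->at.p.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE;
    }
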
396 * start and end entries. This may re-use existing entries or add new ones. in mpu_map_region_add()
407 * @retval -EINVAL Invalid values in function arguments.
419 struct xtensa_mpu_entry *entries = map->entries; in mpu_map_region_add()
422 ret = -EINVAL; in mpu_map_region_add()
435 struct xtensa_mpu_entry *last_entry = &entries[XTENSA_MPU_NUM_ENTRIES - 1]; in mpu_map_region_add()
439 /* Empty table, so populate the entries as-is. */ in mpu_map_region_add()
445 entry_slot_s = &entries[XTENSA_MPU_NUM_ENTRIES - 1]; in mpu_map_region_add()
449 first_enabled_idx = XTENSA_MPU_NUM_ENTRIES - 1; in mpu_map_region_add()
459 entry_slot_s = &entries[XTENSA_MPU_NUM_ENTRIES - 2]; in mpu_map_region_add()
460 entry_slot_e = &entries[XTENSA_MPU_NUM_ENTRIES - 1]; in mpu_map_region_add()
467 first_enabled_idx = XTENSA_MPU_NUM_ENTRIES - 2; in mpu_map_region_add()
478 ret = -EINVAL; in mpu_map_region_add()
495 ret = -EINVAL; in mpu_map_region_add()
509 ret = -ENOMEM; in mpu_map_region_add()
525 * - Use existing entry if start addresses are the same for existing in mpu_map_region_add()
527 * - Add an entry if incoming region is within existing region. in mpu_map_region_add()
534 first_enabled_idx--; in mpu_map_region_add()
544 * - Add an entry if incoming region is within existing region. in mpu_map_region_add()
545 * - If the end address exactly matches an existing entry, there is in mpu_map_region_add()
553 first_enabled_idx--; in mpu_map_region_add()
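
A hypothetical call to mpu_map_region_add(), based on the argument order visible in the later fragments (map, start, end, access rights); the trailing memory-type and out-parameter arguments, the rights constant, and the addresses are all assumptions for illustration:

    /* Hypothetical: map [0x20000000, 0x20001000) as kernel
     * read/write, user no access. Addresses are invented.
     */
    ret = mpu_map_region_add(&map, 0x20000000, 0x20001000,
                             XTENSA_MPU_ACCESS_P_RW_U_NA,
                             CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
                             NULL);
    if (ret == -ENOMEM) {
            /* Not enough free entries for new start/end markers. */
    }
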
640 struct xtensa_mpu_map *map = thread->arch.mpu_map; in xtensa_mpu_map_write()
657 for (entry = XTENSA_MPU_NUM_ENTRIES - 1; entry >= 0; entry--) { in xtensa_mpu_map_write()
659 : : "a"(map->entries[entry].at), "a"(map->entries[entry].as)); in xtensa_mpu_map_write()
702 const struct xtensa_mpu_range *range = &mpu_zephyr_ranges[entry]; in xtensa_mpu_init() local
705 range->start, range->end, in xtensa_mpu_init()
706 range->access_rights, range->memory_type, in xtensa_mpu_init()
711 (unsigned int)range->start, in xtensa_mpu_init()
712 (unsigned int)range->end, in xtensa_mpu_init()
721 const struct xtensa_mpu_range *range = &xtensa_soc_mpu_ranges[entry]; in xtensa_mpu_init() local
724 range->start, range->end, in xtensa_mpu_init()
725 range->access_rights, range->memory_type, in xtensa_mpu_init()
730 (unsigned int)range->start, in xtensa_mpu_init()
731 (unsigned int)range->end, in xtensa_mpu_init()
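
Both init loops walk constant tables of struct xtensa_mpu_range. A hypothetical SoC-level entry, using only the four fields visible in the fragments; the address range and rights value are invented for illustration:

    /* Hypothetical: expose a 1 MB peripheral window to the
     * kernel. Real tables are provided by the SoC/arch layers.
     */
    const struct xtensa_mpu_range xtensa_soc_mpu_ranges[] = {
            {
                    .start = 0x60000000,
                    .end   = 0x60100000,
                    .access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA,
                    .memory_type   = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
            },
    };
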
752 domain->arch.mpu_map = xtensa_mpu_map_fg_kernel; in arch_mem_domain_init()
777 struct xtensa_mpu_map *map = &domain->arch.mpu_map; in arch_mem_domain_partition_remove()
778 struct k_mem_partition *partition = &domain->partitions[partition_id]; in arch_mem_domain_partition_remove()
779 uintptr_t end_addr = partition->start + partition->size; in arch_mem_domain_partition_remove()
781 if (end_addr <= partition->start) { in arch_mem_domain_partition_remove()
782 ret = -EINVAL; in arch_mem_domain_partition_remove()
795 switch (partition->attr) { in arch_mem_domain_partition_remove()
840 ret = mpu_map_region_add(map, partition->start, end_addr, in arch_mem_domain_partition_remove()
849 cur_thread = _current_cpu->current; in arch_mem_domain_partition_remove()
850 if (cur_thread->mem_domain_info.mem_domain == domain) { in arch_mem_domain_partition_remove()
863 struct xtensa_mpu_map *map = &domain->arch.mpu_map; in arch_mem_domain_partition_add()
864 struct k_mem_partition *partition = &domain->partitions[partition_id]; in arch_mem_domain_partition_add()
865 uintptr_t end_addr = partition->start + partition->size; in arch_mem_domain_partition_add()
867 if (end_addr <= partition->start) { in arch_mem_domain_partition_add()
868 ret = -EINVAL; in arch_mem_domain_partition_add()
872 ret = mpu_map_region_add(map, partition->start, end_addr, in arch_mem_domain_partition_add()
873 (uint8_t)partition->attr, in arch_mem_domain_partition_add()
885 cur_thread = _current_cpu->current; in arch_mem_domain_partition_add()
886 if (((cur_thread->base.thread_state & _THREAD_DUMMY) != _THREAD_DUMMY) && in arch_mem_domain_partition_add()
887 (cur_thread->mem_domain_info.mem_domain == domain)) { in arch_mem_domain_partition_add()
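
The partition add and remove paths share the same end-address guard, rejecting zero-size partitions and wraparound in a single comparison; a sketch of that check isolated as a helper (the surrounding error handling is truncated in the matches):

    /* A start + size that wraps the address space produces
     * end_addr <= start, so one comparison rejects both
     * zero-size and overflowing partitions with -EINVAL.
     */
    static bool partition_range_valid(const struct k_mem_partition *part)
    {
            uintptr_t end_addr = part->start + part->size;

            return end_addr > part->start;
    }
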
900 struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; in arch_mem_domain_thread_add()
906 struct xtensa_mpu_map *old_map = thread->arch.mpu_map; in arch_mem_domain_thread_add()
908 bool is_user = (thread->base.user_options & K_USER) != 0; in arch_mem_domain_thread_add()
911 uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size; in arch_mem_domain_thread_add()
913 if (stack_end_addr < thread->stack_info.start) { in arch_mem_domain_thread_add()
925 ret = mpu_map_region_add(&domain->arch.mpu_map, in arch_mem_domain_thread_add()
926 thread->stack_info.start, stack_end_addr, in arch_mem_domain_thread_add()
935 thread->arch.mpu_map = &domain->arch.mpu_map; in arch_mem_domain_thread_add()
949 thread->stack_info.start, stack_end_addr, in arch_mem_domain_thread_add()
959 if (thread == _current_cpu->current) { in arch_mem_domain_thread_add()
971 struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; in arch_mem_domain_thread_remove()
973 if ((thread->base.user_options & K_USER) == 0) { in arch_mem_domain_thread_remove()
978 if ((thread->base.thread_state & _THREAD_DEAD) == 0) { in arch_mem_domain_thread_remove()
988 stack_end_addr = thread->stack_info.start + thread->stack_info.size; in arch_mem_domain_thread_remove()
989 if (stack_end_addr < thread->stack_info.start) { in arch_mem_domain_thread_remove()
998 ret = mpu_map_region_add(&domain->arch.mpu_map, in arch_mem_domain_thread_remove()
999 thread->stack_info.start, stack_end_addr, in arch_mem_domain_thread_remove()
1010 int arch_buffer_validate(const void *addr, size_t size, int write) in arch_buffer_validate() argument
1016 /* addr/size arbitrary, fix this up into an aligned region */ in arch_buffer_validate()
1017 aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN); in arch_buffer_validate()
1018 addr_offset = (uintptr_t)addr - aligned_addr; in arch_buffer_validate()
1029 ret = -EPERM; in arch_buffer_validate()
1049 ret = -EPERM; in arch_buffer_validate()
1071 ret = -EPERM; in arch_buffer_validate()
1081 bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write) in xtensa_mem_kernel_has_access() argument
1087 /* addr/size arbitrary, fix this up into an aligned region */ in xtensa_mem_kernel_has_access()
1088 aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN); in xtensa_mem_kernel_has_access()
1089 addr_offset = (uintptr_t)addr - aligned_addr; in xtensa_mem_kernel_has_access()
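
arch_buffer_validate() and xtensa_mem_kernel_has_access() open with the same fix-up, widening an arbitrary buffer to whole MPU-aligned granules before walking permissions. The first two lines appear verbatim in the fragments; the size rounding is an assumption:

    aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN);
    addr_offset = (uintptr_t)addr - aligned_addr;
    /* Assumed completion: widen the size by the same offset and
     * round up, so [aligned_addr, aligned_addr + aligned_size)
     * covers the caller's original buffer.
     */
    aligned_size = ROUND_UP(size + addr_offset, XCHAL_MPU_ALIGN);
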
1174 uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size; in xtensa_user_stack_perms()
1176 if (stack_end_addr < thread->stack_info.start) { in xtensa_user_stack_perms()
1181 (void)memset((void *)thread->stack_info.start, in xtensa_user_stack_perms()
1183 thread->stack_info.size - thread->stack_info.delta); in xtensa_user_stack_perms()
1186 ret = mpu_map_region_add(thread->arch.mpu_map, in xtensa_user_stack_perms()
1187 thread->stack_info.start, stack_end_addr, in xtensa_user_stack_perms()
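
The closing fragments zero the user stack (minus the reserved delta) and grant it to the thread via mpu_map_region_add(); a hypothetical completion of that call, with the rights and memory-type arguments assumed since the matches end at stack_end_addr:

    /* Hypothetical completion: rights/type values are assumed. */
    ret = mpu_map_region_add(thread->arch.mpu_map,
                             thread->stack_info.start, stack_end_addr,
                             XTENSA_MPU_ACCESS_P_RW_U_RW,
                             CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
                             NULL);
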