/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
/**
 * @file
 * @brief Cache manipulation
 *
 * This module contains functions for manipulating caches.
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
#include <zephyr/cache.h>
#include <stdbool.h>

/* Not Write-through bit */
#define X86_REG_CR0_NW BIT(29)
/* Cache Disable bit */
#define X86_REG_CR0_CD BIT(30)
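
/*
 * CD and NW together select the CR0 caching mode: CD=0/NW=0 is normal
 * write-back caching, while CD=1/NW=0 is the "no-fill" mode (cache hits
 * are still served, but no new lines are allocated) used below to
 * disable the data cache.
 */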
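
/*
 * WBINVD writes every modified cache line back to memory and then
 * invalidates all levels of the cache hierarchy; it is a privileged,
 * serializing instruction.
 */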
static inline void z_x86_wbinvd(void)
{
	__asm__ volatile("wbinvd;\n\t" : : : "memory");
}

void arch_dcache_enable(void)
{
	unsigned long cr0 = 0;

	/* Enable write-back caching by clearing the NW and CD bits */
	__asm__ volatile("mov %%cr0, %0;\n\t"
			 "and %1, %0;\n\t"
			 "mov %0, %%cr0;\n\t"
			 : "=r" (cr0)
			 : "i" (~(X86_REG_CR0_NW | X86_REG_CR0_CD)));
}

void arch_dcache_disable(void)
{
	unsigned long cr0 = 0;

	/* Enter the no-fill mode by setting NW=0 and CD=1 */
	__asm__ volatile("mov %%cr0, %0;\n\t"
			 "and %1, %0;\n\t"
			 "or %2, %0;\n\t"
			 "mov %0, %%cr0;\n\t"
			 : "=r" (cr0)
			 : "i" (~(X86_REG_CR0_NW)),
			   "i" (X86_REG_CR0_CD));

	/* Flush all caches */
	z_x86_wbinvd();
}

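/*
 * WBINVD operates on the entire cache hierarchy, so the flush-all,
 * invalidate-all and flush+invalidate-all operations below all reduce
 * to the same instruction. Note that "invalidate all" therefore also
 * writes dirty lines back instead of discarding them.
 */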
int arch_dcache_flush_all(void)
{
	z_x86_wbinvd();

	return 0;
}

int arch_dcache_invd_all(void)
{
	z_x86_wbinvd();

	return 0;
}

int arch_dcache_flush_and_invd_all(void)
{
	z_x86_wbinvd();

	return 0;
}

/**
 * No alignment is required for either <start_addr> or <size>, but since
 * the loop below iterates cache line by cache line, aligning both to a
 * cache line is optimal.
 *
 * The cache line size is specified via the d-cache-line-size DTS property.
 */
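/*
 * For example, with a 64-byte line, flushing 8 bytes at address 60
 * rounds <end> up from 68 to 128, so the loop issues CLFLUSH at 60 and
 * at 124, covering both of the lines that the range touches.
 */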
int arch_dcache_flush_range(void *start_addr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t start = (uintptr_t)start_addr;
	uintptr_t end = start + size;

	if (line_size == 0U) {
		return -ENOTSUP;
	}

	end = ROUND_UP(end, line_size);

	for (; start < end; start += line_size) {
		__asm__ volatile("clflush %0;\n\t" :
				 "+m"(*(volatile char *)start));
	}

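	/*
	 * Serialize the CLFLUSHes with a full barrier so the whole range
	 * is flushed before the caller continues; on CPUs without MFENCE,
	 * a LOCK-prefixed stack access provides the same full-barrier
	 * semantics.
	 */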
#if defined(CONFIG_X86_MFENCE_INSTRUCTION_SUPPORTED)
	__asm__ volatile("mfence;\n\t":::"memory");
#else
	__asm__ volatile("lock; addl $0,-4(%%esp);\n\t":::"memory", "cc");
#endif
	return 0;
}
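
/*
 * Example usage (an illustrative sketch, not part of this module):
 * callers normally reach this function through the generic
 * sys_cache_data_flush_range() wrapper from <zephyr/cache.h>, e.g. to
 * flush a buffer before handing it to a DMA engine:
 *
 *	uint8_t dma_buf[256];
 *
 *	produce_data(dma_buf);   (produce_data() is a hypothetical helper)
 *	sys_cache_data_flush_range(dma_buf, sizeof(dma_buf));
 */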
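
/*
 * CLFLUSH both writes a dirty line back and invalidates it, so the
 * invalidate and flush+invalidate range operations can simply reuse
 * arch_dcache_flush_range().
 */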
int arch_dcache_invd_range(void *start_addr, size_t size)
{
	return arch_dcache_flush_range(start_addr, size);
}

int arch_dcache_flush_and_invd_range(void *start_addr, size_t size)
{
	return arch_dcache_flush_range(start_addr, size);
}