/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
/**
 * @file
 * @brief Cache manipulation
 *
 * This module contains functions for manipulating caches.
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
#include <zephyr/cache.h>
#include <stdbool.h>

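/*
 * WBINVD writes all modified cache lines back to memory and invalidates
 * the entire cache hierarchy. It is a privileged, serializing
 * instruction.
 */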
static inline void z_x86_wbinvd(void)
{
	__asm__ volatile("wbinvd;\n\t" : : : "memory");
}

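/*
 * Caching is controlled by two bits in CR0: CD (bit 30) disables the
 * cache and NW (bit 29) disables write-through and invalidation cycles.
 * Normal write-back caching requires both bits to be clear.
 */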
void arch_dcache_enable(void)
{
	uint32_t cr0;

	/* Enable write-back caching by clearing the NW and CD bits */
	__asm__ volatile("movl %%cr0, %0;\n\t"
			"andl $0x9fffffff, %0;\n\t"
			"movl %0, %%cr0;\n\t"
			: "=r" (cr0));
}

void arch_dcache_disable(void)
{
	uint32_t cr0;

	/* Enter the no-fill mode by setting NW=0 and CD=1 */
	__asm__ volatile("movl %%cr0, %0;\n\t"
			"andl $0xdfffffff, %0;\n\t"
			"orl $0x40000000, %0;\n\t"
			"movl %0, %%cr0;\n\t"
			: "=r" (cr0));

	/* Write back and invalidate all caches so no stale lines remain */
	z_x86_wbinvd();
}

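/*
 * WBINVD both writes back and invalidates, so the flush, invalidate and
 * combined whole-cache operations below all map onto the same
 * instruction.
 */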
int arch_dcache_flush_all(void)
{
	z_x86_wbinvd();

	return 0;
}

int arch_dcache_invd_all(void)
{
	z_x86_wbinvd();

	return 0;
}

int arch_dcache_flush_and_invd_all(void)
{
	z_x86_wbinvd();

	return 0;
}

/**
 * No alignment is required for either start_addr or size, but since the
 * loop below works cache line by cache line, aligning both to the cache
 * line size is optimal.
 *
 * The cache line size is specified via the d-cache-line-size DTS property.
 */
int arch_dcache_flush_range(void *start_addr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t start = (uintptr_t)start_addr;
	uintptr_t end = start + size;

	if (line_size == 0U) {
		return -ENOTSUP;
	}

	end = ROUND_UP(end, line_size);

	for (; start < end; start += line_size) {
		__asm__ volatile("clflush %0;\n\t" :
				"+m"(*(volatile char *)start));
	}

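	/*
	 * CLFLUSH is only guaranteed to be ordered by MFENCE. On targets
	 * without the MFENCE instruction (no SSE2), a LOCK-prefixed
	 * access to the stack provides an equivalent full memory barrier.
	 */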
#if defined(CONFIG_X86_MFENCE_INSTRUCTION_SUPPORTED)
	__asm__ volatile("mfence;\n\t":::"memory");
#else
	__asm__ volatile("lock; addl $0,-4(%%esp);\n\t":::"memory", "cc");
#endif
	return 0;
}
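/*
 * CLFLUSH writes a modified line back to memory and invalidates it, so
 * invalidating or flushing-and-invalidating a range reduces to the same
 * loop as flushing it.
 */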
int arch_dcache_invd_range(void *start_addr, size_t size)
{
	return arch_dcache_flush_range(start_addr, size);
}

int arch_dcache_flush_and_invd_range(void *start_addr, size_t size)
{
	return arch_dcache_flush_range(start_addr, size);
}
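
/*
 * Usage sketch (illustrative only, not part of this file): callers are
 * expected to go through the generic sys_cache_* wrappers declared in
 * <zephyr/cache.h> rather than the arch_* functions directly, e.g. to
 * flush a buffer before handing it to a DMA-capable device:
 *
 *	uint8_t buf[64];
 *
 *	fill_buffer(buf, sizeof(buf));            // hypothetical helper
 *	sys_cache_data_flush_range(buf, sizeof(buf));
 *	start_dma(buf, sizeof(buf));              // hypothetical helper
 */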