/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>


#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);

/* Called with interrupts disabled. */
extern void __context_tracking_enter(enum ctx_state state);
extern void __context_tracking_exit(enum ctx_state state);

extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);

static inline void user_enter(void)
{
	if (context_tracking_enabled())
		context_tracking_enter(CONTEXT_USER);

}

static inline void user_exit(void)
{
	if (context_tracking_enabled())
		context_tracking_exit(CONTEXT_USER);
}

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_USER);

}

static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_USER);
}

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (!context_tracking_enabled())
		return 0;

	prev_ctx = this_cpu_read(context_tracking.state);
	if (prev_ctx != CONTEXT_KERNEL)
		context_tracking_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (context_tracking_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			context_tracking_enter(prev_ctx);
	}
}


/**
 * ct_state() - return the current context tracking state if known
 *
 * Returns the current cpu's context tracking state if context tracking
 * is enabled. If context tracking is disabled, returns
 * CONTEXT_DISABLED. This should be used primarily for debugging.
 */
static __always_inline enum ctx_state ct_state(void)
{
	return context_tracking_enabled() ?
		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */

#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
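
/*
 * Illustrative sketch, not part of this header's API: a typical arch
 * exception handler brackets work that must run in CONTEXT_KERNEL with
 * exception_enter()/exception_exit(), restoring whatever context was
 * interrupted on the way out. The helper name below is hypothetical;
 * only the call pattern and the CT_WARN_ON() sanity check matter.
 */
static inline void example_exception_handler(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	/* Leave user context (if that is where we came from) and tell RCU. */
	prev_state = exception_enter();

	/* From here on we are expected to be tracked as in-kernel. */
	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	/* ... hypothetical handling of the exception described by @regs ... */

	/* Return to the context recorded on entry (user or kernel). */
	exception_exit(prev_state);
}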

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/* must be called with irqs disabled */
static __always_inline void guest_enter_irqoff(void)
{
	instrumentation_begin();
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
	instrumentation_end();

	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_GUEST);

	/* KVM does not hold any references to RCU-protected data when it
	 * switches a CPU into guest mode. In fact, switching to guest mode
	 * is very similar to exiting to userspace from RCU's point of view.
	 * In addition, a CPU may stay in guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	if (!context_tracking_enabled_this_cpu()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

static __always_inline void guest_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_GUEST);

	instrumentation_begin();
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
	instrumentation_end();
}

#else
static __always_inline void guest_enter_irqoff(void)
{
	/*
	 * This runs in ioctl context, so it's safe to assume that
	 * the pending cputime to flush is system time (stime).
	 */
	instrumentation_begin();
	vtime_account_kernel(current);
	current->flags |= PF_VCPU;
	rcu_virt_note_context_switch(smp_processor_id());
	instrumentation_end();
}

static __always_inline void guest_exit_irqoff(void)
{
	instrumentation_begin();
	/* Flush the cputime we spent on the guest */
	vtime_account_kernel(current);
	current->flags &= ~PF_VCPU;
	instrumentation_end();
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

#endif /* _LINUX_CONTEXT_TRACKING_H */
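
/*
 * Illustrative sketch, not part of this header's API: the guest hooks above
 * are meant to bracket the low-level guest entry in a KVM-style vcpu run
 * loop, with interrupts disabled around the switch. run_vcpu_hw() below is
 * a hypothetical stand-in for the arch-specific world switch:
 *
 *	local_irq_disable();
 *	guest_enter_irqoff();
 *	run_vcpu_hw(vcpu);	(hypothetical arch-specific guest entry)
 *	guest_exit_irqoff();	(or guest_exit() once IRQs are enabled again)
 *	local_irq_enable();
 *
 * guest_enter_irqoff() sets PF_VCPU (and enters CONTEXT_GUEST when context
 * tracking is on) so that time spent in the guest is accounted as guest
 * time and RCU can treat the stay in guest mode as a quiescent state, as
 * described in the comments above.
 */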