/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
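
/*
 * Illustrative sketch (not kernel API documentation): the key->enabled
 * state transitions implemented by the slow path above.
 *
 *	 0  key disabled, no users
 *	-1  first static_key_slow_inc() is patching code; concurrent
 *	    callers fall through the cmpxchg loop and serialize on
 *	    jump_label_mutex until enabled becomes 1
 *	 n  key enabled with n users; inc is lock-free, dec takes the
 *	    mutex only when the count would drop to zero
 *
 * A hypothetical caller pairing the refcounted interface:
 *
 *	static struct static_key my_key = STATIC_KEY_INIT_FALSE;
 *
 *	void my_feature_register(void)
 *	{
 *		static_key_slow_inc(&my_key);	// first user patches text
 *	}
 *
 *	void my_feature_unregister(void)
 *	{
 *		static_key_slow_dec(&my_key);	// last user patches back
 *	}
 */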

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
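
/*
 * Hedged usage sketch: static_key_enable()/static_key_disable() force a
 * key to a boolean state and must not be mixed with the refcounted
 * slow_inc()/slow_dec() interface on the same key; the WARN_ON_ONCE()
 * checks above assume the count never exceeds 1 for boolean users.
 * A hypothetical boot-parameter consumer:
 *
 *	static struct static_key my_param_key = STATIC_KEY_INIT_FALSE;
 *
 *	static int __init my_param_setup(char *str)
 *	{
 *		static_key_enable(&my_param_key);	// idempotent
 *		return 0;
 *	}
 *	early_param("my_param", my_param_setup);
 */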

static void __static_key_slow_dec_cpuslocked(struct static_key *key,
					   unsigned long rate_limit,
					   struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key, 0, NULL);
}

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
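
/*
 * Hypothetical sketch of the deferred interface: rate-limiting the
 * disable side avoids thrashing text patching when a key flips rapidly.
 * Names and the timeout value below are illustrative, not from this file.
 *
 *	static struct static_key_deferred my_dkey;
 *
 *	static int __init my_init(void)
 *	{
 *		jump_label_rate_limit(&my_dkey, HZ / 10);
 *		return 0;
 *	}
 *
 *	// enable takes effect immediately...
 *	static_key_slow_inc(&my_dkey.key);
 *	...
 *	// ...but the disable is deferred by ~100ms via the workqueue:
 *	static_key_slow_dec_deferred(&my_dkey);
 */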

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use access
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
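
/*
 * Bit-layout sketch of the key->entries/key->next/key->type union, for
 * reference (the JUMP_TYPE_* values live in linux/jump_label.h):
 *
 *	bit 0 (JUMP_TYPE_TRUE):   initial branch direction
 *	bit 1 (JUMP_TYPE_LINKED): the pointer is a static_key_mod list
 *	                          rather than a jump_entry table
 *	remaining bits:           the pointer itself; jump_entry tables
 *	                          and static_key_mod nodes are word
 *	                          aligned, leaving the low two bits free
 */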

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
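
/*
 * Worked truth table for the XOR above (JUMP_LABEL_NOP == 0,
 * JUMP_LABEL_JMP == 1), following the convention documented in
 * linux/jump_label.h:
 *
 *	enabled	branch	resulting instruction
 *	false	false	NOP
 *	false	true	JMP
 *	true	false	JMP
 *	true	true	NOP
 */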

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (entry->code) {
			if (kernel_text_address(entry->code))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS",
					  (void *)(unsigned long)entry->code);
		}
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

/* Disable any jump label entries in __init/__exit code */
void __init jump_label_invalidate_initmem(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (init_section_contains((void *)(unsigned long)iter->code, 1))
			iter->code = 0;
	}
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
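
/*
 * Sketch of the linked form built by jump_label_add_module() below, once
 * a key defined in one place is used from other modules (newest user
 * first, since nodes are prepended):
 *
 *	key->next --> skm(module B) --> skm(module A) --> skm(defining site)
 *
 * The final node's ->mod is NULL when the key lives in the core kernel
 * image, and its ->entries may be NULL when the defining site has no
 * uses of its own key.
 */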

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

/* Disable any jump label entries in module init code */
static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
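
/*
 * Sketch of a hypothetical text-patching caller honouring the check
 * above (callers such as kprobes do something similar):
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + size)) {
 *		jump_label_unlock();
 *		return -EBUSY;	// overlaps a jump_label patch site
 *	}
 *	// ... safe to patch [addr, addr + size) w.r.t. jump labels ...
 *	jump_label_unlock();
 */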

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */