Lines Matching +full:key +full:- +full:code
1 // SPDX-License-Identifier: GPL-2.0-only
41 * Entries are sorted by key. in jump_label_cmp()
44 return -1; in jump_label_cmp()
50 * In the batching mode, entries should also be sorted by the code in jump_label_cmp()
55 return -1; in jump_label_cmp()
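
The jump_label_cmp() fragments above order the entry table first by key and then, in batching mode, by code address, so all of one key's patch sites can be rewritten in a single contiguous pass. A userspace sketch of that two-level comparator, using a made-up flat uentry type with absolute addresses instead of the kernel's relative fields:

#include <stdlib.h>

/* hypothetical flat entry, just to show the ordering rule */
struct uentry {
        unsigned long code;     /* address of the patch site */
        unsigned long key;      /* address of the controlling static key */
};

static int uentry_cmp(const void *pa, const void *pb)
{
        const struct uentry *a = pa, *b = pb;

        /* primary order: group every entry of one key together */
        if (a->key != b->key)
                return a->key < b->key ? -1 : 1;

        /* secondary order: within a key, sort by patch-site address so a
         * batched update walks the text section linearly */
        if (a->code != b->code)
                return a->code < b->code ? -1 : 1;

        return 0;
}

/* usage: qsort(entries, nr_entries, sizeof(*entries), uentry_cmp); */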
65 long delta = (unsigned long)a - (unsigned long)b; in jump_label_swap()
70 jea->code = jeb->code - delta; in jump_label_swap()
71 jea->target = jeb->target - delta; in jump_label_swap()
72 jea->key = jeb->key - delta; in jump_label_swap()
74 jeb->code = tmp.code + delta; in jump_label_swap()
75 jeb->target = tmp.target + delta; in jump_label_swap()
76 jeb->key = tmp.key + delta; in jump_label_swap()
88 size = (((unsigned long)stop - (unsigned long)start) in jump_label_sort_entries()
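
jump_label_swap() cannot simply copy the two slots, because each jump_entry stores code, target and key as offsets relative to the entry's own address; moving an entry shifts every stored offset by the distance between the slots, so the swap re-biases them by delta, exactly as in lines 70-76 above. A small runnable userspace analogue of that re-biasing (rel_entry and rel_target() are names invented here):

#include <assert.h>
#include <stdio.h>

/* each entry stores its target as an offset relative to the entry itself,
 * like the relative jump_entry layout some architectures use */
struct rel_entry {
        long off;
};

static unsigned long rel_target(const struct rel_entry *e)
{
        return (unsigned long)&e->off + e->off;
}

static void rel_swap(struct rel_entry *a, struct rel_entry *b)
{
        long delta = (unsigned long)a - (unsigned long)b;
        struct rel_entry tmp = *a;

        /* the value moving from slot b to slot a lands 'delta' bytes away
         * from where it used to live, so correct the offset accordingly */
        a->off = b->off - delta;
        b->off = tmp.off + delta;
}

int main(void)
{
        static int x, y;
        struct rel_entry e[2];

        e[0].off = (long)((unsigned long)&x - (unsigned long)&e[0].off);
        e[1].off = (long)((unsigned long)&y - (unsigned long)&e[1].off);

        rel_swap(&e[0], &e[1]);

        /* the entries changed places but still resolve to x and y */
        assert(rel_target(&e[0]) == (unsigned long)&y);
        assert(rel_target(&e[1]) == (unsigned long)&x);
        puts("relative entries survived the swap");
        return 0;
}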
93 static void jump_label_update(struct static_key *key);
104 int static_key_count(struct static_key *key) in static_key_count() argument
107 * -1 means the first static_key_slow_inc() is in progress. in static_key_count()
110 int n = atomic_read(&key->enabled); in static_key_count()
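
static_key_count() folds the -1 "first enable still patching" state back into a sane answer: while the initial static_key_slow_inc() is mid-patch, the key must already count as enabled. A one-function userspace rendering of that rule, with a plain C11 atomic standing in for key->enabled:

#include <stdatomic.h>

/* enabled < 0: first enable in progress; 0: off; > 0: reference count */
static int ukey_count(atomic_int *enabled)
{
        int n = atomic_load(enabled);

        return n >= 0 ? n : 1;  /* report "enabled" while the first inc patches */
}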
116 void static_key_slow_inc_cpuslocked(struct static_key *key) in static_key_slow_inc_cpuslocked() argument
120 STATIC_KEY_CHECK_USE(key); in static_key_slow_inc_cpuslocked()
128 * static_key_enabled(&key) for jumps to be updated properly. in static_key_slow_inc_cpuslocked()
130 * So give a special meaning to negative key->enabled: it sends in static_key_slow_inc_cpuslocked()
131 * static_key_slow_inc() down the slow path, and it is non-zero in static_key_slow_inc_cpuslocked()
135 for (v = atomic_read(&key->enabled); v > 0; v = v1) { in static_key_slow_inc_cpuslocked()
136 v1 = atomic_cmpxchg(&key->enabled, v, v + 1); in static_key_slow_inc_cpuslocked()
142 if (atomic_read(&key->enabled) == 0) { in static_key_slow_inc_cpuslocked()
143 atomic_set(&key->enabled, -1); in static_key_slow_inc_cpuslocked()
144 jump_label_update(key); in static_key_slow_inc_cpuslocked()
149 atomic_set_release(&key->enabled, 1); in static_key_slow_inc_cpuslocked()
151 atomic_inc(&key->enabled); in static_key_slow_inc_cpuslocked()
156 void static_key_slow_inc(struct static_key *key) in static_key_slow_inc() argument
159 static_key_slow_inc_cpuslocked(key); in static_key_slow_inc()
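
The slow_inc fragments show the core trick: a lock-free cmpxchg loop bumps the count while it is already positive, and only the 0 -> 1 transition takes the lock, parks the count at -1 while the branch sites are patched, and finally publishes 1 with release semantics. A runnable userspace analogue using C11 atomics; struct ukey, ukey_patch() and the mutex are stand-ins invented here, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

struct ukey {
        atomic_int enabled;      /* <0: first enable patching, 0: off, >0: refcount */
        pthread_mutex_t lock;    /* stand-in for cpus_read_lock() + jump_label_lock() */
};

/* stand-in for jump_label_update(): rewrite every branch site of this key */
static void ukey_patch(struct ukey *k, bool on) { (void)k; (void)on; }

static void ukey_slow_inc(struct ukey *k)
{
        int v = atomic_load(&k->enabled);

        /* fast path: the key is already enabled, just bump the count;
         * a zero or negative value sends us to the slow path below */
        while (v > 0) {
                if (atomic_compare_exchange_weak(&k->enabled, &v, v + 1))
                        return;
        }

        pthread_mutex_lock(&k->lock);
        if (atomic_load(&k->enabled) == 0) {
                /* -1 makes concurrent incrementers fail the loop above and
                 * queue on the lock until patching is done */
                atomic_store(&k->enabled, -1);
                ukey_patch(k, true);
                /* release: the patched sites are visible before enabled == 1 */
                atomic_store_explicit(&k->enabled, 1, memory_order_release);
        } else {
                atomic_fetch_add(&k->enabled, 1);
        }
        pthread_mutex_unlock(&k->lock);
}

/* e.g. static struct ukey key = { .lock = PTHREAD_MUTEX_INITIALIZER }; */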
164 void static_key_enable_cpuslocked(struct static_key *key) in static_key_enable_cpuslocked() argument
166 STATIC_KEY_CHECK_USE(key); in static_key_enable_cpuslocked()
169 if (atomic_read(&key->enabled) > 0) { in static_key_enable_cpuslocked()
170 WARN_ON_ONCE(atomic_read(&key->enabled) != 1); in static_key_enable_cpuslocked()
175 if (atomic_read(&key->enabled) == 0) { in static_key_enable_cpuslocked()
176 atomic_set(&key->enabled, -1); in static_key_enable_cpuslocked()
177 jump_label_update(key); in static_key_enable_cpuslocked()
181 atomic_set_release(&key->enabled, 1); in static_key_enable_cpuslocked()
187 void static_key_enable(struct static_key *key) in static_key_enable() argument
190 static_key_enable_cpuslocked(key); in static_key_enable()
195 void static_key_disable_cpuslocked(struct static_key *key) in static_key_disable_cpuslocked() argument
197 STATIC_KEY_CHECK_USE(key); in static_key_disable_cpuslocked()
200 if (atomic_read(&key->enabled) != 1) { in static_key_disable_cpuslocked()
201 WARN_ON_ONCE(atomic_read(&key->enabled) != 0); in static_key_disable_cpuslocked()
206 if (atomic_cmpxchg(&key->enabled, 1, 0)) in static_key_disable_cpuslocked()
207 jump_label_update(key); in static_key_disable_cpuslocked()
212 void static_key_disable(struct static_key *key) in static_key_disable() argument
215 static_key_disable_cpuslocked(key); in static_key_disable()
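
static_key_enable() and static_key_disable() are the idempotent on/off counterparts to the refcounted inc/dec: enable only acts on the 0 -> 1 transition, reusing the -1 "patching in progress" sentinel, and disable only repatches if it wins the 1 -> 0 cmpxchg. A sketch of both, reusing the invented ukey/ukey_patch() stand-ins from the slow_inc sketch above:

static void ukey_enable(struct ukey *k)
{
        if (atomic_load(&k->enabled) > 0)
                return;                         /* already on */

        pthread_mutex_lock(&k->lock);
        if (atomic_load(&k->enabled) == 0) {
                /* -1 keeps readers of the count on the "enabled" side
                 * while the sites are being patched */
                atomic_store(&k->enabled, -1);
                ukey_patch(k, true);
                atomic_store_explicit(&k->enabled, 1, memory_order_release);
        }
        pthread_mutex_unlock(&k->lock);
}

static void ukey_disable(struct ukey *k)
{
        int one = 1;

        if (atomic_load(&k->enabled) != 1)
                return;                         /* off, or held by a refcount > 1 */

        pthread_mutex_lock(&k->lock);
        /* only the caller that wins the 1 -> 0 transition repatches */
        if (atomic_compare_exchange_strong(&k->enabled, &one, 0))
                ukey_patch(k, false);
        pthread_mutex_unlock(&k->lock);
}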
220 static bool static_key_slow_try_dec(struct static_key *key) in static_key_slow_try_dec() argument
224 val = atomic_fetch_add_unless(&key->enabled, -1, 1); in static_key_slow_try_dec()
230 * key->enabled is in use by static_key_slow_inc(); a in static_key_slow_try_dec()
239 static void __static_key_slow_dec_cpuslocked(struct static_key *key) in __static_key_slow_dec_cpuslocked() argument
243 if (static_key_slow_try_dec(key)) in __static_key_slow_dec_cpuslocked()
247 if (atomic_dec_and_test(&key->enabled)) in __static_key_slow_dec_cpuslocked()
248 jump_label_update(key); in __static_key_slow_dec_cpuslocked()
252 static void __static_key_slow_dec(struct static_key *key) in __static_key_slow_dec() argument
255 __static_key_slow_dec_cpuslocked(key); in __static_key_slow_dec()
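
The decrement side mirrors the increment: static_key_slow_try_dec() drops the count atomically as long as it will not reach zero (the kernel uses atomic_fetch_add_unless(), rendered below as a CAS loop since C11 has no direct equivalent), and only the final 1 -> 0 transition takes the lock and repatches. Again reusing the invented ukey/ukey_patch() stand-ins from the slow_inc sketch:

/* drop the count unless it is exactly 1; returns the value seen before */
static int ukey_dec_unless_one(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 1) {
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        break;
        }
        return old;     /* old <= 0 here would be an unbalanced dec, as the WARN notes */
}

static void ukey_slow_dec(struct ukey *k)
{
        /* fast path: 3 -> 2, 2 -> 1, ... never touches the patched sites */
        if (ukey_dec_unless_one(&k->enabled) != 1)
                return;

        pthread_mutex_lock(&k->lock);
        /* re-check under the lock; only the final 1 -> 0 repatches */
        if (atomic_fetch_sub(&k->enabled, 1) == 1)
                ukey_patch(k, false);
        pthread_mutex_unlock(&k->lock);
}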
261 struct static_key_deferred *key = in jump_label_update_timeout() local
263 __static_key_slow_dec(&key->key); in jump_label_update_timeout()
267 void static_key_slow_dec(struct static_key *key) in static_key_slow_dec() argument
269 STATIC_KEY_CHECK_USE(key); in static_key_slow_dec()
270 __static_key_slow_dec(key); in static_key_slow_dec()
274 void static_key_slow_dec_cpuslocked(struct static_key *key) in static_key_slow_dec_cpuslocked() argument
276 STATIC_KEY_CHECK_USE(key); in static_key_slow_dec_cpuslocked()
277 __static_key_slow_dec_cpuslocked(key); in static_key_slow_dec_cpuslocked()
280 void __static_key_slow_dec_deferred(struct static_key *key, in __static_key_slow_dec_deferred() argument
284 STATIC_KEY_CHECK_USE(key); in __static_key_slow_dec_deferred()
286 if (static_key_slow_try_dec(key)) in __static_key_slow_dec_deferred()
293 void __static_key_deferred_flush(void *key, struct delayed_work *work) in __static_key_deferred_flush() argument
295 STATIC_KEY_CHECK_USE(key); in __static_key_deferred_flush()
300 void jump_label_rate_limit(struct static_key_deferred *key, in jump_label_rate_limit() argument
303 STATIC_KEY_CHECK_USE(key); in jump_label_rate_limit()
304 key->timeout = rl; in jump_label_rate_limit()
305 INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); in jump_label_rate_limit()
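
jump_label_rate_limit() turns the decrement into deferred work so that a key which is toggled rapidly does not repatch kernel text on every put: the final dec is queued and only applied after the configured timeout. A minimal usage sketch of that API; my_key and the feature functions are hypothetical, and the declaration/initialization details follow the usual <linux/jump_label_ratelimit.h> pattern, which may differ by kernel version:

#include <linux/jump_label_ratelimit.h>

static struct static_key_deferred my_key;        /* hypothetical key */

static void my_feature_setup(void)
{
        /* batch the decrements: a final put only takes effect HZ jiffies later */
        jump_label_rate_limit(&my_key, HZ);
}

static void my_feature_get(void)
{
        static_key_slow_inc(&my_key.key);        /* enable immediately */
}

static void my_feature_put(void)
{
        static_key_slow_dec_deferred(&my_key);   /* disable after the timeout */
}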
343 static inline struct jump_entry *static_key_entries(struct static_key *key) in static_key_entries() argument
345 WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED); in static_key_entries()
346 return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK); in static_key_entries()
349 static inline bool static_key_type(struct static_key *key) in static_key_type() argument
351 return key->type & JUMP_TYPE_TRUE; in static_key_type()
354 static inline bool static_key_linked(struct static_key *key) in static_key_linked() argument
356 return key->type & JUMP_TYPE_LINKED; in static_key_linked()
359 static inline void static_key_clear_linked(struct static_key *key) in static_key_clear_linked() argument
361 key->type &= ~JUMP_TYPE_LINKED; in static_key_clear_linked()
364 static inline void static_key_set_linked(struct static_key *key) in static_key_set_linked() argument
366 key->type |= JUMP_TYPE_LINKED; in static_key_set_linked()
378 static void static_key_set_entries(struct static_key *key, in static_key_set_entries() argument
384 type = key->type & JUMP_TYPE_MASK; in static_key_set_entries()
385 key->entries = entries; in static_key_set_entries()
386 key->type |= type; in static_key_set_entries()
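
static_key_entries(), static_key_type(), static_key_linked() and static_key_set_entries() all rely on one space trick: the jump_entry pointer is at least 4-byte aligned, so its two low bits are free to hold the JUMP_TYPE_TRUE and JUMP_TYPE_LINKED flags, and every setter masks them out and puts them back. A compact userspace rendering of the same tagged-pointer pattern (all names invented here):

#include <assert.h>
#include <stdint.h>
#include <stdbool.h>

enum {
        UTYPE_TRUE   = 1,       /* default-true key */
        UTYPE_LINKED = 2,       /* entries live in a linked list of modules */
        UTYPE_MASK   = 3,
};

struct uentry;                  /* opaque; only its (aligned) address matters */

struct ukey {
        uintptr_t type;         /* entries pointer with the flag bits folded in */
};

static struct uentry *ukey_entries(const struct ukey *k)
{
        return (struct uentry *)(k->type & ~(uintptr_t)UTYPE_MASK);
}

static bool ukey_true(const struct ukey *k)   { return k->type & UTYPE_TRUE; }
static bool ukey_linked(const struct ukey *k) { return k->type & UTYPE_LINKED; }

static void ukey_set_entries(struct ukey *k, struct uentry *e)
{
        uintptr_t flags = k->type & UTYPE_MASK;         /* keep the flag bits */

        assert(((uintptr_t)e & UTYPE_MASK) == 0);       /* needs >= 4-byte alignment */
        k->type = (uintptr_t)e | flags;
}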
391 struct static_key *key = jump_entry_key(entry); in jump_label_type() local
392 bool enabled = static_key_enabled(key); in jump_label_type()
402 * Cannot update code that was in an init text area. in jump_label_can_update()
409 * This skips patching built-in __exit, which in jump_label_can_update()
413 * Skipping built-in __exit is fine since it in jump_label_can_update()
426 static void __jump_label_update(struct static_key *key, in __jump_label_update() argument
431 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { in __jump_label_update()
437 static void __jump_label_update(struct static_key *key, in __jump_label_update() argument
442 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { in __jump_label_update()
463 struct static_key *key = NULL; in jump_label_init() local
494 if (iterk == key) in jump_label_init()
497 key = iterk; in jump_label_init()
498 static_key_set_entries(key, iter); in jump_label_init()
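
Because the table was sorted by key earlier, all the entries of one static key form a contiguous run: __jump_label_update() walks forward while jump_entry_key() still matches, and jump_label_init() records the first entry of each run in the key via static_key_set_entries() so later updates know where that run starts. A small sketch of both loops over a hypothetical sorted array, with flat uentry/ukey types and a patch_site() stub:

#include <stdbool.h>
#include <stddef.h>

struct uentry;

struct ukey {
        struct uentry *entries;         /* first entry of this key's run */
};

struct uentry {
        struct ukey *key;               /* flat pointer; the kernel stores an offset */
        /* ... patch-site address, branch target ... */
};

static void patch_site(struct uentry *e, bool on) { (void)e; (void)on; }

/* rewrite every site of 'key'; 'entry' is the first entry of its run and
 * the run ends where the key changes or the table ends */
static void update_key(struct ukey *key, struct uentry *entry,
                       struct uentry *stop, bool on)
{
        for (; entry < stop && entry->key == key; entry++)
                patch_site(entry, on);
}

/* boot-time pass over the sorted table: remember where each run begins */
static void record_runs(struct uentry *start, struct uentry *stop)
{
        struct ukey *key = NULL;
        struct uentry *iter;

        for (iter = start; iter < stop; iter++) {
                if (iter->key == key)
                        continue;       /* still inside the previous key's run */
                key = iter->key;
                key->entries = iter;
        }
}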
509 struct static_key *key = jump_entry_key(entry); in jump_label_init_type() local
510 bool type = static_key_type(key); in jump_label_init_type()
523 static inline struct static_key_mod *static_key_mod(struct static_key *key) in static_key_mod() argument
525 WARN_ON_ONCE(!static_key_linked(key)); in static_key_mod()
526 return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK); in static_key_mod()
530 * key->type and key->next are the same via union.
531 * This sets key->next and preserves the type bits.
535 static void static_key_set_mod(struct static_key *key, in static_key_set_mod() argument
541 type = key->type & JUMP_TYPE_MASK; in static_key_set_mod()
542 key->next = mod; in static_key_set_mod()
543 key->type |= type; in static_key_set_mod()
561 ret = __jump_label_text_reserved(mod->jump_entries, in __jump_label_mod_text_reserved()
562 mod->jump_entries + mod->num_jump_entries, in __jump_label_mod_text_reserved()
563 start, end, mod->state == MODULE_STATE_COMING); in __jump_label_mod_text_reserved()
570 static void __jump_label_mod_update(struct static_key *key) in __jump_label_mod_update() argument
574 for (mod = static_key_mod(key); mod; mod = mod->next) { in __jump_label_mod_update()
582 if (!mod->entries) in __jump_label_mod_update()
585 m = mod->mod; in __jump_label_mod_update()
589 stop = m->jump_entries + m->num_jump_entries; in __jump_label_mod_update()
590 __jump_label_update(key, mod->entries, stop, in __jump_label_mod_update()
591 m && m->state == MODULE_STATE_COMING); in __jump_label_mod_update()
597 struct jump_entry *iter_start = mod->jump_entries; in jump_label_add_module()
598 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; in jump_label_add_module()
600 struct static_key *key = NULL; in jump_label_add_module() local
617 if (iterk == key) in jump_label_add_module()
620 key = iterk; in jump_label_add_module()
621 if (within_module((unsigned long)key, mod)) { in jump_label_add_module()
622 static_key_set_entries(key, iter); in jump_label_add_module()
627 return -ENOMEM; in jump_label_add_module()
628 if (!static_key_linked(key)) { in jump_label_add_module()
633 return -ENOMEM; in jump_label_add_module()
636 jlm2->mod = __module_address((unsigned long)key); in jump_label_add_module()
638 jlm2->entries = static_key_entries(key); in jump_label_add_module()
639 jlm2->next = NULL; in jump_label_add_module()
640 static_key_set_mod(key, jlm2); in jump_label_add_module()
641 static_key_set_linked(key); in jump_label_add_module()
643 jlm->mod = mod; in jump_label_add_module()
644 jlm->entries = iter; in jump_label_add_module()
645 jlm->next = static_key_mod(key); in jump_label_add_module()
646 static_key_set_mod(key, jlm); in jump_label_add_module()
647 static_key_set_linked(key); in jump_label_add_module()
651 __jump_label_update(key, iter, iter_stop, true); in jump_label_add_module()
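
When a module uses a static key it does not own, jump_label_add_module() hangs the module's entries off that key as a linked list of static_key_mod nodes; the first time this happens the key is still in "single array" mode, so its existing entries are first wrapped in an extra node (jlm2 above) before the module's node is pushed on front and the key is marked linked. A userspace sketch of that lazy promotion; the ukey2/unode names are inventions, kmalloc is replaced by malloc, and the kernel's union/flag-bit packing is split into separate fields for clarity:

#include <stdlib.h>
#include <stdbool.h>

struct uentry;                          /* opaque entry array element */

struct unode {                          /* analogue of struct static_key_mod */
        struct unode  *next;
        struct uentry *entries;
        void          *owner;           /* analogue of jlm->mod */
};

struct ukey2 {
        bool           linked;          /* analogue of JUMP_TYPE_LINKED */
        struct uentry *entries;         /* valid while !linked */
        struct unode  *mods;            /* valid while linked */
};

/* attach a foreign module's entry run to 'key' */
static int ukey2_link(struct ukey2 *key, struct uentry *entries, void *owner)
{
        struct unode *n = malloc(sizeof(*n));

        if (!n)
                return -1;

        if (!key->linked) {
                /* first foreign user: wrap the key's own entries in a node
                 * so everything lives on one list from now on */
                struct unode *own = malloc(sizeof(*own));

                if (!own) {
                        free(n);
                        return -1;
                }
                own->entries = key->entries;
                own->owner   = NULL;    /* built-in / original owner */
                own->next    = NULL;
                key->mods    = own;
                key->linked  = true;
        }

        /* push the new module's run on the front of the list */
        n->entries = entries;
        n->owner   = owner;
        n->next    = key->mods;
        key->mods  = n;
        return 0;
}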
659 struct jump_entry *iter_start = mod->jump_entries; in jump_label_del_module()
660 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; in jump_label_del_module()
662 struct static_key *key = NULL; in jump_label_del_module() local
666 if (jump_entry_key(iter) == key) in jump_label_del_module()
669 key = jump_entry_key(iter); in jump_label_del_module()
671 if (within_module((unsigned long)key, mod)) in jump_label_del_module()
675 if (WARN_ON(!static_key_linked(key))) in jump_label_del_module()
678 prev = &key->next; in jump_label_del_module()
679 jlm = static_key_mod(key); in jump_label_del_module()
681 while (jlm && jlm->mod != mod) { in jump_label_del_module()
682 prev = &jlm->next; in jump_label_del_module()
683 jlm = jlm->next; in jump_label_del_module()
690 if (prev == &key->next) in jump_label_del_module()
691 static_key_set_mod(key, jlm->next); in jump_label_del_module()
693 *prev = jlm->next; in jump_label_del_module()
697 jlm = static_key_mod(key); in jump_label_del_module()
699 if (jlm->next == NULL) { in jump_label_del_module()
700 static_key_set_entries(key, jlm->entries); in jump_label_del_module()
701 static_key_clear_linked(key); in jump_label_del_module()
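
jump_label_del_module() undoes that: it walks the list with a pointer-to-pointer (prev) so the matching node can be unlinked whether it is the head or in the middle, and if only the original wrapper node is left afterwards the key is demoted back to plain "single array" mode. The same walk over the invented ukey2/unode types from the previous sketch:

/* detach 'owner' from the key; demote back to array mode if possible */
static void ukey2_unlink(struct ukey2 *key, void *owner)
{
        struct unode **prev = &key->mods;
        struct unode *n = key->mods;

        if (!key->linked)
                return;

        while (n && n->owner != owner) {
                prev = &n->next;        /* remember who points at us */
                n = n->next;
        }
        if (!n)
                return;                 /* this owner never linked anything */

        *prev = n->next;                /* unlink: works for head and middle alike */
        free(n);

        /* if only the original wrapper node remains, unwrap it again */
        n = key->mods;
        if (n && !n->next) {
                key->entries = n->entries;
                key->linked  = false;
                free(n);
        }
}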
750 * jump_label_text_reserved - check if addr range is reserved
755 * overlaps with any of the jump label patch addresses. Code
777 static void jump_label_update(struct static_key *key) in jump_label_update() argument
785 if (static_key_linked(key)) { in jump_label_update()
786 __jump_label_mod_update(key); in jump_label_update()
791 mod = __module_address((unsigned long)key); in jump_label_update()
793 stop = mod->jump_entries + mod->num_jump_entries; in jump_label_update()
794 init = mod->state == MODULE_STATE_COMING; in jump_label_update()
798 entry = static_key_entries(key); in jump_label_update()
801 __jump_label_update(key, entry, stop, init); in jump_label_update()
813 WARN_ON(static_key_enabled(&sk_true.key) != true); in jump_label_test()
814 WARN_ON(static_key_enabled(&sk_false.key) != false); in jump_label_test()
824 WARN_ON(static_key_enabled(&sk_true.key) == true); in jump_label_test()
825 WARN_ON(static_key_enabled(&sk_false.key) == false); in jump_label_test()