Lines matching "cs" in kernel/time/clocksource.c

Only the lines that contain a match are shown, so the fragments below are
non-contiguous; the leading number on each line is its line number in the file.
1 // SPDX-License-Identifier: GPL-2.0+
20 #include "tick-internal.h"
24 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
59 sftacc--;
66 for (sft = 32; sft > 0; sft--) {
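
The two fragments above are from clocks_calc_mult_shift(), which picks a
mult/shift pair so that later cycle-to-nanosecond conversions need only one
multiply and one shift. A minimal userspace sketch of that scaled math; the
19.2 MHz rate, the shift of 24, and the cyc2ns() helper name are illustrative
assumptions, not kernel defaults:

#include <stdint.h>
#include <stdio.h>

/* ns = (cycles * mult) >> shift, the form clocksource_cyc2ns() uses.
 * The product must stay below 2^64; the kernel tracks a max_cycles
 * bound for exactly that reason (see clocks_calc_max_nsecs() below). */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* Hypothetical 19.2 MHz counter: mult / 2^shift ~= 52.083 ns/cycle. */
	uint32_t shift = 24;
	uint32_t mult = (uint32_t)(((uint64_t)1000000000 << shift) / 19200000);

	printf("mult=%u shift=%u\n", mult, shift);
	printf("one second of cycles -> %llu ns\n",
	       (unsigned long long)cyc2ns(19200000, mult, shift));
	return 0;
}

A larger shift buys precision but shrinks the range before the 64-bit product
overflows; that trade-off is what the sftacc/maxsec logic above is balancing.
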
78 /*[Clocksource internal variables]---------
88 * Name of the user-specified clocksource.
100 * Also a default for cs->uncertainty_margin when registering clocks.
108 * a lower bound for cs->uncertainty_margin values when registering clocks.
141 static void __clocksource_change_rating(struct clocksource *cs, int rating);
166 static void __clocksource_unstable(struct clocksource *cs)
168 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
169 cs->flags |= CLOCK_SOURCE_UNSTABLE;
173 * re-rate and re-select.
175 if (list_empty(&cs->list)) {
176 cs->rating = 0;
180 if (cs->mark_unstable)
181 cs->mark_unstable(cs);
189 * clocksource_mark_unstable - mark clocksource unstable via watchdog
190 * @cs: clocksource to be marked unstable
193 * it defers demotion and re-selection to a kthread.
195 void clocksource_mark_unstable(struct clocksource *cs)
200 if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
201 if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
202 list_add(&cs->wd_list, &watchdog_list);
203 __clocksource_unstable(cs);
220 static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
228 *wdnow = watchdog->read(watchdog);
229 *csnow = cs->read(cs);
230 wd_end = watchdog->read(watchdog);
231 wd_end2 = watchdog->read(watchdog);
234 wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
235 wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
236 watchdog->shift);
240 smp_processor_id(), watchdog->name, nretries);
250 * If consecutive WD read-back delay > WATCHDOG_MAX_SKEW/2,
254 wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
255 wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
260 …pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable… in cs_watchdog_read()
261 smp_processor_id(), watchdog->name, wd_delay, nretries); in cs_watchdog_read()
265 pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n", in cs_watchdog_read()
266 smp_processor_id(), watchdog->name, wd_seq_delay); in cs_watchdog_read()
267 pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n", in cs_watchdog_read()
268 cs->name, wd_delay); in cs_watchdog_read()
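
cs_watchdog_read() brackets every clocksource read between watchdog reads and
retries when the bracket itself took too long: a wide gap means the sample was
disturbed (SMI, NMI, vCPU preemption) and says nothing about clock skew. A
rough userspace analogue of that retry pattern, with CLOCK_MONOTONIC standing
in for both clocks and a demo threshold in place of WATCHDOG_MAX_SKEW:

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MAX_RETRIES	3
#define MAX_READ_NS	2000	/* demo threshold, not WATCHDOG_MAX_SKEW */

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Take a reading bracketed by reference reads; retry when the bracket
 * is too wide, because the sample was probably interrupted mid-read. */
static int bracketed_read(uint64_t *val)
{
	for (int i = 0; i < MAX_RETRIES; i++) {
		uint64_t t0 = now_ns();
		*val = now_ns();		/* stand-in for cs->read(cs) */
		uint64_t t1 = now_ns();

		if (t1 - t0 <= MAX_READ_NS)
			return 0;		/* clean sample */
	}
	return -1;	/* persistently disturbed: don't judge skew from this */
}

int main(void)
{
	uint64_t v;
	printf("bracketed_read: %s\n", bracketed_read(&v) ? "noisy" : "ok");
	return 0;
}
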
306 * Randomly select the specified number of CPUs. If the same
309 * situations where verify_n_cpus is greater than the number of
314 cpu = cpumask_next(cpu - 1, cpu_online_mask);
327 struct clocksource *cs = (struct clocksource *)csin;
329 csnow_mid = cs->read(cs);
332 void clocksource_verify_percpu(struct clocksource *cs)
349 pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
353 …pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu,… in clocksource_verify_percpu()
357 csnow_begin = cs->read(cs);
358 smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
359 csnow_end = cs->read(cs);
360 delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
363 delta = (csnow_end - csnow_mid) & cs->mask;
366 delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
367 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
377 cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
380 cpumask_pr_args(&cpus_behind), testcpu, cs->name);
382 pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
383 testcpu, cs_nsec_min, cs_nsec_max, cs->name);
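
clocksource_verify_percpu() reads the clock on the measuring CPU, has each
chosen remote CPU read it in between, and flags any CPU whose reading falls
outside [begin, end]. The sign tests below mirror that bracketing for a full
64-bit counter; with a narrower mask the masked delta can never be negative,
which the kernel's (s64) cast has as a limitation too:

#include <stdint.h>
#include <stdio.h>

/* Classify a remote reading against the local [begin, end] bracket on a
 * wrapping counter (a stand-in for the checks surrounding
 * clocksource_verify_one_cpu()). */
static void classify(uint64_t begin, uint64_t mid, uint64_t end, uint64_t mask)
{
	int64_t d1 = (int64_t)((mid - begin) & mask);	/* < 0: CPU is behind */
	int64_t d2 = (int64_t)((end - mid) & mask);	/* < 0: CPU is ahead */

	if (d1 < 0)
		printf("behind by %lld\n", (long long)-d1);
	else if (d2 < 0)
		printf("ahead by %lld\n", (long long)-d2);
	else
		printf("in bounds\n");
}

int main(void)
{
	uint64_t mask = ~0ull;

	classify(1000, 990, 1100, mask);	/* behind */
	classify(1000, 1150, 1100, mask);	/* ahead */
	classify(1000, 1050, 1100, mask);	/* ok */
	return 0;
}
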
392 struct clocksource *cs;
402 list_for_each_entry(cs, &watchdog_list, wd_list) {
405 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
411 read_ret = cs_watchdog_read(cs, &csnow, &wdnow);
416 __clocksource_unstable(cs);
421 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
423 cs->flags |= CLOCK_SOURCE_WATCHDOG;
424 cs->wd_last = wdnow;
425 cs->cs_last = csnow;
429 delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
430 wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
431 watchdog->shift);
433 delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
434 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
435 wdlast = cs->wd_last; /* save these in case we print them */
436 cslast = cs->cs_last;
437 cs->cs_last = csnow;
438 cs->wd_last = wdnow;
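
Both interval computations above go through clocksource_delta(), which masks
the subtraction so that a counter wrapping between two readings still yields
the right distance. In isolation (newer kernels optionally add a
negative-motion check on top; this is the basic form):

#include <stdint.h>
#include <stdio.h>

static uint64_t clocksource_delta(uint64_t now, uint64_t last, uint64_t mask)
{
	return (now - last) & mask;	/* wrap-safe on a 'mask'-wide counter */
}

int main(void)
{
	uint64_t mask = 0xffffffffull;	/* a 32-bit counter */

	/* The counter wrapped past zero between the two readings. */
	printf("delta=%llu\n", (unsigned long long)
	       clocksource_delta(0x10, 0xfffffff0ull, mask));	/* 32 */
	return 0;
}
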
444 md = cs->uncertainty_margin + watchdog->uncertainty_margin;
445 if (abs(cs_nsec - wd_nsec) > md) {
447 smp_processor_id(), cs->name);
449 watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
451 cs->name, cs_nsec, csnow, cslast, cs->mask);
452 if (curr_clocksource == cs)
453 pr_warn(" '%s' is current clocksource.\n", cs->name);
455 pr_warn(" '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
458 __clocksource_unstable(cs);
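
This is the core of the watchdog test: one wall-clock interval measured by
both clocks must agree to within the sum of the two uncertainty margins, or
the watched clocksource is declared unstable. In sketch form, with made-up
numbers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int64_t cs_nsec = 500000000;	/* interval per the watched clock */
	int64_t wd_nsec = 500600000;	/* same interval per the watchdog */
	int64_t md      = 500000;	/* cs margin + watchdog margin */

	if (llabs(cs_nsec - wd_nsec) > md)
		printf("skew %lld ns exceeds margin %lld ns: unstable\n",
		       (long long)(cs_nsec - wd_nsec), (long long)md);
	else
		printf("within margin\n");
	return 0;
}
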
462 if (cs == curr_clocksource && cs->tick_stable)
463 cs->tick_stable(cs);
465 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
466 (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
467 (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
468 /* Mark it valid for high-res. */
469 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
486 if (cs != curr_clocksource) {
487 cs->flags |= CLOCK_SOURCE_RESELECT;
542 struct clocksource *cs;
544 list_for_each_entry(cs, &watchdog_list, wd_list)
545 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
553 static void clocksource_enqueue_watchdog(struct clocksource *cs)
555 INIT_LIST_HEAD(&cs->wd_list);
557 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
558 /* cs is a clocksource to be watched. */
559 list_add(&cs->wd_list, &watchdog_list);
560 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
562 /* cs is a watchdog. */
563 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
564 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
570 struct clocksource *cs, *old_wd;
579 list_for_each_entry(cs, &clocksource_list, list) {
580 /* cs is a clocksource to be watched. */
581 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
585 if (fallback && cs == old_wd)
589 if (!watchdog || cs->rating > watchdog->rating)
590 watchdog = cs;
605 static void clocksource_dequeue_watchdog(struct clocksource *cs)
607 if (cs != watchdog) {
608 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
609 /* cs is a watched clocksource. */
610 list_del_init(&cs->wd_list);
619 struct clocksource *cs, *tmp;
623 /* Do any required per-CPU skew verification. */
625 curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
626 curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
630 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
631 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
632 list_del_init(&cs->wd_list);
633 __clocksource_change_rating(cs, 0);
636 if (cs->flags & CLOCK_SOURCE_RESELECT) {
637 cs->flags &= ~CLOCK_SOURCE_RESELECT;
657 static bool clocksource_is_watchdog(struct clocksource *cs)
659 return cs == watchdog;
664 static void clocksource_enqueue_watchdog(struct clocksource *cs)
666 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
667 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
671 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
674 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
675 void clocksource_mark_unstable(struct clocksource *cs) { }
682 static bool clocksource_is_suspend(struct clocksource *cs)
684 return cs == suspend_clocksource;
687 static void __clocksource_suspend_select(struct clocksource *cs)
692 if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
700 if (cs->suspend || cs->resume) {
702 cs->name);
706 if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
707 suspend_clocksource = cs;
711 * clocksource_suspend_select - Select the best clocksource for suspend timing
716 struct clocksource *cs, *old_suspend;
722 list_for_each_entry(cs, &clocksource_list, list) {
724 if (fallback && cs == old_suspend)
727 __clocksource_suspend_select(cs);
732 * clocksource_start_suspend_timing - Start measuring the suspend timing
733 * @cs: current clocksource from timekeeping
740 * that means processes are frozen, non-boot cpus and interrupts are disabled
744 void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
754 if (clocksource_is_suspend(cs)) {
759 if (suspend_clocksource->enable &&
760 suspend_clocksource->enable(suspend_clocksource)) {
761 pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
765 suspend_start = suspend_clocksource->read(suspend_clocksource);
769 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
770 * @cs: current clocksource from timekeeping
782 u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
794 if (clocksource_is_suspend(cs))
797 now = suspend_clocksource->read(suspend_clocksource);
801 suspend_clocksource->mask);
802 nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
803 suspend_clocksource->shift);
810 if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
811 suspend_clocksource->disable(suspend_clocksource);
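
Because a suspend interval can be huge, the conversion here uses
mul_u64_u32_shr(), which keeps a 128-bit intermediate rather than the bare
64-bit multiply of the hot path. A userspace stand-in for that helper (it
relies on the GCC/Clang unsigned __int128 extension; the counter parameters
are the illustrative ones from earlier):

#include <stdint.h>
#include <stdio.h>

/* 128-bit-safe (a * mul) >> shift, mimicking the kernel helper. */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
	/* A hypothetical 24-day suspend on a 19.2 MHz counter. */
	uint64_t delta = 19200000ull * 86400 * 24;
	uint32_t mult = 873813333;
	unsigned int shift = 24;

	/* delta * mult ~= 3.5e22, far beyond 2^64, yet converts fine. */
	printf("suspended for %llu ns\n",
	       (unsigned long long)mul_u64_u32_shr(delta, mult, shift));
	return 0;
}
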
817 * clocksource_suspend - suspend the clocksource(s)
821 struct clocksource *cs;
823 list_for_each_entry_reverse(cs, &clocksource_list, list)
824 if (cs->suspend)
825 cs->suspend(cs);
829 * clocksource_resume - resume the clocksource(s)
833 struct clocksource *cs;
835 list_for_each_entry(cs, &clocksource_list, list)
836 if (cs->resume)
837 cs->resume(cs);
843 * clocksource_touch_watchdog - Update watchdog
855 * clocksource_max_adjustment - Returns max adjustment amount
856 * @cs: Pointer to clocksource
859 static u32 clocksource_max_adjustment(struct clocksource *cs)
865 ret = (u64)cs->mult * 11;
871 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
880 * return half the number of nanoseconds the hardware counter can technically
890 * Calculate the maximum number of cycles that we can pass to the
891 * cyc2ns() function without overflowing a 64-bit result.
897 * The actual maximum number of cycles we can defer the clocksource is
903 max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
916 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
917 * @cs: Pointer to clocksource to be updated
920 static inline void clocksource_update_max_deferment(struct clocksource *cs)
922 cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
923 cs->maxadj, cs->mask,
924 &cs->max_cycles);
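
clocks_calc_max_nsecs() finds the largest cycle count whose conversion cannot
overflow 64 bits, converts it with mult biased by maxadj (the
clocksource_max_adjustment() fragment above gives NTP roughly 11% of swing),
and returns half the result as headroom; clocksource_update_max_deferment()
stores that as max_idle_ns. The same arithmetic with the illustrative counter
from before:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mult = 873813333, shift = 24;
	uint32_t maxadj = (uint64_t)mult * 11 / 100;	/* ~11% NTP swing */
	uint64_t mask = ~0ull;				/* 64-bit counter */

	/* Most cycles that (cycles * (mult + maxadj)) can take without
	 * overflowing 64 bits, clamped to the counter width. */
	uint64_t max_cycles = UINT64_MAX / (mult + maxadj);
	if (max_cycles > mask)
		max_cycles = mask;

	/* Convert at the slow-side rate, then keep only half as margin. */
	uint64_t max_nsecs = ((max_cycles * (mult - maxadj)) >> shift) >> 1;

	printf("max_cycles=%llu max_idle_ns=%llu (~%llu s)\n",
	       (unsigned long long)max_cycles,
	       (unsigned long long)max_nsecs,
	       (unsigned long long)(max_nsecs / 1000000000ull));
	return 0;
}
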
929 struct clocksource *cs;
939 list_for_each_entry(cs, &clocksource_list, list) {
940 if (skipcur && cs == curr_clocksource)
942 if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
944 return cs;
952 struct clocksource *best, *cs;
963 list_for_each_entry(cs, &clocksource_list, list) {
964 if (skipcur && cs == curr_clocksource)
966 if (strcmp(cs->name, override_name) != 0)
969 * Check to make sure we don't switch to a non-highres
973 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
975 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
976 …pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/N… in __clocksource_select()
977 cs->name);
984 pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
985 cs->name);
989 best = cs;
995 pr_info("Switched to clocksource %s\n", best->name);
1001 * clocksource_select - Select the best clocksource available
1019 * clocksource_done_booting - Called near the end of core bootup
1043 static void clocksource_enqueue(struct clocksource *cs)
1050 if (tmp->rating < cs->rating)
1052 entry = &tmp->list;
1054 list_add(&cs->list, entry);
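
clocksource_enqueue() keeps clocksource_list sorted by descending rating,
inserting the newcomer after the last entry that rates at least as high, so
equal ratings preserve registration order. The same walk on a plain singly
linked list; the names and ratings are only examples:

#include <stdio.h>

struct src {
	const char *name;
	int rating;
	struct src *next;
};

/* Insert before the first node with a strictly lower rating. */
static void enqueue(struct src **head, struct src *cs)
{
	struct src **pp = head;

	while (*pp && (*pp)->rating >= cs->rating)
		pp = &(*pp)->next;
	cs->next = *pp;
	*pp = cs;
}

int main(void)
{
	struct src tsc = {"tsc", 300}, hpet = {"hpet", 250}, jif = {"jiffies", 1};
	struct src *head = NULL;

	enqueue(&head, &hpet);
	enqueue(&head, &jif);
	enqueue(&head, &tsc);
	for (struct src *s = head; s; s = s->next)
		printf("%s (%d)\n", s->name, s->rating);	/* tsc, hpet, jiffies */
	return 0;
}
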
1058 * __clocksource_update_freq_scale - Used to update clocksource with new freq
1059 * @cs: clocksource to be registered
1063 * This should only be called from the clocksource->enable() method.
1069 void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
1074 * Default clocksources are *special* and self-define their mult/shift.
1079 * Calc the maximum number of seconds which we can run before
1080 * wrapping around. For clocksources which have a mask > 32-bit
1084 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
1087 sec = cs->mask;
1092 else if (sec > 600 && cs->mask > UINT_MAX)
1095 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
1101 * If both scale and freq are non-zero, calculate the clock
1104 * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the
1109 if (scale && freq && !cs->uncertainty_margin) {
1110 cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
1111 if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
1112 cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
1113 } else if (!cs->uncertainty_margin) {
1114 cs->uncertainty_margin = WATCHDOG_THRESHOLD;
1116 WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);
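
When scale and freq are supplied at registration, the default uncertainty
margin is one counter period (NSEC_PER_SEC / (scale * freq)), floored at
twice the maximum tolerated read skew; clocks registered without a frequency
simply get WATCHDOG_THRESHOLD. A sketch of the first branch; the 125 us skew
constant is a placeholder, not the kernel's value:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC		1000000000u
#define WATCHDOG_MAX_SKEW	(125 * 1000)	/* placeholder value */

static uint32_t default_margin(uint32_t scale, uint32_t freq)
{
	uint32_t m = NSEC_PER_SEC / (scale * freq);	/* one period, in ns */

	if (m < 2 * WATCHDOG_MAX_SKEW)
		m = 2 * WATCHDOG_MAX_SKEW;
	return m;
}

int main(void)
{
	printf("32768 Hz: %u ns (clamped)\n", default_margin(1, 32768));
	printf("1 kHz:    %u ns (one period)\n", default_margin(1, 1000));
	return 0;
}
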
1122 cs->maxadj = clocksource_max_adjustment(cs);
1123 while (freq && ((cs->mult + cs->maxadj < cs->mult)
1124 || (cs->mult - cs->maxadj > cs->mult))) {
1125 cs->mult >>= 1;
1126 cs->shift--;
1127 cs->maxadj = clocksource_max_adjustment(cs);
1131 * Only warn for *special* clocksources that self-define
1134 WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
1136 cs->name);
1138 clocksource_update_max_deferment(cs);
1141 cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
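
The loop above detects u32 overflow with the idiom mult + maxadj < mult and,
when it fires, halves mult while decrementing shift, which preserves the
ratio mult/2^shift while restoring headroom for NTP adjustment. Isolated,
with an artificial starting point:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mult = 0xf0000000u, shift = 32;	/* artificially large */
	uint32_t maxadj = (uint64_t)mult * 11 / 100;

	/* 'mult + maxadj < mult' is true exactly when the sum wrapped. */
	while (mult + maxadj < mult || mult - maxadj > mult) {
		mult >>= 1;
		shift--;
		maxadj = (uint64_t)mult * 11 / 100;
	}
	printf("mult=%#x shift=%u maxadj=%#x\n", mult, shift, maxadj);
	return 0;
}
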
1146 * __clocksource_register_scale - Used to install new clocksources
1147 * @cs: clocksource to be registered
1151 * Returns -EBUSY if registration fails, zero otherwise.
1156 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
1160 clocksource_arch_init(cs);
1162 if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
1163 cs->id = CSID_GENERIC;
1164 if (cs->vdso_clock_mode < 0 ||
1165 cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
1167 cs->name, cs->vdso_clock_mode);
1168 cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
1172 __clocksource_update_freq_scale(cs, scale, freq);
1178 clocksource_enqueue(cs);
1179 clocksource_enqueue_watchdog(cs);
1184 __clocksource_suspend_select(cs);
1190 static void __clocksource_change_rating(struct clocksource *cs, int rating)
1192 list_del(&cs->list);
1193 cs->rating = rating;
1194 clocksource_enqueue(cs);
1198 * clocksource_change_rating - Change the rating of a registered clocksource
1199 * @cs: clocksource to be changed
1202 void clocksource_change_rating(struct clocksource *cs, int rating)
1208 __clocksource_change_rating(cs, rating);
1219 * Unbind clocksource @cs. Called with clocksource_mutex held
1221 static int clocksource_unbind(struct clocksource *cs)
1225 if (clocksource_is_watchdog(cs)) {
1228 if (clocksource_is_watchdog(cs))
1229 return -EBUSY;
1232 if (cs == curr_clocksource) {
1235 if (curr_clocksource == cs)
1236 return -EBUSY;
1239 if (clocksource_is_suspend(cs)) {
1249 clocksource_dequeue_watchdog(cs);
1250 list_del_init(&cs->list);
1257 * clocksource_unregister - remove a registered clocksource
1258 * @cs: clocksource to be unregistered
1260 int clocksource_unregister(struct clocksource *cs)
1265 if (!list_empty(&cs->list))
1266 ret = clocksource_unbind(cs);
1274 * current_clocksource_show - sysfs interface for current clocksource
1288 count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
1300 return -EINVAL;
1303 if (buf[cnt-1] == '\n')
1304 cnt--;
1312 * current_clocksource_store - interface for manually overriding clocksource
1340 * unbind_clocksource_store - interface for manually unbinding clocksource
1352 struct clocksource *cs;
1360 ret = -ENODEV;
1362 list_for_each_entry(cs, &clocksource_list, list) {
1363 if (strcmp(cs->name, name))
1365 ret = clocksource_unbind(cs);
1375 * available_clocksource_show - sysfs interface for listing clocksource
1392 * Don't show non-HRES clocksource if the tick code is
1396 (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
1398 max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
1399 "%s ", src->name);
1404 max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
1443 * boot_override_clocksource - boot clock override
1461 * boot_override_clock - Compatibility layer for deprecated boot option
1470 pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n"); in boot_override_clock()
1473 pr_warn("clock= boot option is deprecated - use clocksource=xyz\n"); in boot_override_clock()