Lines matching full:cs in kernel/time/clocksource.c (identifier cross-reference: each hit shows the source line, the enclosing function, and whether cs is an argument or a local there)
119 static void __clocksource_change_rating(struct clocksource *cs, int rating);
145 static void __clocksource_unstable(struct clocksource *cs) in __clocksource_unstable() argument
147 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); in __clocksource_unstable()
148 cs->flags |= CLOCK_SOURCE_UNSTABLE; in __clocksource_unstable()
154 if (list_empty(&cs->list)) { in __clocksource_unstable()
155 cs->rating = 0; in __clocksource_unstable()
159 if (cs->mark_unstable) in __clocksource_unstable()
160 cs->mark_unstable(cs); in __clocksource_unstable()
169 * @cs: clocksource to be marked unstable
174 void clocksource_mark_unstable(struct clocksource *cs) in clocksource_mark_unstable() argument
179 if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) { in clocksource_mark_unstable()
180 if (!list_empty(&cs->list) && list_empty(&cs->wd_list)) in clocksource_mark_unstable()
181 list_add(&cs->wd_list, &watchdog_list); in clocksource_mark_unstable()
182 __clocksource_unstable(cs); in clocksource_mark_unstable()
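Note: clocksource_mark_unstable() is the hook the rest of the kernel uses to flag an already-registered clocksource as unreliable; it puts the source on the watchdog list if needed and lets the watchdog machinery downgrade it and reselect. A minimal, hedged sketch of a caller follows; my_clocksource and the glitch handler are invented for illustration, only clocksource_mark_unstable() itself is the real interface.

#include <linux/clocksource.h>

static struct clocksource my_clocksource;	/* assumed registered elsewhere */

/* Hypothetical error path: platform code has detected the counter misbehaving. */
static void my_platform_counter_glitch(void)
{
	/* Flag the source; the core degrades it and picks a new current clocksource. */
	clocksource_mark_unstable(&my_clocksource);
}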
189 struct clocksource *cs; in clocksource_watchdog() local
200 list_for_each_entry(cs, &watchdog_list, wd_list) { in clocksource_watchdog()
203 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in clocksource_watchdog()
210 csnow = cs->read(cs); in clocksource_watchdog()
215 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) || in clocksource_watchdog()
217 cs->flags |= CLOCK_SOURCE_WATCHDOG; in clocksource_watchdog()
218 cs->wd_last = wdnow; in clocksource_watchdog()
219 cs->cs_last = csnow; in clocksource_watchdog()
223 delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask); in clocksource_watchdog()
227 delta = clocksource_delta(csnow, cs->cs_last, cs->mask); in clocksource_watchdog()
228 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); in clocksource_watchdog()
229 wdlast = cs->wd_last; /* save these in case we print them */ in clocksource_watchdog()
230 cslast = cs->cs_last; in clocksource_watchdog()
231 cs->cs_last = csnow; in clocksource_watchdog()
232 cs->wd_last = wdnow; in clocksource_watchdog()
240 smp_processor_id(), cs->name); in clocksource_watchdog()
244 cs->name, csnow, cslast, cs->mask); in clocksource_watchdog()
245 __clocksource_unstable(cs); in clocksource_watchdog()
249 if (cs == curr_clocksource && cs->tick_stable) in clocksource_watchdog()
250 cs->tick_stable(cs); in clocksource_watchdog()
252 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && in clocksource_watchdog()
253 (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && in clocksource_watchdog()
256 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_watchdog()
273 if (cs != curr_clocksource) { in clocksource_watchdog()
274 cs->flags |= CLOCK_SOURCE_RESELECT; in clocksource_watchdog()
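Note: the watchdog loop above measures the same interval on both the watched clocksource (cs_nsec) and on the watchdog itself, then compares the two; the cycle-to-nanosecond step is the standard mult/shift conversion done by clocksource_cyc2ns() (an inline helper in include/linux/clocksource.h). A standalone restatement of that conversion, for reference only:

#include <linux/types.h>

/* ns ~= (cycles * mult) >> shift, i.e. mult/2^shift approximates ns per cycle. */
static inline u64 example_cyc2ns(u64 cycles, u32 mult, u32 shift)
{
	return (cycles * mult) >> shift;
}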
322 struct clocksource *cs; in clocksource_reset_watchdog() local
324 list_for_each_entry(cs, &watchdog_list, wd_list) in clocksource_reset_watchdog()
325 cs->flags &= ~CLOCK_SOURCE_WATCHDOG; in clocksource_reset_watchdog()
333 static void clocksource_enqueue_watchdog(struct clocksource *cs) in clocksource_enqueue_watchdog() argument
335 INIT_LIST_HEAD(&cs->wd_list); in clocksource_enqueue_watchdog()
337 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { in clocksource_enqueue_watchdog()
338 /* cs is a clocksource to be watched. */ in clocksource_enqueue_watchdog()
339 list_add(&cs->wd_list, &watchdog_list); in clocksource_enqueue_watchdog()
340 cs->flags &= ~CLOCK_SOURCE_WATCHDOG; in clocksource_enqueue_watchdog()
342 /* cs is a watchdog. */ in clocksource_enqueue_watchdog()
343 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) in clocksource_enqueue_watchdog()
344 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_enqueue_watchdog()
350 struct clocksource *cs, *old_wd; in clocksource_select_watchdog() local
359 list_for_each_entry(cs, &clocksource_list, list) { in clocksource_select_watchdog()
360 /* cs is a clocksource to be watched. */ in clocksource_select_watchdog()
361 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) in clocksource_select_watchdog()
365 if (fallback && cs == old_wd) in clocksource_select_watchdog()
369 if (!watchdog || cs->rating > watchdog->rating) in clocksource_select_watchdog()
370 watchdog = cs; in clocksource_select_watchdog()
385 static void clocksource_dequeue_watchdog(struct clocksource *cs) in clocksource_dequeue_watchdog() argument
387 if (cs != watchdog) { in clocksource_dequeue_watchdog()
388 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { in clocksource_dequeue_watchdog()
389 /* cs is a watched clocksource. */ in clocksource_dequeue_watchdog()
390 list_del_init(&cs->wd_list); in clocksource_dequeue_watchdog()
399 struct clocksource *cs, *tmp; in __clocksource_watchdog_kthread() local
404 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { in __clocksource_watchdog_kthread()
405 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in __clocksource_watchdog_kthread()
406 list_del_init(&cs->wd_list); in __clocksource_watchdog_kthread()
407 __clocksource_change_rating(cs, 0); in __clocksource_watchdog_kthread()
410 if (cs->flags & CLOCK_SOURCE_RESELECT) { in __clocksource_watchdog_kthread()
411 cs->flags &= ~CLOCK_SOURCE_RESELECT; in __clocksource_watchdog_kthread()
431 static bool clocksource_is_watchdog(struct clocksource *cs) in clocksource_is_watchdog() argument
433 return cs == watchdog; in clocksource_is_watchdog()
438 static void clocksource_enqueue_watchdog(struct clocksource *cs) in clocksource_enqueue_watchdog() argument
440 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) in clocksource_enqueue_watchdog()
441 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_enqueue_watchdog()
445 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } in clocksource_dequeue_watchdog() argument
448 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } in clocksource_is_watchdog() argument
449 void clocksource_mark_unstable(struct clocksource *cs) { } in clocksource_mark_unstable() argument
456 static bool clocksource_is_suspend(struct clocksource *cs) in clocksource_is_suspend() argument
458 return cs == suspend_clocksource; in clocksource_is_suspend()
461 static void __clocksource_suspend_select(struct clocksource *cs) in __clocksource_suspend_select() argument
466 if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP)) in __clocksource_suspend_select()
474 if (cs->suspend || cs->resume) { in __clocksource_suspend_select()
476 cs->name); in __clocksource_suspend_select()
480 if (!suspend_clocksource || cs->rating > suspend_clocksource->rating) in __clocksource_suspend_select()
481 suspend_clocksource = cs; in __clocksource_suspend_select()
490 struct clocksource *cs, *old_suspend; in clocksource_suspend_select() local
496 list_for_each_entry(cs, &clocksource_list, list) { in clocksource_suspend_select()
498 if (fallback && cs == old_suspend) in clocksource_suspend_select()
501 __clocksource_suspend_select(cs); in clocksource_suspend_select()
507 * @cs: current clocksource from timekeeping
518 void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles) in clocksource_start_suspend_timing() argument
528 if (clocksource_is_suspend(cs)) { in clocksource_start_suspend_timing()
544 * @cs: current clocksource from timekeeping
556 u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now) in clocksource_stop_suspend_timing() argument
568 if (clocksource_is_suspend(cs)) in clocksource_stop_suspend_timing()
584 if (!clocksource_is_suspend(cs) && suspend_clocksource->disable) in clocksource_stop_suspend_timing()
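Note: clocksource_start_suspend_timing() snapshots a nonstop (CLOCK_SOURCE_SUSPEND_NONSTOP) clocksource on suspend entry, and clocksource_stop_suspend_timing() returns the nanoseconds that elapsed across suspend. Conceptually the result is the masked cycle delta converted with the suspend clocksource's mult/shift pair; the sketch below restates that idea and is not the exact function body (which also handles the case where the timekeeping clocksource is itself the suspend clocksource):

/* Illustrative only: elapsed time across suspend on a nonstop counter. */
static u64 example_suspend_elapsed_ns(struct clocksource *sc, u64 start, u64 now)
{
	u64 delta = (now - start) & sc->mask;	/* roughly what clocksource_delta() computes */

	return clocksource_cyc2ns(delta, sc->mult, sc->shift);
}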
595 struct clocksource *cs; in clocksource_suspend() local
597 list_for_each_entry_reverse(cs, &clocksource_list, list) in clocksource_suspend()
598 if (cs->suspend) in clocksource_suspend()
599 cs->suspend(cs); in clocksource_suspend()
607 struct clocksource *cs; in clocksource_resume() local
609 list_for_each_entry(cs, &clocksource_list, list) in clocksource_resume()
610 if (cs->resume) in clocksource_resume()
611 cs->resume(cs); in clocksource_resume()
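Note: clocksource_suspend() walks the registered clocksources in reverse list order and invokes their optional ->suspend() callbacks; clocksource_resume() does the same forward with ->resume(). Drivers whose counter needs save/restore supply these in their struct clocksource, e.g. (hypothetical callbacks, sketched under the same assumptions as the registration example further down):

static void my_counter_suspend(struct clocksource *cs)
{
	/* Quiesce or save hardware state here. */
}

static void my_counter_resume(struct clocksource *cs)
{
	/* Re-enable or restore hardware state here. */
}

/* Wired up in the clocksource definition:
 *	.suspend = my_counter_suspend,
 *	.resume  = my_counter_resume,
 */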
630 * @cs: Pointer to clocksource
633 static u32 clocksource_max_adjustment(struct clocksource *cs) in clocksource_max_adjustment() argument
639 ret = (u64)cs->mult * 11; in clocksource_max_adjustment()
691 * @cs: Pointer to clocksource to be updated
694 static inline void clocksource_update_max_deferment(struct clocksource *cs) in clocksource_update_max_deferment() argument
696 cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift, in clocksource_update_max_deferment()
697 cs->maxadj, cs->mask, in clocksource_update_max_deferment()
698 &cs->max_cycles); in clocksource_update_max_deferment()
705 struct clocksource *cs; in clocksource_find_best() local
715 list_for_each_entry(cs, &clocksource_list, list) { in clocksource_find_best()
716 if (skipcur && cs == curr_clocksource) in clocksource_find_best()
718 if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES)) in clocksource_find_best()
720 return cs; in clocksource_find_best()
728 struct clocksource *best, *cs; in __clocksource_select() local
739 list_for_each_entry(cs, &clocksource_list, list) { in __clocksource_select()
740 if (skipcur && cs == curr_clocksource) in __clocksource_select()
742 if (strcmp(cs->name, override_name) != 0) in __clocksource_select()
749 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) { in __clocksource_select()
751 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in __clocksource_select()
753 cs->name); in __clocksource_select()
761 cs->name); in __clocksource_select()
765 best = cs; in __clocksource_select()
825 static void clocksource_enqueue(struct clocksource *cs) in clocksource_enqueue() argument
832 if (tmp->rating < cs->rating) in clocksource_enqueue()
836 list_add(&cs->list, entry); in clocksource_enqueue()
841 * @cs: clocksource to be registered
851 void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq) in __clocksource_update_freq_scale() argument
869 sec = cs->mask; in __clocksource_update_freq_scale()
874 else if (sec > 600 && cs->mask > UINT_MAX) in __clocksource_update_freq_scale()
877 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, in __clocksource_update_freq_scale()
884 cs->maxadj = clocksource_max_adjustment(cs); in __clocksource_update_freq_scale()
885 while (freq && ((cs->mult + cs->maxadj < cs->mult) in __clocksource_update_freq_scale()
886 || (cs->mult - cs->maxadj > cs->mult))) { in __clocksource_update_freq_scale()
887 cs->mult >>= 1; in __clocksource_update_freq_scale()
888 cs->shift--; in __clocksource_update_freq_scale()
889 cs->maxadj = clocksource_max_adjustment(cs); in __clocksource_update_freq_scale()
896 WARN_ONCE(cs->mult + cs->maxadj < cs->mult, in __clocksource_update_freq_scale()
898 cs->name); in __clocksource_update_freq_scale()
900 clocksource_update_max_deferment(cs); in __clocksource_update_freq_scale()
903 cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns); in __clocksource_update_freq_scale()
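Note: clocks_calc_mult_shift() picks mult and shift so that mult / 2^shift approximates NSEC_PER_SEC / freq (for a Hz registration; a kHz registration scales accordingly) over the conversion range derived from sec above. Worked example with made-up numbers: a 24 MHz counter ticks every ~41.667 ns, so with shift = 24 the matching mult is about 41.667 * 2^24 ~= 699050667. The helper below only shows that relationship and is not the kernel's implementation (which also rounds and bounds the result against overflow):

#include <linux/math64.h>
#include <linux/time64.h>

/* mult such that (cycles * mult) >> shift converts cycles at freq_hz to ns. */
static u32 example_mult_for(u32 freq_hz, u32 shift)
{
	return (u32)div_u64((u64)NSEC_PER_SEC << shift, freq_hz);
}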
909 * @cs: clocksource to be registered
918 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) in __clocksource_register_scale() argument
922 clocksource_arch_init(cs); in __clocksource_register_scale()
925 __clocksource_update_freq_scale(cs, scale, freq); in __clocksource_register_scale()
931 clocksource_enqueue(cs); in __clocksource_register_scale()
932 clocksource_enqueue_watchdog(cs); in __clocksource_register_scale()
937 __clocksource_suspend_select(cs); in __clocksource_register_scale()
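Note: __clocksource_register_scale() is normally reached through the clocksource_register_hz() / clocksource_register_khz() wrappers in include/linux/clocksource.h, which pass scale = 1 and scale = 1000 respectively. A minimal, hedged driver-side registration sketch follows; the device name, MMIO read callback and rating of 300 (the "desired" band in the struct clocksource kerneldoc) are invented, the rest is the real API:

#include <linux/clocksource.h>
#include <linux/io.h>

static void __iomem *my_counter_base;	/* assumed ioremap()ed elsewhere */

static u64 my_counter_read(struct clocksource *cs)
{
	return readl_relaxed(my_counter_base);	/* free-running 32-bit counter */
}

static struct clocksource my_counter_cs = {
	.name	= "my-counter",
	.rating	= 300,
	.read	= my_counter_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init my_counter_clocksource_init(u32 rate_hz)
{
	return clocksource_register_hz(&my_counter_cs, rate_hz);
}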
943 static void __clocksource_change_rating(struct clocksource *cs, int rating) in __clocksource_change_rating() argument
945 list_del(&cs->list); in __clocksource_change_rating()
946 cs->rating = rating; in __clocksource_change_rating()
947 clocksource_enqueue(cs); in __clocksource_change_rating()
952 * @cs: clocksource to be changed
955 void clocksource_change_rating(struct clocksource *cs, int rating) in clocksource_change_rating() argument
961 __clocksource_change_rating(cs, rating); in clocksource_change_rating()
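Note: clocksource_change_rating() lets a driver raise or lower a registered clocksource's rating after the fact; the core re-sorts clocksource_list and re-runs selection. Reusing the hypothetical my_counter_cs from the registration sketch above:

	/* Downgrade the counter once it is known to be slow or inaccurate. */
	clocksource_change_rating(&my_counter_cs, 50);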
972 * Unbind clocksource @cs. Called with clocksource_mutex held
974 static int clocksource_unbind(struct clocksource *cs) in clocksource_unbind() argument
978 if (clocksource_is_watchdog(cs)) { in clocksource_unbind()
981 if (clocksource_is_watchdog(cs)) in clocksource_unbind()
985 if (cs == curr_clocksource) { in clocksource_unbind()
988 if (curr_clocksource == cs) in clocksource_unbind()
992 if (clocksource_is_suspend(cs)) { in clocksource_unbind()
1002 clocksource_dequeue_watchdog(cs); in clocksource_unbind()
1003 list_del_init(&cs->list); in clocksource_unbind()
1011 * @cs: clocksource to be unregistered
1013 int clocksource_unregister(struct clocksource *cs) in clocksource_unregister() argument
1018 if (!list_empty(&cs->list)) in clocksource_unregister()
1019 ret = clocksource_unbind(cs); in clocksource_unregister()
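Note: clocksource_unregister() is the teardown counterpart of registration; via clocksource_unbind() it refuses to drop a source for which no replacement can be selected, so the return value is worth checking. Continuing the hypothetical example from the registration sketch:

static void my_counter_clocksource_exit(void)
{
	if (clocksource_unregister(&my_counter_cs))
		pr_warn("my-counter: still in use, not unregistered\n");
}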
1105 struct clocksource *cs; in unbind_clocksource_store() local
1115 list_for_each_entry(cs, &clocksource_list, list) { in unbind_clocksource_store()
1116 if (strcmp(cs->name, name)) in unbind_clocksource_store()
1118 ret = clocksource_unbind(cs); in unbind_clocksource_store()