// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995, 1999, 2000  Linus Torvalds
 *
 * This file contains the clocksource time handling.
 * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1997-01-09    Adrian Sun
 *      use interval timer if CONFIG_RTC=y
 * 1997-10-29    John Bowman (bowman@math.ualberta.ca)
 *      fixed tick loss calculation in timer_interrupt
 *      (round system clock to nearest tick instead of truncating)
 *      fixed algorithm in time_init for getting time from CMOS clock
 * 1999-04-16	Thorsten Kranzkowski (dl8bcu@gmx.net)
 *	fixed algorithm in do_gettimeofday() for calculating the precise time
 *	from processor cycle counter (now taking lost_ticks into account)
 * 2003-06-03	R. Scott Bailey <scott.bailey@eds.com>
 *	Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>
#include <linux/irq_work.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>

#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>

#include "proto.h"
#include "irq_impl.h"

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

unsigned long est_cycle_freq;

#ifdef CONFIG_IRQ_WORK

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()  __this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()      __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()     __this_cpu_write(irq_work_pending, 0)

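/* Raising irq_work here just sets a per-CPU flag; the pending work is
   run from the next timer tick (see rtc_timer_interrupt) rather than
   from a self-interrupt.  */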
void arch_irq_work_raise(void)
{
	set_irq_work_pending_flag();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()      0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */


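/* Read the Alpha process cycle counter.  Only the low 32 bits (PCC_CNT)
   free-run at the CPU clock; the high half holds a PALcode-managed
   offset (PCC_OFS) that we don't need, so the return type drops it.  */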
static inline __u32 rpcc(void)
{
	return __builtin_alpha_rpcc();
}


/*
 * The RTC as a clock_event_device primitive.
 */

static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);

irqreturn_t
rtc_timer_interrupt(int irq, void *dev)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	/* Don't run the hook for UNUSED or SHUTDOWN.  */
	if (likely(clockevent_state_periodic(ce)))
		ce->event_handler(ce);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	return IRQ_HANDLED;
}

static int
rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
	/* This hook is for oneshot mode, which we don't support.  */
	return -EINVAL;
}

static void __init
init_rtc_clockevent(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	*ce = (struct clock_event_device){
		.name = "rtc",
		.features = CLOCK_EVT_FEAT_PERIODIC,
		.rating = 100,
		.cpumask = cpumask_of(cpu),
		.set_next_event = rtc_ce_set_next_event,
	};

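	/* Periodic-only device ticking at CONFIG_HZ; the oneshot min/max
	   delta arguments are unused, hence zero.  */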
	clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
}


/*
 * The QEMU clock as a clocksource primitive.
 */

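/* The QEMU vmtime/alarm PALcall extensions used below deal in
   nanoseconds, which is why both this clocksource and the QEMU clock
   event device are registered at NSEC_PER_SEC.  */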
static u64
qemu_cs_read(struct clocksource *cs)
{
	return qemu_get_vmtime();
}

static struct clocksource qemu_cs = {
	.name                   = "qemu",
	.rating                 = 400,
	.read                   = qemu_cs_read,
	.mask                   = CLOCKSOURCE_MASK(64),
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS,
	.max_idle_ns		= LONG_MAX
};


/*
 * The QEMU alarm as a clock_event_device primitive.
 */

static int qemu_ce_shutdown(struct clock_event_device *ce)
{
	/* The mode member of CE is updated for us in generic code.
	   Just make sure that the event is disabled.  */
	qemu_set_alarm_abs(0);
	return 0;
}

static int
qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
	qemu_set_alarm_rel(evt);
	return 0;
}

static irqreturn_t
qemu_timer_interrupt(int irq, void *dev)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	ce->event_handler(ce);
	return IRQ_HANDLED;
}

static void __init
init_qemu_clockevent(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

	*ce = (struct clock_event_device){
		.name = "qemu",
		.features = CLOCK_EVT_FEAT_ONESHOT,
		.rating = 400,
		.cpumask = cpumask_of(cpu),
		.set_state_shutdown = qemu_ce_shutdown,
		.set_state_oneshot = qemu_ce_shutdown,
		.tick_resume = qemu_ce_shutdown,
		.set_next_event = qemu_ce_set_next_event,
	};

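	/* Deltas are programmed in device ticks, i.e. nanoseconds here:
	   a 1 us minimum and an effectively unbounded maximum.  */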
	clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX);
}


void __init
common_init_rtc(void)
{
	unsigned char x, sel = 0;

	/* Reset periodic interrupt frequency.  */
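	/* The MC146818 periodic rate select RS divides the 32.768 kHz time
	   base so that the interrupt rate is 65536 >> RS: RS == 6 gives
	   1024 Hz, and for the lower HZ values __builtin_ffs(32768 / HZ)
	   computes the matching RS (e.g. HZ == 128: ffs(256) == 9, and
	   65536 >> 9 == 128).  */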
#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
	x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
	/* Test includes known working values on various platforms
	   where 0x26 is wrong; we refuse to change those. */
	if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
		sel = RTC_REF_CLCK_32KHZ + 6;
	}
#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32
	sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ);
#else
# error "Unknown HZ from arch/alpha/Kconfig"
#endif
	if (sel) {
		printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n",
		       CONFIG_HZ, sel);
		CMOS_WRITE(sel, RTC_FREQ_SELECT);
	}

	/* Turn on periodic interrupts.  */
	x = CMOS_READ(RTC_CONTROL);
	if (!(x & RTC_PIE)) {
		printk("Turning on RTC interrupts.\n");
		x |= RTC_PIE;
		x &= ~(RTC_AIE | RTC_UIE);
		CMOS_WRITE(x, RTC_CONTROL);
	}
	(void) CMOS_READ(RTC_INTR_FLAGS);

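	/* 0x36 programs PIT counter 0 for binary, mode 3 (square wave),
	   LSB then MSB; a zero count is the maximum divisor of 65536,
	   i.e. about 18.2 Hz from the 1.193182 MHz PIT input.  */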
	outb(0x36, 0x43);	/* pit counter 0: system timer */
	outb(0x00, 0x40);
	outb(0x00, 0x40);

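	/* 0xb6 programs PIT counter 2 the same way; the 0x1331 (4913)
	   count gives a roughly 243 Hz square wave for the speaker.  */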
	outb(0xb6, 0x43);	/* pit counter 2: speaker */
	outb(0x31, 0x42);
	outb(0x13, 0x42);

	init_rtc_irq(NULL);
}


#ifndef CONFIG_ALPHA_WTINT
/*
 * The RPCC as a clocksource primitive.
 *
 * While we have free-running timecounters running on all CPUs, and we make
 * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter
 * with the wall clock, that initialization isn't kept up-to-date across
 * different time counters in SMP mode.  Therefore we can only use this
 * method when there's only one CPU enabled.
 *
 * When using the WTINT PALcall, the RPCC may shift to a lower frequency,
 * or stop altogether, while waiting for the interrupt.  Therefore we cannot
 * use this method when WTINT is in use.
 */

static u64 read_rpcc(struct clocksource *cs)
{
	return rpcc();
}

static struct clocksource clocksource_rpcc = {
	.name                   = "rpcc",
	.rating                 = 300,
	.read                   = read_rpcc,
	.mask                   = CLOCKSOURCE_MASK(32),
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS
};
#endif /* ALPHA_WTINT */


/* Validate a computed cycle counter result against the known bounds for
   the given processor core.  There's too much brokenness in the way of
   timing hardware for any one method to work everywhere.  :-(

   Return 0 if the result cannot be trusted, otherwise return the argument.  */

static unsigned long __init
validate_cc_value(unsigned long cc)
{
	static struct bounds {
		unsigned int min, max;
	} cpu_hz[] __initdata = {
		[EV3_CPU]    = {   50000000,  200000000 },	/* guess */
		[EV4_CPU]    = {  100000000,  300000000 },
		[LCA4_CPU]   = {  100000000,  300000000 },	/* guess */
		[EV45_CPU]   = {  200000000,  300000000 },
		[EV5_CPU]    = {  250000000,  433000000 },
		[EV56_CPU]   = {  333000000,  667000000 },
		[PCA56_CPU]  = {  400000000,  600000000 },	/* guess */
		[PCA57_CPU]  = {  500000000,  600000000 },	/* guess */
		[EV6_CPU]    = {  466000000,  600000000 },
		[EV67_CPU]   = {  600000000,  750000000 },
		[EV68AL_CPU] = {  750000000,  940000000 },
		[EV68CB_CPU] = { 1000000000, 1333333333 },
		/* None of the following are shipping as of 2001-11-01.  */
		[EV68CX_CPU] = { 1000000000, 1700000000 },	/* guess */
		[EV69_CPU]   = { 1000000000, 1700000000 },	/* guess */
		[EV7_CPU]    = {  800000000, 1400000000 },	/* guess */
		[EV79_CPU]   = { 1000000000, 2000000000 },	/* guess */
	};

	/* Allow for some drift in the crystal.  10MHz is more than enough.  */
	const unsigned int deviation = 10000000;

	struct percpu_struct *cpu;
	unsigned int index;

	cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset);
	index = cpu->type & 0xffffffff;

	/* If index out of bounds, no way to validate.  */
	if (index >= ARRAY_SIZE(cpu_hz))
		return cc;

	/* If index contains no data, no way to validate.  */
	if (cpu_hz[index].max == 0)
		return cc;

	if (cc < cpu_hz[index].min - deviation
	    || cc > cpu_hz[index].max + deviation)
		return 0;

	return cc;
}


/*
 * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from
 * arch/i386/time.c.
 */

#define CALIBRATE_LATCH	0xffff
#define TIMEOUT_COUNT	0x100000
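/* A full countdown of CALIBRATE_LATCH + 1 = 65536 PIT ticks at
   1.193182 MHz is about 54.9 ms of gate time.  */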

static unsigned long __init
calibrate_cc_with_pit(void)
{
	int cc, count = 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Now let's take care of CTC channel 2
	 *
	 * Set the Gate high, program CTC channel 2 for mode 0,
	 * (interrupt on terminal count mode), binary count,
	 * load CALIBRATE_LATCH count (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);		/* binary, mode 0, LSB/MSB, Ch 2 */
	outb(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
	outb(CALIBRATE_LATCH >> 8, 0x42);	/* MSB of count */

	cc = rpcc();
	do {
		count++;
	} while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
	cc = rpcc() - cc;

	/* Error: ECTCNEVERSET or ECPUTOOFAST.  */
	if (count <= 1 || count == TIMEOUT_COUNT)
		return 0;

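	/* cc cycles elapsed over (CALIBRATE_LATCH + 1) / PIT_TICK_RATE
	   seconds; scale that up to cycles per second.  */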
	return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}

/* The Linux interpretation of the CMOS clock register contents:
   When the Update-In-Progress (UIP) flag goes from 1 to 0, the
   RTC registers show the second which has precisely just started.
   Let's hope other operating systems interpret the RTC the same way.  */

static unsigned long __init
rpcc_after_update_in_progress(void)
{
	do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
	do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);

	return rpcc();
}

void __init
time_init(void)
{
	unsigned int cc1, cc2;
	unsigned long cycle_freq, tolerance;
	long diff;

	if (alpha_using_qemu) {
		clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
		init_qemu_clockevent();
		init_rtc_irq(qemu_timer_interrupt);
		return;
	}

	/* Calibrate CPU clock -- attempt #1.  */
	if (!est_cycle_freq)
		est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

	cc1 = rpcc();

	/* Calibrate CPU clock -- attempt #2.  */
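	/* Two rpcc() samples taken at consecutive seconds-update edges of
	   the RTC are one second apart, so their difference is the cycle
	   frequency in Hz.  */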
	if (!est_cycle_freq) {
		cc1 = rpcc_after_update_in_progress();
		cc2 = rpcc_after_update_in_progress();
		est_cycle_freq = validate_cc_value(cc2 - cc1);
		cc1 = cc2;
	}

	cycle_freq = hwrpb->cycle_freq;
	if (est_cycle_freq) {
		/* If the given value is within 250 PPM of what we calculated,
		   accept it.  Otherwise, use what we found.  */
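		/* 250 PPM of cycle_freq is cycle_freq / 4000.  */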
		tolerance = cycle_freq / 4000;
		diff = cycle_freq - est_cycle_freq;
		if (diff < 0)
			diff = -diff;
		if ((unsigned long)diff > tolerance) {
			cycle_freq = est_cycle_freq;
			printk("HWRPB cycle frequency bogus.  "
			       "Estimated %lu Hz\n", cycle_freq);
		} else {
			est_cycle_freq = 0;
		}
	} else if (! validate_cc_value (cycle_freq)) {
		printk("HWRPB cycle frequency bogus, "
		       "and unable to estimate a proper value!\n");
	}

	/* See above for restrictions on using clocksource_rpcc.  */
#ifndef CONFIG_ALPHA_WTINT
	if (hwrpb->nr_processors == 1)
		clocksource_register_hz(&clocksource_rpcc, cycle_freq);
#endif

	/* Startup the timer source. */
	alpha_mv.init_rtc();
	init_rtc_clockevent();
}

/* Initialize the clock_event_device for secondary cpus.  */
#ifdef CONFIG_SMP
void __init
init_clockevent(void)
{
	if (alpha_using_qemu)
		init_qemu_clockevent();
	else
		init_rtc_clockevent();
}
#endif