1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
3
4 #include <linux/clk.h>
5 #include <linux/clockchips.h>
6 #include <linux/clocksource.h>
7 #include <linux/interrupt.h>
8 #include <linux/of_address.h>
9 #include <linux/of_irq.h>
10 #include <linux/of_platform.h>
11 #include <linux/sched_clock.h>
12
/* Fixed programming rate for the clockevent (see clockevents_config_and_register()) */
#define TIMER0_FREQ	1000000
/* Register offsets from the mapped base (see gxp_timer_init()) */
#define GXP_TIMER_CNT_OFS	0x00	/* down-counter / reload value */
#define GXP_TIMESTAMP_OFS	0x08	/* free-running timestamp counter */
#define GXP_TIMER_CTRL_OFS	0x14	/* control/status byte */

/* TCS Stands for Timer Control/Status: these are masks to be used in */
/* the Timer Count Registers */
#define MASK_TCS_ENABLE	0x01	/* start/enable counting */
#define MASK_TCS_PERIOD	0x02	/* periodic mode (unused here; driver is one-shot) */
#define MASK_TCS_RELOAD	0x04	/* auto-reload (unused here) */
#define MASK_TCS_TC	0x80	/* terminal count; written back to acknowledge — presumably W1C, confirm with HW spec */
24
/*
 * Per-device state for the GXP system timer.
 * @counter: ioremapped count register (base + GXP_TIMER_CNT_OFS)
 * @control: ioremapped control/status register (base + GXP_TIMER_CTRL_OFS)
 * @evt:     the clockevent device embedded here so the irq handler and
 *           set_next_event callback can recover the state via container_of()
 */
struct gxp_timer {
	void __iomem *counter;
	void __iomem *control;
	struct clock_event_device evt;
};
30
/* Singleton set by gxp_timer_init(); gxp_timer_probe() relies on it */
static struct gxp_timer *gxp_timer;

/* Mapped timestamp register, read by the sched_clock/clocksource hooks */
static void __iomem *system_clock __ro_after_init;
34
/* Map an embedded clock_event_device back to its enclosing gxp_timer */
static inline struct gxp_timer *to_gxp_timer(struct clock_event_device *evt_dev)
{
	return container_of(evt_dev, struct gxp_timer, evt);
}
39
/*
 * sched_clock read hook: raw 32-bit read of the free-running timestamp
 * counter. notrace because sched_clock readers must not recurse into ftrace.
 */
static u64 notrace gxp_sched_read(void)
{
	return readl_relaxed(system_clock);
}
44
/*
 * clockevent one-shot callback: arm the timer to fire @event ticks from now.
 *
 * The register write order is deliberate: halt/acknowledge first, then load
 * the new count, then re-enable — do not reorder these accesses.
 */
static int gxp_time_set_next_event(unsigned long event, struct clock_event_device *evt_dev)
{
	struct gxp_timer *timer = to_gxp_timer(evt_dev);

	/* Stop counting and disable interrupt before updating */
	writeb_relaxed(MASK_TCS_TC, timer->control);
	writel_relaxed(event, timer->counter);
	writeb_relaxed(MASK_TCS_TC | MASK_TCS_ENABLE, timer->control);

	return 0;
}
56
/*
 * Timer IRQ handler. The line is requested IRQF_SHARED (see gxp_timer_init()),
 * so first check the terminal-count bit and bail with IRQ_NONE if this device
 * did not raise the interrupt.
 */
static irqreturn_t gxp_timer_interrupt(int irq, void *dev_id)
{
	struct gxp_timer *timer = (struct gxp_timer *)dev_id;

	/* Not ours: terminal count not set */
	if (!(readb_relaxed(timer->control) & MASK_TCS_TC))
		return IRQ_NONE;

	/* Acknowledge before dispatching, so a re-arm in the handler is safe */
	writeb_relaxed(MASK_TCS_TC, timer->control);

	timer->evt.event_handler(&timer->evt);

	return IRQ_HANDLED;
}
70
gxp_timer_init(struct device_node * node)71 static int __init gxp_timer_init(struct device_node *node)
72 {
73 void __iomem *base;
74 struct clk *clk;
75 u32 freq;
76 int ret, irq;
77
78 gxp_timer = kzalloc(sizeof(*gxp_timer), GFP_KERNEL);
79 if (!gxp_timer) {
80 ret = -ENOMEM;
81 pr_err("Can't allocate gxp_timer");
82 return ret;
83 }
84
85 clk = of_clk_get(node, 0);
86 if (IS_ERR(clk)) {
87 ret = (int)PTR_ERR(clk);
88 pr_err("%pOFn clock not found: %d\n", node, ret);
89 goto err_free;
90 }
91
92 ret = clk_prepare_enable(clk);
93 if (ret) {
94 pr_err("%pOFn clock enable failed: %d\n", node, ret);
95 goto err_clk_enable;
96 }
97
98 base = of_iomap(node, 0);
99 if (!base) {
100 ret = -ENXIO;
101 pr_err("Can't map timer base registers");
102 goto err_iomap;
103 }
104
105 /* Set the offsets to the clock register and timer registers */
106 gxp_timer->counter = base + GXP_TIMER_CNT_OFS;
107 gxp_timer->control = base + GXP_TIMER_CTRL_OFS;
108 system_clock = base + GXP_TIMESTAMP_OFS;
109
110 gxp_timer->evt.name = node->name;
111 gxp_timer->evt.rating = 300;
112 gxp_timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
113 gxp_timer->evt.set_next_event = gxp_time_set_next_event;
114 gxp_timer->evt.cpumask = cpumask_of(0);
115
116 irq = irq_of_parse_and_map(node, 0);
117 if (irq <= 0) {
118 ret = -EINVAL;
119 pr_err("GXP Timer Can't parse IRQ %d", irq);
120 goto err_exit;
121 }
122
123 freq = clk_get_rate(clk);
124
125 ret = clocksource_mmio_init(system_clock, node->name, freq,
126 300, 32, clocksource_mmio_readl_up);
127 if (ret) {
128 pr_err("%pOFn init clocksource failed: %d", node, ret);
129 goto err_exit;
130 }
131
132 sched_clock_register(gxp_sched_read, 32, freq);
133
134 irq = irq_of_parse_and_map(node, 0);
135 if (irq <= 0) {
136 ret = -EINVAL;
137 pr_err("%pOFn Can't parse IRQ %d", node, irq);
138 goto err_exit;
139 }
140
141 clockevents_config_and_register(&gxp_timer->evt, TIMER0_FREQ,
142 0xf, 0xffffffff);
143
144 ret = request_irq(irq, gxp_timer_interrupt, IRQF_TIMER | IRQF_SHARED,
145 node->name, gxp_timer);
146 if (ret) {
147 pr_err("%pOFn request_irq() failed: %d", node, ret);
148 goto err_exit;
149 }
150
151 pr_debug("gxp: system timer (irq = %d)\n", irq);
152 return 0;
153
154 err_exit:
155 iounmap(base);
156 err_iomap:
157 clk_disable_unprepare(clk);
158 err_clk_enable:
159 clk_put(clk);
160 err_free:
161 kfree(gxp_timer);
162 return ret;
163 }
164
165 /*
166 * This probe gets called after the timer is already up and running. This will create
167 * the watchdog device as a child since the registers are shared.
168 */
169
gxp_timer_probe(struct platform_device * pdev)170 static int gxp_timer_probe(struct platform_device *pdev)
171 {
172 struct platform_device *gxp_watchdog_device;
173 struct device *dev = &pdev->dev;
174 int ret;
175
176 if (!gxp_timer) {
177 pr_err("Gxp Timer not initialized, cannot create watchdog");
178 return -ENOMEM;
179 }
180
181 gxp_watchdog_device = platform_device_alloc("gxp-wdt", -1);
182 if (!gxp_watchdog_device) {
183 pr_err("Timer failed to allocate gxp-wdt");
184 return -ENOMEM;
185 }
186
187 /* Pass the base address (counter) as platform data and nothing else */
188 gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
189 gxp_watchdog_device->dev.parent = dev;
190
191 ret = platform_device_add(gxp_watchdog_device);
192 if (ret)
193 platform_device_put(gxp_watchdog_device);
194
195 return ret;
196 }
197
/* Device-tree match table for the platform (watchdog-spawning) half */
static const struct of_device_id gxp_timer_of_match[] = {
	{ .compatible = "hpe,gxp-timer", },
	{},
};
202
/*
 * Platform driver registration. suppress_bind_attrs: manual unbind would
 * orphan the running system timer, so disallow it via sysfs.
 */
static struct platform_driver gxp_timer_driver = {
	.probe  = gxp_timer_probe,
	.driver = {
		.name = "gxp-timer",
		.of_match_table = gxp_timer_of_match,
		.suppress_bind_attrs = true,
	},
};

/* builtin only — a system timer cannot be a module */
builtin_platform_driver(gxp_timer_driver);
213
/* Early init hook: runs gxp_timer_init() long before the platform probe */
TIMER_OF_DECLARE(gxp, "hpe,gxp-timer", gxp_timer_init);
215