// SPDX-License-Identifier: GPL-2.0
/* Marvell PTP driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "ptp.h"
#include "mbox.h"
#include "rvu.h"

#define DRV_NAME "Marvell PTP Driver"

#define PCI_DEVID_OCTEONTX2_PTP 0xA00C
#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP 0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
#define PCI_DEVID_OCTEONTX2_RST 0xA085
#define PCI_DEVID_CN10K_PTP 0xA09E

#define PCI_PTP_BAR_NO 0
#define PCI_RST_BAR_NO 0

#define PTP_CLOCK_CFG 0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
#define PTP_CLOCK_LO 0xF08ULL
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL

#define RST_BOOT 0x1600ULL
#define RST_MUL_BITS GENMASK_ULL(38, 33)
#define CLOCK_BASE_RATE 50000000ULL

static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];

static u64 get_clock_rate(void)
{
	u64 cfg, ret = CLOCK_BASE_RATE * 16;
	struct pci_dev *pdev;
	void __iomem *base;

	/* The input clock frequency of the PTP co-processor block is the
	 * base frequency (50 MHz) multiplied by the multiplier bits in the
	 * RST_BOOT register of the RESET block. Read those multiplier bits
	 * from the RESET PCI device present in the system.
	 */
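	/* For example, assuming RST_MUL_BITS reads back as 16, the
	 * co-processor clock is 50 MHz * 16 = 800 MHz, which also matches
	 * the fallback value of ret used when the RESET device is absent.
	 */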
	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVID_OCTEONTX2_RST, NULL);
	if (!pdev)
		goto error;

	base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
	if (!base)
		goto error_put_pdev;

	cfg = readq(base + RST_BOOT);
	ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);

	iounmap(base);

error_put_pdev:
	pci_dev_put(pdev);

error:
	return ret;
}

struct ptp *ptp_get(void)
{
	struct ptp *ptp = first_ptp_block;

	/* Check PTP block is present in hardware */
	if (!pci_dev_present(ptp_id_table))
		return ERR_PTR(-ENODEV);
	/* Check driver is bound to PTP block */
	if (!ptp)
		ptp = ERR_PTR(-EPROBE_DEFER);

	return ptp;
}

void ptp_put(struct ptp *ptp)
{
	if (!ptp)
		return;

	pci_dev_put(ptp->pdev);
}

static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
	bool neg_adj = false;
	u64 comp;
	u64 adj;
	s64 ppb;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	/* The hardware adds the clock compensation value to the PTP clock
	 * on every coprocessor clock cycle. The typical convention is that
	 * it represents the number of nanoseconds between cycles. In this
	 * convention the compensation value is a 64-bit fixed-point number
	 * where the upper 32 bits are nanoseconds and the lower 32 bits are
	 * fractions of a nanosecond.
	 * scaled_ppm is the ratio, in "parts per million" scaled by 2^16,
	 * by which the compensation value should be corrected.
	 * To calculate the new compensation value we use 64-bit fixed-point
	 * arithmetic on the following formula
	 *   comp = tbase + tbase * scaled_ppm / (1M * 2^16)
	 * where tbase is the basic compensation value calculated
	 * initially in the probe function.
	 */
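	/* Worked example with assumed (not hardware-derived) numbers: for
	 * clock_rate = 800 MHz, tbase = (10^9 << 32) / 800000000 =
	 * 0x140000000, i.e. 1.25 ns per cycle in 32.32 fixed point. A
	 * request of scaled_ppm = 65536 (+1 ppm) yields ppb = 1000 and
	 * adj = tbase * 1000 / 10^9 = 5368, so comp grows by roughly one
	 * part per million, as expected.
	 */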
	comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
	/* convert scaled_ppm to ppb */
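	/* scaled_ppm carries ppm with a 16-bit fractional part, so the
	 * conversion is scaled_ppm * 1000 / 2^16, computed below as
	 * (scaled_ppm * 125) >> 13 (125 / 8192 == 1000 / 65536).
	 */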
	ppb = 1 + scaled_ppm;
	ppb *= 125;
	ppb >>= 13;
	adj = comp * ppb;
	adj = div_u64(adj, 1000000000ull);
	comp = neg_adj ? comp - adj : comp + adj;

	writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);

	return 0;
}

static int ptp_get_clock(struct ptp *ptp, u64 *clk)
{
	/* Return the current PTP clock */
	*clk = readq(ptp->reg_base + PTP_CLOCK_HI);

	return 0;
}

static int ptp_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct ptp *ptp;
	u64 clock_comp;
	u64 clock_cfg;
	int err;

	ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
	if (!ptp) {
		err = -ENOMEM;
		goto error;
	}

	ptp->pdev = pdev;

	err = pcim_enable_device(pdev);
	if (err)
		goto error_free;

	err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
	if (err)
		goto error_free;

	ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];

	ptp->clock_rate = get_clock_rate();

	/* Enable PTP clock */
	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
	clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);

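	/* Nanoseconds per coprocessor cycle in 32.32 fixed point; this is
	 * the tbase value that ptp_adjfine() later scales (0x140000000,
	 * i.e. 1.25 ns, for the assumed 800 MHz example above).
	 */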
	clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
	/* Initial compensation value to start the nanosecs counter */
	writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);

	pci_set_drvdata(pdev, ptp);
	if (!first_ptp_block)
		first_ptp_block = ptp;

	return 0;

error_free:
	devm_kfree(dev, ptp);

error:
	/* For `ptp_get()` we need to differentiate between the case
	 * when the core has not tried to probe this device yet and the
	 * case when the probe failed. In the latter case we pretend that
	 * the initialization was successful and keep the error in
	 * `dev->driver_data`.
	 */
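	/* A consumer calling ptp_get() after a failed probe therefore sees
	 * the stored error via first_ptp_block instead of -EPROBE_DEFER,
	 * so it can fail fast rather than deferring indefinitely.
	 */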
	pci_set_drvdata(pdev, ERR_PTR(err));
	if (!first_ptp_block)
		first_ptp_block = ERR_PTR(err);

	return 0;
}

static void ptp_remove(struct pci_dev *pdev)
{
	struct ptp *ptp = pci_get_drvdata(pdev);
	u64 clock_cfg;

	if (IS_ERR_OR_NULL(ptp))
		return;

	/* Disable PTP clock */
	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
}

static const struct pci_device_id ptp_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) },
	{ 0, }
};

struct pci_driver ptp_driver = {
	.name = DRV_NAME,
	.id_table = ptp_id_table,
	.probe = ptp_probe,
	.remove = ptp_remove,
};

int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
			    struct ptp_rsp *rsp)
{
	int err = 0;

	/* This function is the PTP mailbox handler invoked by AF
	 * consumers/netdev drivers via the mailbox mechanism. Netdev
	 * drivers use it to read the PTP clock and to apply frequency
	 * adjustments. Since the mailbox can be called without knowing
	 * whether this driver is bound to the PTP device, the check
	 * below is needed as a first step.
	 */
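	/* For example, a consumer reading the clock sends a ptp_req with
	 * op == PTP_OP_GET_CLOCK and picks up the counter value from
	 * rsp->clk; frequency trimming uses PTP_OP_ADJFINE with
	 * req->scaled_ppm filled in.
	 */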
	if (!rvu->ptp)
		return -ENODEV;

	switch (req->op) {
	case PTP_OP_ADJFINE:
		err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
		break;
	case PTP_OP_GET_CLOCK:
		err = ptp_get_clock(rvu->ptp, &rsp->clk);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}