1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
4 *
5 * Copyright 2019 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Wu Hao <hao.wu@linux.intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Joseph Grecco <joe.grecco@intel.com>
11 * Enno Luebbers <enno.luebbers@intel.com>
12 * Tim Whisonant <tim.whisonant@intel.com>
13 * Ananda Ravuri <ananda.ravuri@intel.com>
14 * Mitchel Henry <henry.mitchel@intel.com>
15 */
16
#include <linux/fpga-dfl.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>

#include "dfl-afu.h"
21
22 #define PORT_ERROR_MASK 0x8
23 #define PORT_ERROR 0x10
24 #define PORT_FIRST_ERROR 0x18
25 #define PORT_MALFORMED_REQ0 0x20
26 #define PORT_MALFORMED_REQ1 0x28
27
28 #define ERROR_MASK GENMASK_ULL(63, 0)
29
30 /* mask or unmask port errors by the error mask register. */
/*
 * Write the port error mask register directly (no locking).
 * Caller must hold pdata->lock or otherwise serialize access.
 */
static void __afu_port_err_mask(struct device *dev, bool mask)
{
	void __iomem *err_base;
	u64 val;

	err_base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);

	if (mask)
		val = ERROR_MASK;	/* all ones: suppress all port errors */
	else
		val = 0;		/* all zeroes: report all port errors */

	writeq(val, err_base + PORT_ERROR_MASK);
}
39
/* Locked wrapper: mask or unmask all port errors. */
static void afu_port_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *fdata = dev_get_platdata(dev);

	mutex_lock(&fdata->lock);
	__afu_port_err_mask(dev, mask);
	mutex_unlock(&fdata->lock);
}
48
/*
 * Clear port errors, but only if @err exactly matches the currently
 * latched error value (prevents racing with newly arriving errors).
 *
 * Returns 0 on success, -EBUSY if the port is in AP6 power state,
 * -EINVAL if @err does not match the current error register, or the
 * error code from __afu_port_disable().
 */
static int afu_port_err_clear(struct device *dev, u64 err)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *base_err, *base_hdr;
	int ret = -EBUSY;	/* default covers the AP6 bail-out below */
	u64 v;

	base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
	base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);

	/*
	 * clear Port Errors
	 *
	 * - Check for AP6 State
	 * - Halt Port by keeping Port in reset
	 * - Set PORT Error mask to all 1 to mask errors
	 * - Clear all errors
	 * - Set Port mask to all 0 to enable errors
	 * - All errors start capturing new errors
	 * - Enable Port by pulling the port out of reset
	 */

	/* if device is still in AP6 power state, can not clear any error. */
	v = readq(base_hdr + PORT_HDR_STS);
	if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
		dev_err(dev, "Could not clear errors, device in AP6 state.\n");
		goto done;
	}

	/* Halt Port by keeping Port in reset */
	ret = __afu_port_disable(pdev);
	if (ret)
		goto done;

	/* Mask all errors */
	__afu_port_err_mask(dev, true);

	/* Clear errors if err input matches with current port errors.*/
	v = readq(base_err + PORT_ERROR);

	if (v == err) {
		/* write-back of the read value clears the latched bits
		 * (write-1-to-clear semantics — per DFL port error layout)
		 */
		writeq(v, base_err + PORT_ERROR);

		/* the first-error register is cleared the same way */
		v = readq(base_err + PORT_FIRST_ERROR);
		writeq(v, base_err + PORT_FIRST_ERROR);
	} else {
		/* caller's snapshot is stale; let userspace re-read */
		ret = -EINVAL;
	}

	/* Clear mask */
	__afu_port_err_mask(dev, false);

	/* Enable the Port by clear the reset */
	__afu_port_enable(pdev);

done:
	mutex_unlock(&pdata->lock);
	return ret;
}
112
errors_show(struct device * dev,struct device_attribute * attr,char * buf)113 static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
114 char *buf)
115 {
116 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
117 void __iomem *base;
118 u64 error;
119
120 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
121
122 mutex_lock(&pdata->lock);
123 error = readq(base + PORT_ERROR);
124 mutex_unlock(&pdata->lock);
125
126 return sprintf(buf, "0x%llx\n", (unsigned long long)error);
127 }
128
errors_store(struct device * dev,struct device_attribute * attr,const char * buff,size_t count)129 static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
130 const char *buff, size_t count)
131 {
132 u64 value;
133 int ret;
134
135 if (kstrtou64(buff, 0, &value))
136 return -EINVAL;
137
138 ret = afu_port_err_clear(dev, value);
139
140 return ret ? ret : count;
141 }
142 static DEVICE_ATTR_RW(errors);
143
first_error_show(struct device * dev,struct device_attribute * attr,char * buf)144 static ssize_t first_error_show(struct device *dev,
145 struct device_attribute *attr, char *buf)
146 {
147 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
148 void __iomem *base;
149 u64 error;
150
151 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
152
153 mutex_lock(&pdata->lock);
154 error = readq(base + PORT_FIRST_ERROR);
155 mutex_unlock(&pdata->lock);
156
157 return sprintf(buf, "0x%llx\n", (unsigned long long)error);
158 }
159 static DEVICE_ATTR_RO(first_error);
160
first_malformed_req_show(struct device * dev,struct device_attribute * attr,char * buf)161 static ssize_t first_malformed_req_show(struct device *dev,
162 struct device_attribute *attr,
163 char *buf)
164 {
165 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
166 void __iomem *base;
167 u64 req0, req1;
168
169 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
170
171 mutex_lock(&pdata->lock);
172 req0 = readq(base + PORT_MALFORMED_REQ0);
173 req1 = readq(base + PORT_MALFORMED_REQ1);
174 mutex_unlock(&pdata->lock);
175
176 return sprintf(buf, "0x%016llx%016llx\n",
177 (unsigned long long)req1, (unsigned long long)req0);
178 }
179 static DEVICE_ATTR_RO(first_malformed_req);
180
/* attributes exposed under the "errors" sysfs group (NULL-terminated) */
static struct attribute *port_err_attrs[] = {
	&dev_attr_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_first_malformed_req.attr,
	NULL,
};
187
port_err_attrs_visible(struct kobject * kobj,struct attribute * attr,int n)188 static umode_t port_err_attrs_visible(struct kobject *kobj,
189 struct attribute *attr, int n)
190 {
191 struct device *dev = kobj_to_dev(kobj);
192
193 /*
194 * sysfs entries are visible only if related private feature is
195 * enumerated.
196 */
197 if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
198 return 0;
199
200 return attr->mode;
201 }
202
/* "errors" sysfs directory on the port device */
const struct attribute_group port_err_group = {
	.name       = "errors",
	.attrs      = port_err_attrs,
	.is_visible = port_err_attrs_visible,
};
208
port_err_init(struct platform_device * pdev,struct dfl_feature * feature)209 static int port_err_init(struct platform_device *pdev,
210 struct dfl_feature *feature)
211 {
212 afu_port_err_mask(&pdev->dev, false);
213
214 return 0;
215 }
216
port_err_uinit(struct platform_device * pdev,struct dfl_feature * feature)217 static void port_err_uinit(struct platform_device *pdev,
218 struct dfl_feature *feature)
219 {
220 afu_port_err_mask(&pdev->dev, true);
221 }
222
223 static long
port_err_ioctl(struct platform_device * pdev,struct dfl_feature * feature,unsigned int cmd,unsigned long arg)224 port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
225 unsigned int cmd, unsigned long arg)
226 {
227 switch (cmd) {
228 case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
229 return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
230 case DFL_FPGA_PORT_ERR_SET_IRQ:
231 return dfl_feature_ioctl_set_irq(pdev, feature, arg);
232 default:
233 dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
234 return -ENODEV;
235 }
236 }
237
/* match table for the DFL core; zero entry terminates the list */
const struct dfl_feature_id port_err_id_table[] = {
	{.id = PORT_FEATURE_ID_ERROR,},
	{0,}
};
242
/* sub-feature callbacks registered with the DFL AFU driver */
const struct dfl_feature_ops port_err_ops = {
	.init = port_err_init,
	.uinit = port_err_uinit,
	.ioctl = port_err_ioctl,
};
248