// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"

#define ADF_DH895XCC_EP_OFFSET	0x3A000
#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
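
/*
 * The ERRMSK3 and ERRMSK5 CSRs gate the VF2PF interrupt sources: ERRMSK3
 * covers VFs 1-16 (vf_mask[15:0], shifted to register bits [24:9] by the
 * L_MASK macro) and ERRMSK5 covers VFs 17-32 (vf_mask[31:16], placed in
 * register bits [15:0] by the U_MASK macro).  A set bit masks (disables)
 * the corresponding VF2PF interrupt, which is why the enable helpers below
 * clear bits and the disable helpers set them.
 */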

static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
					  u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 reg;

	/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
		reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
	}

	/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
		reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
	}
}

void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	__adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
					   u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 reg;

	/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
			ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
	}

	/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
			ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
	}
}

void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	__adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

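/*
 * Variant of adf_disable_vf2pf_interrupts() intended for callers that
 * already run with local interrupts disabled (such as the PF interrupt
 * handler, per the _irq suffix), so a plain spin_lock() is sufficient.
 */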
void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	spin_lock(&accel_dev->pf.vf2pf_ints_lock);
	__adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
	spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
}

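/*
 * The PF2VF CSR is a single register shared by one PF/VF pair, so message
 * transmission uses a simple ownership protocol:
 *   1. check that the remote function's IN_USE pattern is not present;
 *   2. claim the CSR by writing the message with the local IN_USE pattern;
 *   3. delay, then re-read to detect a write collision with the remote;
 *   4. raise the interrupt bit and poll for the remote to clear it (ACK);
 *   5. relinquish the CSR by clearing the local IN_USE pattern.
 */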
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
	u32 val, pf2vf_offset, count = 0;
	u32 local_in_use_mask, local_in_use_pattern;
	u32 remote_in_use_mask, remote_in_use_pattern;
	struct mutex *lock;	/* lock preventing concurrent access to the CSR */
	u32 int_bit;
	int ret = 0;

	if (accel_dev->is_vf) {
		pf2vf_offset = hw_data->get_pf2vf_offset(0);
		lock = &accel_dev->vf.vf2pf_lock;
		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		int_bit = ADF_VF2PF_INT;
	} else {
		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		int_bit = ADF_PF2VF_INT;
	}

	mutex_lock(lock);

	/* Check if PF2VF CSR is in use by remote function */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote function\n");
		ret = -EBUSY;
		goto out;
	}

	/* Attempt to get ownership of PF2VF CSR */
	msg &= ~local_in_use_mask;
	msg |= local_in_use_pattern;
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);

	/* Wait in case the remote func is also attempting to get ownership */
	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);

	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & local_in_use_mask) != local_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote - collision detected\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * This function now owns the PF2VF CSR.  The IN_USE_BY pattern must
	 * remain in the PF2VF CSR for all writes including ACK from remote
	 * until this local function relinquishes the CSR.  Send the message
	 * by interrupting the remote.
	 */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);

	/* Wait for confirmation from remote func it received the message */
	do {
		msleep(ADF_IOV_MSG_ACK_DELAY);
		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));

	if (val & int_bit) {
		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
		val &= ~int_bit;
		ret = -EIO;
	}

	/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
	mutex_unlock(lock);
	return ret;
}

/**
 * adf_iov_putmsg() - send PF2VF message
 * @accel_dev:  Pointer to acceleration device.
 * @msg:	Message to send
 * @vf_nr:	VF number to which the message will be sent
 *
 * Function sends a message from the PF to a VF.  When called on a VF
 * device, the message is sent from the VF to the PF and @vf_nr is ignored.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	u32 count = 0;
	int ret;

	do {
		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
		if (ret)
			msleep(ADF_IOV_MSG_RETRY_DELAY);
	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));

	return ret;
}
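
/*
 * Example (sketch): composing and sending a message with adf_iov_putmsg().
 * This mirrors how adf_pf2vf_notify_restarting() below builds its message
 * from the macros in adf_pf2vf_msg.h; "vf_nr" stands in for any valid VF
 * index on the caller's side.
 *
 *	u32 msg = ADF_PF2VF_MSGORIGIN_SYSTEM |
 *		  (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT);
 *
 *	if (adf_iov_putmsg(accel_dev, msg, vf_nr))
 *		dev_err(&GET_DEV(accel_dev),
 *			"Failed to send msg to VF%d\n", vf_nr);
 */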
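/*
 * Handler for a single VF2PF request: ACKs the interrupt, decodes the
 * message type and, where a response is defined, sends it back via
 * adf_iov_putmsg() before re-enabling the VF's interrupt source.
 */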
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;

	/* Read message from the VF */
	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));

	/* To ACK, clear the VF2PFINT bit */
	msg &= ~ADF_VF2PF_INT;
	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) VF2PF messages */
		goto err;

	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
		{
		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;

		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPAT_THIS_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));

		dev_dbg(&GET_DEV(accel_dev),
			"Compatibility Version Request from VF%d vers=%u\n",
			vf_nr + 1, vf_compat_ver);

		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) incompatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) compat with PF (vers %d) unkn.\n",
				vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else {
			dev_dbg(&GET_DEV(accel_dev),
				"VF (vers %d) compatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
			resp |= ADF_PF2VF_VF_COMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		}
		}
		break;
	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
		dev_dbg(&GET_DEV(accel_dev),
			"Legacy VersionRequest received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPAT_THIS_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
		resp |= ADF_PF2VF_VF_COMPATIBLE <<
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		/* Set legacy major and minor version num */
		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
			1 << ADF_PF2VF_MINORVERSION_SHIFT;
		break;
	case ADF_VF2PF_MSGTYPE_INIT:
		{
		dev_dbg(&GET_DEV(accel_dev),
			"Init message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = true;
		}
		break;
	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
		{
		dev_dbg(&GET_DEV(accel_dev),
			"Shutdown message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = false;
		}
		break;
	default:
		goto err;
	}

	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");

	/* re-enable interrupt on PF from this VF */
	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));

	return;
err:
	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
		vf_nr + 1, msg);
}
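/*
 * Notify every VF that has completed INIT that the PF is restarting;
 * VFs that never sent an INIT message (vf->init false) are skipped.
 */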
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_vf_info *vf;
	u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
		(ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));

	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
		if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send restarting msg to VF%d\n", i);
	}
}
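/*
 * Send a Compatibility Version Request from the VF to the PF and wait for
 * the response.  The completion is expected to be signalled by the VF's
 * PF2VF response handler (not shown in this file), which also fills in
 * accel_dev->vf.compatible and accel_dev->vf.pf_version before completing.
 */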
static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msg = 0;
	int ret;

	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
	msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
	BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);

	reinit_completion(&accel_dev->vf.iov_msg_completion);

	/* Send request from VF to PF */
	ret = adf_iov_putmsg(accel_dev, msg, 0);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Compatibility Version Request.\n");
		return ret;
	}

	/* Wait for response */
	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
					 timeout)) {
		dev_err(&GET_DEV(accel_dev),
			"IOV request/response message timeout expired\n");
		return -EIO;
	}

	/* Response from PF received, check compatibility */
	switch (accel_dev->vf.compatible) {
	case ADF_PF2VF_VF_COMPATIBLE:
		break;
	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
		/* VF is newer than PF and decides whether it is compatible */
		if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) {
			accel_dev->vf.compatible = ADF_PF2VF_VF_COMPATIBLE;
			break;
		}
		fallthrough;
	case ADF_PF2VF_VF_INCOMPATIBLE:
		dev_err(&GET_DEV(accel_dev),
			"PF (vers %d) and VF (vers %d) are not compatible\n",
			accel_dev->vf.pf_version,
			ADF_PFVF_COMPAT_THIS_VERSION);
		return -EINVAL;
	default:
		dev_err(&GET_DEV(accel_dev),
			"Invalid response from PF; assume not compatible\n");
		return -EINVAL;
	}
	return ret;
}

/**
 * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	adf_enable_pf2vf_interrupts(accel_dev);
	return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);