/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2015 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2015 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"

#define ADF_DH895XCC_EP_OFFSET	0x3A000
#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
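
/*
 * VF2PF interrupt sources are masked via the ERRMSK CSRs: a 0 bit unmasks
 * (enables) a source and a 1 bit masks it.  Per the macros above,
 * vf_mask[15:0] (VFs 1-16) maps to ERRMSK3 bits [24:9] and vf_mask[31:16]
 * (VFs 17-32) maps to ERRMSK5 bits [15:0].
 */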

void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;

	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
}

void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;

	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
}

void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
				 u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 reg;

	/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
		reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
	}

	/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
		reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
	}
}

void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 reg;

	/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
			ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
	}

	/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
			ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
	}
}

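/*
 * The PF2VF CSR is a single 32-bit register shared between the PF and one
 * VF, and both directions use the same collision-avoidance handshake:
 * check that the remote side's IN_USE pattern is absent, claim the CSR by
 * writing the message together with the local IN_USE pattern, re-read
 * after a short delay to detect a write collision, then set the remote's
 * interrupt bit and poll for the remote side to clear that bit as the ACK.
 */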
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
	u32 val, pf2vf_offset, count = 0;
	u32 local_in_use_mask, local_in_use_pattern;
	u32 remote_in_use_mask, remote_in_use_pattern;
	struct mutex *lock;	/* lock preventing concurrent access to the CSR */
	u32 int_bit;
	int ret = 0;

	if (accel_dev->is_vf) {
		pf2vf_offset = hw_data->get_pf2vf_offset(0);
		lock = &accel_dev->vf.vf2pf_lock;
		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		int_bit = ADF_VF2PF_INT;
	} else {
		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		int_bit = ADF_PF2VF_INT;
	}

	mutex_lock(lock);

	/* Check if the PF2VF CSR is in use by the remote function */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote function\n");
		ret = -EBUSY;
		goto out;
	}

	/* Attempt to get ownership of the PF2VF CSR */
	msg &= ~local_in_use_mask;
	msg |= local_in_use_pattern;
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);

	/* Wait in case the remote func is also attempting to get ownership */
	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);

	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & local_in_use_mask) != local_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote - collision detected\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * This function now owns the PF2VF CSR.  The IN_USE_BY pattern must
	 * remain in the PF2VF CSR for all writes including ACK from remote
	 * until this local function relinquishes the CSR.  Send the message
	 * by interrupting the remote.
	 */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);

	/* Wait for confirmation from remote func that it received the msg */
	do {
		msleep(ADF_IOV_MSG_ACK_DELAY);
		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));

	if (val & int_bit) {
		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
		val &= ~int_bit;
		ret = -EIO;
	}

	/* Finished with the PF2VF CSR; relinquish it and leave msg in CSR */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
	mutex_unlock(lock);
	return ret;
}

/**
 * adf_iov_putmsg() - send PF2VF message
 * @accel_dev:  Pointer to acceleration device.
 * @msg:	Message to send
 * @vf_nr:	VF number to which the message will be sent
 *
 * Function sends a message from the PF to a VF or, when the device is a
 * VF, from the VF to the PF, retrying until the CSR handshake succeeds
 * or the retry limit is reached.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	u32 count = 0;
	int ret;

	do {
		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
		if (ret)
			msleep(ADF_IOV_MSG_RETRY_DELAY);
	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));

	return ret;
}
EXPORT_SYMBOL_GPL(adf_iov_putmsg);
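
/*
 * Illustrative sketch (not code from this file): composing a VF2PF
 * message for adf_iov_putmsg().  On a VF, vf_nr is unused and can be
 * passed as 0, as adf_vf2pf_request_version() does below:
 *
 *	u32 msg = ADF_VF2PF_MSGORIGIN_SYSTEM |
 *		  (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT);
 *
 *	if (adf_iov_putmsg(accel_dev, msg, 0))
 *		dev_err(&GET_DEV(accel_dev),
 *			"Failed to send Shutdown event to PF\n");
 */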
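
/*
 * Handle a VF2PF message on the PF side.  Called from the PF's interrupt
 * handling path after the VF2PF interrupt source for this VF has been
 * masked; the handler ACKs the message by clearing the VF2PFINT bit,
 * dispatches on the message type, sends a response where one is expected,
 * and re-enables the VF2PF interrupt for this VF before returning.
 */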
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;

	/* Read message from the VF */
	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));

	/* To ACK, clear the VF2PFINT bit */
	msg &= ~ADF_VF2PF_INT;
	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) VF2PF messages */
		goto err;

	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
		{
		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;

		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPATIBILITY_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));

		dev_dbg(&GET_DEV(accel_dev),
			"Compatibility Version Request from VF%d vers=%u\n",
			vf_nr + 1, vf_compat_ver);

		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) incompatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) compat with PF (vers %d) unkn.\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else {
			dev_dbg(&GET_DEV(accel_dev),
				"VF (vers %d) compatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_COMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		}
		}
		break;
	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
		dev_dbg(&GET_DEV(accel_dev),
			"Legacy VersionRequest received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPATIBILITY_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
		resp |= ADF_PF2VF_VF_COMPATIBLE <<
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		/* Set legacy major and minor version num */
		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
			1 << ADF_PF2VF_MINORVERSION_SHIFT;
		break;
	case ADF_VF2PF_MSGTYPE_INIT:
		{
		dev_dbg(&GET_DEV(accel_dev),
			"Init message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = true;
		}
		break;
	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
		{
		dev_dbg(&GET_DEV(accel_dev),
			"Shutdown message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = false;
		}
		break;
	default:
		goto err;
	}

	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");

	/* re-enable interrupt on PF from this VF */
	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
	return;
err:
	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x)\n",
		vf_nr + 1, msg);
}

void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_vf_info *vf;
	u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
		(ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));

	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
		if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send restarting msg to VF%d\n", i);
	}
}

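/*
 * VF-side half of the compatibility version negotiation.  The PF's
 * VERSION_RESP travels back over the same CSR and is decoded on the VF's
 * PF2VF interrupt path (outside this file), which records the PF version
 * and compatibility verdict in accel_dev->vf and then signals
 * iov_msg_completion.
 */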
static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msg = 0;
	int ret;

	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
	BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);

	/* Send request from VF to PF */
	ret = adf_iov_putmsg(accel_dev, msg, 0);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Compatibility Version Request.\n");
		return ret;
	}

	/* Wait for response */
	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
					 timeout)) {
		dev_err(&GET_DEV(accel_dev),
			"IOV request/response message timeout expired\n");
		return -EIO;
	}

	/* Response from PF received, check compatibility */
	switch (accel_dev->vf.compatible) {
	case ADF_PF2VF_VF_COMPATIBLE:
		break;
	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
		/* VF is newer than PF and decides whether it is compatible */
		if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
			break;
		/* fall through */
	case ADF_PF2VF_VF_INCOMPATIBLE:
		dev_err(&GET_DEV(accel_dev),
			"PF (vers %d) and VF (vers %d) are not compatible\n",
			accel_dev->vf.pf_version,
			ADF_PFVF_COMPATIBILITY_VERSION);
		return -EINVAL;
	default:
		dev_err(&GET_DEV(accel_dev),
			"Invalid response from PF; assume not compatible\n");
		return -EINVAL;
	}
	return ret;
}

/**
 * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	adf_enable_pf2vf_interrupts(accel_dev);
	return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
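
/*
 * Illustrative call site (a sketch, not code from this file): a VF driver
 * typically enables VF2PF comms during device bring-up, before submitting
 * any requests, and treats failure as fatal for the device:
 *
 *	ret = adf_enable_vf2pf_comms(accel_dev);
 *	if (ret)
 *		return ret;
 */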