/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include "adf_c3xxxvf_hw_data.h"

#define ADF_SYSTEM_DEVICE(device_id) \
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}

static const struct pci_device_id adf_pci_tbl[] = {
	ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID),
	{0,}
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);

static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);

static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = ADF_C3XXXVF_DEVICE_NAME,
	.probe = adf_probe,
	.remove = adf_remove,
};

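/* Release the PCI regions claimed in adf_probe() and disable the PCI device */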
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}

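/*
 * Tear down what adf_probe() set up on the accel_dev itself: unmap the BARs,
 * free the hw_data structure, remove the configuration table and debugfs
 * entry, and unregister the device from the device manager.
 */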
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
	struct adf_accel_dev *pf;
	int i;

	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

		if (bar->virt_addr)
			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
	}

	if (accel_dev->hw_device) {
		switch (accel_pci_dev->pci_dev->device) {
		case ADF_C3XXXIOV_PCI_DEVICE_ID:
			adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
			break;
		default:
			break;
		}
		kfree(accel_dev->hw_device);
		accel_dev->hw_device = NULL;
	}
	adf_cfg_dev_remove(accel_dev);
	debugfs_remove(accel_dev->debugfs_dir);
	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
	adf_devmgr_rm_dev(accel_dev, pf);
}

static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_dev *pf;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	unsigned long bar_mask;
	int ret;

	switch (ent->device) {
	case ADF_C3XXXIOV_PCI_DEVICE_ID:
		break;
	default:
		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
		return -ENODEV;
	}

	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
				 dev_to_node(&pdev->dev));
	if (!accel_dev)
		return -ENOMEM;

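	/*
	 * Mark this device as a VF and look up its parent PF, which the
	 * device manager tracks via the physical function's pci_dev.
	 */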
	accel_dev->is_vf = true;
	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = pdev;

	/* Add accel device to accel table */
	if (adf_devmgr_add_dev(accel_dev, pf)) {
		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
		kfree(accel_dev);
		return -EFAULT;
	}
	INIT_LIST_HEAD(&accel_dev->crypto_list);

	accel_dev->owner = THIS_MODULE;
	/* Allocate and configure device configuration structure */
	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
			       dev_to_node(&pdev->dev));
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}
	accel_dev->hw_device = hw_data;
	adf_init_hw_data_c3xxxiov(accel_dev->hw_device);

	/* Get the accelerator and accelerator engine masks */
	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
	accel_pci_dev->sku = hw_data->get_sku(hw_data);

	/* Create the device's top-level debugfs entry */
	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
		 pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));

	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
	if (!accel_dev->debugfs_dir) {
		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
		ret = -EINVAL;
		goto out_err;
	}

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	/* Enable PCI device */
	if (pci_enable_device(pdev)) {
		ret = -EFAULT;
		goto out_err;
	}

	/* Set the DMA mask */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
			dev_err(&pdev->dev, "No usable DMA configuration\n");
			ret = -EFAULT;
			goto out_err_disable;
		} else {
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		}
	} else {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	}

	if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
		ret = -EFAULT;
		goto out_err_disable;
	}

	/* Find and map all the device's BARs */
	i = 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);
		if (!bar->base_addr)
			break;
		bar->size = pci_resource_len(pdev, bar_nr);
		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
		if (!bar->virt_addr) {
			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
			ret = -EFAULT;
			goto out_err_free_reg;
		}
	}
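	/* Enable bus mastering so the device can perform DMA */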
	pci_set_master(pdev);
	/* Completion for VF2PF request/response message exchange */
	init_completion(&accel_dev->vf.iov_msg_completion);

	ret = qat_crypto_dev_config(accel_dev);
	if (ret)
		goto out_err_free_reg;

	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);

	ret = adf_dev_init(accel_dev);
	if (ret)
		goto out_err_dev_shutdown;

	ret = adf_dev_start(accel_dev);
	if (ret)
		goto out_err_dev_stop;

	return ret;

out_err_dev_stop:
	adf_dev_stop(accel_dev);
out_err_dev_shutdown:
	adf_dev_shutdown(accel_dev);
out_err_free_reg:
	pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
	pci_disable_device(accel_pci_dev->pci_dev);
out_err:
	adf_cleanup_accel(accel_dev);
	kfree(accel_dev);
	return ret;
}

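/*
 * Stop and shut down the accelerator, then release every resource acquired
 * in adf_probe().
 */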
static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);
	adf_cleanup_accel(accel_dev);
	adf_cleanup_pci_dev(accel_dev);
	kfree(accel_dev);
}

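/*
 * Make sure the common intel_qat module, which provides the adf_* framework
 * this driver builds on, is loaded before registering the VF PCI driver.
 */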
static int __init adfdrv_init(void)
{
	request_module("intel_qat");

	if (pci_register_driver(&adf_driver)) {
		pr_err("QAT: Driver initialization failed\n");
		return -EFAULT;
	}
	return 0;
}

static void __exit adfdrv_release(void)
{
	pci_unregister_driver(&adf_driver);
	adf_clean_vf_map(true);
}

module_init(adfdrv_init);
module_exit(adfdrv_release);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);