// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
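
/*
 * Usage sketch (illustrative, not part of the driver): userspace exercises
 * this driver through the misc device it registers (/dev/pci-endpoint-test.N)
 * using the PCITEST_* ioctls from <uapi/linux/pcitest.h>, for example:
 *
 *	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
 *	struct pci_endpoint_test_xfer_param param = { .size = 1024 * 1024 };
 *	ioctl(fd, PCITEST_WRITE, &param);
 *
 * The in-tree tools/pci/pcitest utility wraps these ioctls; the device
 * number and transfer size above are placeholders.
 */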

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_LEGACY				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)
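
/*
 * Note: the register offsets above are the host-side view of the scratch
 * registers exposed in the test BAR; the endpoint side of this protocol is
 * implemented by the pci-epf-test function driver
 * (drivers/pci/endpoint/functions/pci-epf-test.c).
 */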

#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_LS1088A			0x80c0
#define PCI_DEVICE_ID_IMX8			0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
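
/*
 * Illustrative use of the parameters above (assuming the driver is built as
 * the pci_endpoint_test module); note that per-device driver_data applied in
 * probe may override the requested IRQ type:
 *
 *	modprobe pci_endpoint_test irq_type=2
 *	modprobe pci_endpoint_test no_msi=1
 */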

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
	}

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_LEGACY);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				       u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
}

static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");