1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCIe host controller driver for Mobiveil PCIe Host controller
4  *
5  * Copyright (c) 2018 Mobiveil Inc.
6  * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/init.h>
11 #include <linux/interrupt.h>
12 #include <linux/irq.h>
13 #include <linux/irqchip/chained_irq.h>
14 #include <linux/irqdomain.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/msi.h>
18 #include <linux/of_address.h>
19 #include <linux/of_irq.h>
20 #include <linux/of_platform.h>
21 #include <linux/of_pci.h>
22 #include <linux/pci.h>
23 #include <linux/platform_device.h>
24 #include <linux/slab.h>
25 
26 #include "../pci.h"
27 
28 /* register offsets and bit positions */
29 
30 /*
31  * translation tables are grouped into windows, each window registers are
32  * grouped into blocks of 4 or 16 registers each
33  */
34 #define PAB_REG_BLOCK_SIZE		16
35 #define PAB_EXT_REG_BLOCK_SIZE		4
36 
37 #define PAB_REG_ADDR(offset, win)	\
38 	(offset + (win * PAB_REG_BLOCK_SIZE))
39 #define PAB_EXT_REG_ADDR(offset, win)	\
40 	(offset + (win * PAB_EXT_REG_BLOCK_SIZE))
41 
42 #define LTSSM_STATUS			0x0404
43 #define  LTSSM_STATUS_L0_MASK		0x3f
44 #define  LTSSM_STATUS_L0		0x2d
45 
46 #define PAB_CTRL			0x0808
47 #define  AMBA_PIO_ENABLE_SHIFT		0
48 #define  PEX_PIO_ENABLE_SHIFT		1
49 #define  PAGE_SEL_SHIFT			13
50 #define  PAGE_SEL_MASK			0x3f
51 #define  PAGE_LO_MASK			0x3ff
52 #define  PAGE_SEL_OFFSET_SHIFT		10
53 
54 #define PAB_AXI_PIO_CTRL		0x0840
55 #define  APIO_EN_MASK			0xf
56 
57 #define PAB_PEX_PIO_CTRL		0x08c0
58 #define  PIO_ENABLE_SHIFT		0
59 
60 #define PAB_INTP_AMBA_MISC_ENB		0x0b0c
61 #define PAB_INTP_AMBA_MISC_STAT		0x0b1c
62 #define  PAB_INTP_INTX_MASK		0x01e0
63 #define  PAB_INTP_MSI_MASK		0x8
64 
65 #define PAB_AXI_AMAP_CTRL(win)		PAB_REG_ADDR(0x0ba0, win)
66 #define  WIN_ENABLE_SHIFT		0
67 #define  WIN_TYPE_SHIFT			1
68 #define  WIN_TYPE_MASK			0x3
69 #define  WIN_SIZE_MASK			0xfffffc00
70 
71 #define PAB_EXT_AXI_AMAP_SIZE(win)	PAB_EXT_REG_ADDR(0xbaf0, win)
72 
73 #define PAB_EXT_AXI_AMAP_AXI_WIN(win)	PAB_EXT_REG_ADDR(0x80a0, win)
74 #define PAB_AXI_AMAP_AXI_WIN(win)	PAB_REG_ADDR(0x0ba4, win)
75 #define  AXI_WINDOW_ALIGN_MASK		3
76 
77 #define PAB_AXI_AMAP_PEX_WIN_L(win)	PAB_REG_ADDR(0x0ba8, win)
78 #define  PAB_BUS_SHIFT			24
79 #define  PAB_DEVICE_SHIFT		19
80 #define  PAB_FUNCTION_SHIFT		16
81 
82 #define PAB_AXI_AMAP_PEX_WIN_H(win)	PAB_REG_ADDR(0x0bac, win)
83 #define PAB_INTP_AXI_PIO_CLASS		0x474
84 
85 #define PAB_PEX_AMAP_CTRL(win)		PAB_REG_ADDR(0x4ba0, win)
86 #define  AMAP_CTRL_EN_SHIFT		0
87 #define  AMAP_CTRL_TYPE_SHIFT		1
88 #define  AMAP_CTRL_TYPE_MASK		3
89 
90 #define PAB_EXT_PEX_AMAP_SIZEN(win)	PAB_EXT_REG_ADDR(0xbef0, win)
91 #define PAB_EXT_PEX_AMAP_AXI_WIN(win)	PAB_EXT_REG_ADDR(0xb4a0, win)
92 #define PAB_PEX_AMAP_AXI_WIN(win)	PAB_REG_ADDR(0x4ba4, win)
93 #define PAB_PEX_AMAP_PEX_WIN_L(win)	PAB_REG_ADDR(0x4ba8, win)
94 #define PAB_PEX_AMAP_PEX_WIN_H(win)	PAB_REG_ADDR(0x4bac, win)
95 
96 /* starting offset of INTX bits in status register */
97 #define PAB_INTX_START			5
98 
99 /* supported number of MSI interrupts */
100 #define PCI_NUM_MSI			16
101 
102 /* MSI registers */
103 #define MSI_BASE_LO_OFFSET		0x04
104 #define MSI_BASE_HI_OFFSET		0x08
105 #define MSI_SIZE_OFFSET			0x0c
106 #define MSI_ENABLE_OFFSET		0x14
107 #define MSI_STATUS_OFFSET		0x18
108 #define MSI_DATA_OFFSET			0x20
109 #define MSI_ADDR_L_OFFSET		0x24
110 #define MSI_ADDR_H_OFFSET		0x28
111 
112 /* outbound and inbound window definitions */
113 #define WIN_NUM_0			0
114 #define WIN_NUM_1			1
115 #define CFG_WINDOW_TYPE			0
116 #define IO_WINDOW_TYPE			1
117 #define MEM_WINDOW_TYPE			2
118 #define IB_WIN_SIZE			((u64)256 * 1024 * 1024 * 1024)
119 #define MAX_PIO_WINDOWS			8
120 
121 /* Parameters for the waiting for link up routine */
122 #define LINK_WAIT_MAX_RETRIES		10
123 #define LINK_WAIT_MIN			90000
124 #define LINK_WAIT_MAX			100000
125 
126 #define PAGED_ADDR_BNDRY		0xc00
127 #define OFFSET_TO_PAGE_ADDR(off)	\
128 	((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
129 #define OFFSET_TO_PAGE_IDX(off)		\
130 	((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
131 
struct mobiveil_msi {			/* MSI information */
	struct mutex lock;		/* protect bitmap variable */
	struct irq_domain *msi_domain;	/* top-level PCI MSI domain */
	struct irq_domain *dev_domain;	/* inner domain handing out vectors */
	phys_addr_t msi_pages_phys;	/* MSI target address (pcie_reg_base) */
	int num_of_vectors;		/* supported vectors (PCI_NUM_MSI) */
	DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);	/* vector allocation map */
};
140 
struct mobiveil_pcie {
	struct platform_device *pdev;
	struct list_head resources;	/* host bridge window resources */
	void __iomem *config_axi_slave_base;	/* endpoint config base */
	void __iomem *csr_axi_slave_base;	/* root port config base */
	void __iomem *apb_csr_base;	/* MSI register base */
	phys_addr_t pcie_reg_base;	/* Physical PCIe Controller Base */
	struct irq_domain *intx_domain;	/* legacy INTx IRQ domain */
	raw_spinlock_t intx_mask_lock;	/* serializes INTx enable-reg RMW */
	int irq;			/* chained controller interrupt */
	int apio_wins;			/* available outbound (AXI) windows */
	int ppio_wins;			/* available inbound (PEX) windows */
	int ob_wins_configured;		/* configured outbound windows */
	int ib_wins_configured;		/* configured inbound windows */
	struct resource *ob_io_res;	/* outbound config window resource */
	char root_bus_nr;		/* root bus number */
	struct mobiveil_msi msi;	/* MSI bookkeeping */
};
159 
160 /*
161  * mobiveil_pcie_sel_page - routine to access paged register
162  *
163  * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged,
164  * for this scheme to work extracted higher 6 bits of the offset will be
165  * written to pg_sel field of PAB_CTRL register and rest of the lower 10
166  * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
167  */
/* Program @pg_idx into the pg_sel field of PAB_CTRL to select a register page. */
static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
{
	u32 ctrl;

	ctrl = readl(pcie->csr_axi_slave_base + PAB_CTRL);
	ctrl = (ctrl & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT)) |
	       ((pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT);
	writel(ctrl, pcie->csr_axi_slave_base + PAB_CTRL);
}
178 
mobiveil_pcie_comp_addr(struct mobiveil_pcie * pcie,u32 off)179 static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
180 {
181 	if (off < PAGED_ADDR_BNDRY) {
182 		/* For directly accessed registers, clear the pg_sel field */
183 		mobiveil_pcie_sel_page(pcie, 0);
184 		return pcie->csr_axi_slave_base + off;
185 	}
186 
187 	mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
188 	return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
189 }
190 
/*
 * Read a 1-, 2- or 4-byte value from @addr into @val.
 * Misaligned addresses and unsupported sizes yield
 * PCIBIOS_BAD_REGISTER_NUMBER with *val zeroed.
 */
static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
{
	/* the access must be naturally aligned for its width */
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
215 
/*
 * Write a 1-, 2- or 4-byte value to @addr.
 * Misaligned addresses and unsupported sizes yield
 * PCIBIOS_BAD_REGISTER_NUMBER.
 */
static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
{
	/* the access must be naturally aligned for its width */
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}
237 
/*
 * csr_read - read a CSR register of @size bytes at offset @off,
 * resolving register paging first. Logs and returns a zeroed value on a
 * bad (misaligned / wrong-size) access.
 */
static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
{
	/* was plain "void *": that silently discarded the __iomem annotation */
	void __iomem *addr;
	u32 val;
	int ret;

	addr = mobiveil_pcie_comp_addr(pcie, off);

	ret = mobiveil_pcie_read(addr, size, &val);
	if (ret)
		dev_err(&pcie->pdev->dev, "read CSR address failed\n");

	return val;
}
252 
/*
 * csr_write - write @val to a CSR register of @size bytes at offset
 * @off, resolving register paging first. Logs on a bad (misaligned /
 * wrong-size) access.
 */
static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
{
	/* was plain "void *": that silently discarded the __iomem annotation */
	void __iomem *addr;
	int ret;

	addr = mobiveil_pcie_comp_addr(pcie, off);

	ret = mobiveil_pcie_write(addr, size, val);
	if (ret)
		dev_err(&pcie->pdev->dev, "write CSR address failed\n");
}
264 
/* 32-bit CSR read convenience wrapper */
static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
{
	return csr_read(pcie, off, sizeof(u32));
}
269 
/* 32-bit CSR write convenience wrapper */
static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
{
	csr_write(pcie, val, off, sizeof(u32));
}
274 
mobiveil_pcie_link_up(struct mobiveil_pcie * pcie)275 static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
276 {
277 	return (csr_readl(pcie, LTSSM_STATUS) &
278 		LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
279 }
280 
mobiveil_pcie_valid_device(struct pci_bus * bus,unsigned int devfn)281 static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
282 {
283 	struct mobiveil_pcie *pcie = bus->sysdata;
284 
285 	/* Only one device down on each root port */
286 	if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
287 		return false;
288 
289 	/*
290 	 * Do not read more than one device on the bus directly
291 	 * attached to RC
292 	 */
293 	if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0))
294 		return false;
295 
296 	return true;
297 }
298 
299 /*
300  * mobiveil_pcie_map_bus - routine to get the configuration base of either
301  * root port or endpoint
302  */
static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
					   unsigned int devfn, int where)
{
	struct mobiveil_pcie *pcie = bus->sysdata;
	u32 value;

	/* reject BDFs the topology cannot contain */
	if (!mobiveil_pcie_valid_device(bus, devfn))
		return NULL;

	/* RC config access: root port registers live in the CSR space */
	if (bus->number == pcie->root_bus_nr)
		return pcie->csr_axi_slave_base + where;

	/*
	 * EP config access (in Config/APIO space)
	 * Program PEX Address base (31..16 bits) with appropriate value
	 * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
	 * Relies on pci_lock serialization
	 */
	value = bus->number << PAB_BUS_SHIFT |
		PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
		PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;

	csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));

	/* the caller's read/write now lands on the selected BDF */
	return pcie->config_axi_slave_base + where;
}
330 
/* Config accessors: map_bus selects RC vs EP space, generic ops do the I/O */
static struct pci_ops mobiveil_pcie_ops = {
	.map_bus = mobiveil_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
336 
/*
 * Chained handler for the controller's single interrupt line, which
 * multiplexes INTx and MSI events: demultiplex each pending source to
 * its virtual IRQ, acknowledging hardware status as it is handled.
 */
static void mobiveil_pcie_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
	struct device *dev = &pcie->pdev->dev;
	struct mobiveil_msi *msi = &pcie->msi;
	u32 msi_data, msi_addr_lo, msi_addr_hi;
	u32 intr_status, msi_status;
	unsigned long shifted_status;
	u32 bit, virq, val, mask;

	/*
	 * The core provides a single interrupt for both INTx/MSI messages.
	 * So we'll read both INTx and MSI status
	 */

	chained_irq_enter(chip, desc);

	/* read INTx status, considering only the enabled sources */
	val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
	mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	intr_status = val & mask;

	/* Handle INTx */
	if (intr_status & PAB_INTP_INTX_MASK) {
		/* isolate the INTA..INTD bits of the status register */
		shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
		shifted_status &= PAB_INTP_INTX_MASK;
		shifted_status >>= PAB_INTX_START;
		/* loop until a re-read shows no INTx bit still pending */
		do {
			for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
				virq = irq_find_mapping(pcie->intx_domain,
							bit + 1);
				if (virq)
					generic_handle_irq(virq);
				else
					dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
							    bit);

				/* clear interrupt handled */
				csr_writel(pcie, 1 << (PAB_INTX_START + bit),
					   PAB_INTP_AMBA_MISC_STAT);
			}

			shifted_status = csr_readl(pcie,
						   PAB_INTP_AMBA_MISC_STAT);
			shifted_status &= PAB_INTP_INTX_MASK;
			shifted_status >>= PAB_INTX_START;
		} while (shifted_status != 0);
	}

	/* read extra MSI status register */
	msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);

	/* handle MSI interrupts: drain the hardware FIFO entry by entry */
	while (msi_status & 1) {
		msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);

		/*
		 * MSI_STATUS_OFFSET register gets updated to zero
		 * once we pop not only the MSI data but also address
		 * from MSI hardware FIFO. So keeping these following
		 * two dummy reads.
		 */
		msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
					    MSI_ADDR_L_OFFSET);
		msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
					    MSI_ADDR_H_OFFSET);
		dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
			msi_data, msi_addr_hi, msi_addr_lo);

		/* the MSI data payload is the hwirq in the MSI dev domain */
		virq = irq_find_mapping(msi->dev_domain, msi_data);
		if (virq)
			generic_handle_irq(virq);

		msi_status = readl_relaxed(pcie->apb_csr_base +
					   MSI_STATUS_OFFSET);
	}

	/* Clear the interrupt status */
	csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
	chained_irq_exit(chip, desc);
}
419 
mobiveil_pcie_parse_dt(struct mobiveil_pcie * pcie)420 static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
421 {
422 	struct device *dev = &pcie->pdev->dev;
423 	struct platform_device *pdev = pcie->pdev;
424 	struct device_node *node = dev->of_node;
425 	struct resource *res;
426 
427 	/* map config resource */
428 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
429 					   "config_axi_slave");
430 	pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
431 	if (IS_ERR(pcie->config_axi_slave_base))
432 		return PTR_ERR(pcie->config_axi_slave_base);
433 	pcie->ob_io_res = res;
434 
435 	/* map csr resource */
436 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
437 					   "csr_axi_slave");
438 	pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
439 	if (IS_ERR(pcie->csr_axi_slave_base))
440 		return PTR_ERR(pcie->csr_axi_slave_base);
441 	pcie->pcie_reg_base = res->start;
442 
443 	/* map MSI config resource */
444 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
445 	pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
446 	if (IS_ERR(pcie->apb_csr_base))
447 		return PTR_ERR(pcie->apb_csr_base);
448 
449 	/* read the number of windows requested */
450 	if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
451 		pcie->apio_wins = MAX_PIO_WINDOWS;
452 
453 	if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
454 		pcie->ppio_wins = MAX_PIO_WINDOWS;
455 
456 	pcie->irq = platform_get_irq(pdev, 0);
457 	if (pcie->irq <= 0) {
458 		dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
459 		return -ENODEV;
460 	}
461 
462 	return 0;
463 }
464 
/*
 * program_ib_windows - program inbound window @win_num to translate the
 * PCIe range starting at @pci_addr to CPU/AXI space at @cpu_addr for
 * @size bytes of the given window @type.
 *
 * NOTE(review): the ~(size - 1) mask encoding assumes @size is a power
 * of two — confirm at call sites.
 */
static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
			       u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
{
	u32 value;
	u64 size64 = ~(size - 1);	/* window size encoded as a base mask */

	if (win_num >= pcie->ppio_wins) {
		dev_err(&pcie->pdev->dev,
			"ERROR: max inbound windows reached !\n");
		return;
	}

	/* enable the window, set its type and the low size-mask bits */
	value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
	value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
	value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
		 (lower_32_bits(size64) & WIN_SIZE_MASK);
	csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));

	/* upper 32 bits of the size mask live in the extended register */
	csr_writel(pcie, upper_32_bits(size64),
		   PAB_EXT_PEX_AMAP_SIZEN(win_num));

	/* AXI-side (CPU) base address, low then high half */
	csr_writel(pcie, lower_32_bits(cpu_addr),
		   PAB_PEX_AMAP_AXI_WIN(win_num));
	csr_writel(pcie, upper_32_bits(cpu_addr),
		   PAB_EXT_PEX_AMAP_AXI_WIN(win_num));

	/* PEX-side (PCIe) base address, low then high half */
	csr_writel(pcie, lower_32_bits(pci_addr),
		   PAB_PEX_AMAP_PEX_WIN_L(win_num));
	csr_writel(pcie, upper_32_bits(pci_addr),
		   PAB_PEX_AMAP_PEX_WIN_H(win_num));

	pcie->ib_wins_configured++;
}
498 
499 /*
500  * routine to program the outbound windows
501  */
/*
 * program_ob_windows - program outbound window @win_num to translate the
 * CPU/AXI range starting at @cpu_addr to PCIe space at @pci_addr for
 * @size bytes of the given window @type.
 *
 * NOTE(review): the ~(size - 1) mask encoding assumes @size is a power
 * of two — confirm at call sites.
 */
static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
			       u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
{
	u32 value;
	u64 size64 = ~(size - 1);	/* window size encoded as a base mask */

	if (win_num >= pcie->apio_wins) {
		dev_err(&pcie->pdev->dev,
			"ERROR: max outbound windows reached !\n");
		return;
	}

	/*
	 * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
	 * to 4 KB in PAB_AXI_AMAP_CTRL register
	 */
	value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
	value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
	value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
		 (lower_32_bits(size64) & WIN_SIZE_MASK);
	csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));

	/* upper 32 bits of the size mask live in the extended register */
	csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));

	/*
	 * program AXI window base with appropriate value in
	 * PAB_AXI_AMAP_AXI_WIN0 register
	 */
	csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
		   PAB_AXI_AMAP_AXI_WIN(win_num));
	csr_writel(pcie, upper_32_bits(cpu_addr),
		   PAB_EXT_AXI_AMAP_AXI_WIN(win_num));

	/* PEX-side (PCIe) base address, low then high half */
	csr_writel(pcie, lower_32_bits(pci_addr),
		   PAB_AXI_AMAP_PEX_WIN_L(win_num));
	csr_writel(pcie, upper_32_bits(pci_addr),
		   PAB_AXI_AMAP_PEX_WIN_H(win_num));

	pcie->ob_wins_configured++;
}
542 
mobiveil_bringup_link(struct mobiveil_pcie * pcie)543 static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
544 {
545 	int retries;
546 
547 	/* check if the link is up or not */
548 	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
549 		if (mobiveil_pcie_link_up(pcie))
550 			return 0;
551 
552 		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
553 	}
554 
555 	dev_err(&pcie->pdev->dev, "link never came up\n");
556 
557 	return -ETIMEDOUT;
558 }
559 
/*
 * Program the MSI hardware: target window base (pcie_reg_base), its
 * 4KB size, and finally the enable bit.
 */
static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
{
	phys_addr_t msg_addr = pcie->pcie_reg_base;
	struct mobiveil_msi *msi = &pcie->msi;

	/* all PCI_NUM_MSI vectors are made available */
	pcie->msi.num_of_vectors = PCI_NUM_MSI;
	msi->msi_pages_phys = (phys_addr_t)msg_addr;

	/* 64-bit MSI target address, split across the LO/HI registers */
	writel_relaxed(lower_32_bits(msg_addr),
		       pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
	writel_relaxed(upper_32_bits(msg_addr),
		       pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
	writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
	/* enable last, once the window is fully described */
	writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}
575 
/*
 * mobiveil_host_init - bring the root complex up to config-access readiness
 *
 * Programs bus numbers, the command register, the PIO enables, the
 * default outbound (config) and inbound (memory) translation windows,
 * one outbound window per host bridge resource, the class-code fixup
 * and the MSI hardware registers.
 */
static int mobiveil_host_init(struct mobiveil_pcie *pcie)
{
	u32 value, pab_ctrl, type;
	struct resource_entry *win;

	/* setup bus numbers: secondary 1, subordinate 0xff */
	value = csr_readl(pcie, PCI_PRIMARY_BUS);
	value &= 0xff000000;
	value |= 0x00ff0100;
	csr_writel(pcie, value, PCI_PRIMARY_BUS);

	/*
	 * program Bus Master Enable Bit in Command Register in PAB Config
	 * Space
	 */
	value = csr_readl(pcie, PCI_COMMAND);
	value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	csr_writel(pcie, value, PCI_COMMAND);

	/*
	 * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
	 * register
	 */
	pab_ctrl = csr_readl(pcie, PAB_CTRL);
	pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
	csr_writel(pcie, pab_ctrl, PAB_CTRL);

	/* enable both INTx and MSI interrupt sources */
	csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
		   PAB_INTP_AMBA_MISC_ENB);

	/*
	 * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
	 * PAB_AXI_PIO_CTRL Register
	 */
	value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
	value |= APIO_EN_MASK;
	csr_writel(pcie, value, PAB_AXI_PIO_CTRL);

	/* Enable PCIe PIO master */
	value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
	value |= 1 << PIO_ENABLE_SHIFT;
	csr_writel(pcie, value, PAB_PEX_PIO_CTRL);

	/*
	 * we'll program one outbound window for config reads and
	 * another default inbound window for all the upstream traffic
	 * rest of the outbound windows will be configured according to
	 * the "ranges" field defined in device tree
	 */

	/* config outbound translation window */
	program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0,
			   CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));

	/* memory inbound translation window */
	program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &pcie->resources) {
		if (resource_type(win->res) == IORESOURCE_MEM)
			type = MEM_WINDOW_TYPE;
		else if (resource_type(win->res) == IORESOURCE_IO)
			type = IO_WINDOW_TYPE;
		else
			continue;

		/* configure outbound translation window */
		program_ob_windows(pcie, pcie->ob_wins_configured,
				   win->res->start,
				   win->res->start - win->offset,
				   type, resource_size(win->res));
	}

	/* fixup for PCIe class register */
	value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
	value &= 0xff;
	value |= (PCI_CLASS_BRIDGE_PCI << 16);
	csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);

	/* setup MSI hardware registers */
	mobiveil_pcie_enable_msi(pcie);

	return 0;
}
660 
mobiveil_mask_intx_irq(struct irq_data * data)661 static void mobiveil_mask_intx_irq(struct irq_data *data)
662 {
663 	struct irq_desc *desc = irq_to_desc(data->irq);
664 	struct mobiveil_pcie *pcie;
665 	unsigned long flags;
666 	u32 mask, shifted_val;
667 
668 	pcie = irq_desc_get_chip_data(desc);
669 	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
670 	raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
671 	shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
672 	shifted_val &= ~mask;
673 	csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
674 	raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
675 }
676 
mobiveil_unmask_intx_irq(struct irq_data * data)677 static void mobiveil_unmask_intx_irq(struct irq_data *data)
678 {
679 	struct irq_desc *desc = irq_to_desc(data->irq);
680 	struct mobiveil_pcie *pcie;
681 	unsigned long flags;
682 	u32 shifted_val, mask;
683 
684 	pcie = irq_desc_get_chip_data(desc);
685 	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
686 	raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
687 	shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
688 	shifted_val |= mask;
689 	csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
690 	raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
691 }
692 
/* INTx irq_chip: (un)mask toggles the per-INTx enable bits in MISC_ENB */
static struct irq_chip intx_irq_chip = {
	.name = "mobiveil_pcie:intx",
	.irq_enable = mobiveil_unmask_intx_irq,
	.irq_disable = mobiveil_mask_intx_irq,
	.irq_mask = mobiveil_mask_intx_irq,
	.irq_unmask = mobiveil_unmask_intx_irq,
};
700 
701 /* routine to setup the INTx related data */
/* Bind a freshly mapped INTx virq to our chip and the level-type flow. */
static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);

	return 0;
}
710 
711 /* INTx domain operations structure */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mobiveil_pcie_intx_map,	/* invoked per INTx virq mapping */
};
715 
/* Top-level MSI irq_chip; mask/unmask are delegated to the PCI MSI core */
static struct irq_chip mobiveil_msi_irq_chip = {
	.name = "Mobiveil PCIe MSI",
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
721 
/* MSI domain info: default domain/chip ops, MSI-X supported as well */
static struct msi_domain_info mobiveil_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &mobiveil_msi_irq_chip,
};
727 
/*
 * Compose the MSI message for a vector: the target address is
 * pcie_reg_base + hwirq * sizeof(int), and the payload is the hwirq
 * itself (which the ISR uses to look up the virq).
 */
static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}
740 
/* MSI affinity changes are not supported; always report -EINVAL. */
static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
				     const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
746 
/* Inner (per-vector) MSI chip: composes messages, no affinity support */
static struct irq_chip mobiveil_msi_bottom_irq_chip = {
	.name			= "Mobiveil MSI",
	.irq_compose_msi_msg	= mobiveil_compose_msi_msg,
	.irq_set_affinity	= mobiveil_msi_set_affinity,
};
752 
mobiveil_irq_msi_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * args)753 static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
754 					 unsigned int virq,
755 					 unsigned int nr_irqs, void *args)
756 {
757 	struct mobiveil_pcie *pcie = domain->host_data;
758 	struct mobiveil_msi *msi = &pcie->msi;
759 	unsigned long bit;
760 
761 	WARN_ON(nr_irqs != 1);
762 	mutex_lock(&msi->lock);
763 
764 	bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
765 	if (bit >= msi->num_of_vectors) {
766 		mutex_unlock(&msi->lock);
767 		return -ENOSPC;
768 	}
769 
770 	set_bit(bit, msi->msi_irq_in_use);
771 
772 	mutex_unlock(&msi->lock);
773 
774 	irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
775 			    domain->host_data, handle_level_irq, NULL, NULL);
776 	return 0;
777 }
778 
mobiveil_irq_msi_domain_free(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs)779 static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
780 					 unsigned int virq,
781 					 unsigned int nr_irqs)
782 {
783 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
784 	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
785 	struct mobiveil_msi *msi = &pcie->msi;
786 
787 	mutex_lock(&msi->lock);
788 
789 	if (!test_bit(d->hwirq, msi->msi_irq_in_use))
790 		dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
791 			d->hwirq);
792 	else
793 		__clear_bit(d->hwirq, msi->msi_irq_in_use);
794 
795 	mutex_unlock(&msi->lock);
796 }
/* Inner MSI domain ops: bitmap-backed vector alloc/free */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= mobiveil_irq_msi_domain_alloc,
	.free	= mobiveil_irq_msi_domain_free,
};
801 
/*
 * Build the two-level MSI hierarchy: an inner linear domain handing out
 * vector numbers (msi_domain_ops) and a PCI MSI domain on top of it.
 */
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct mobiveil_msi *msi = &pcie->msi;

	mutex_init(&pcie->msi.lock);
	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
						&msi_domain_ops, pcie);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &mobiveil_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		/* unwind the inner domain on failure */
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}
827 
mobiveil_pcie_init_irq_domain(struct mobiveil_pcie * pcie)828 static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
829 {
830 	struct device *dev = &pcie->pdev->dev;
831 	struct device_node *node = dev->of_node;
832 	int ret;
833 
834 	/* setup INTx */
835 	pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
836 						  &intx_domain_ops, pcie);
837 
838 	if (!pcie->intx_domain) {
839 		dev_err(dev, "Failed to get a INTx IRQ domain\n");
840 		return -ENOMEM;
841 	}
842 
843 	raw_spin_lock_init(&pcie->intx_mask_lock);
844 
845 	/* setup MSI */
846 	ret = mobiveil_allocate_msi_domains(pcie);
847 	if (ret)
848 		return ret;
849 
850 	return 0;
851 }
852 
/*
 * mobiveil_pcie_probe - platform driver probe
 *
 * Allocates the host bridge, parses DT resources, initializes the root
 * complex, sets up the IRQ domains and chained handler, waits for link
 * up and finally scans and populates the PCI bus.
 */
static int mobiveil_pcie_probe(struct platform_device *pdev)
{
	struct mobiveil_pcie *pcie;
	struct pci_bus *bus;
	struct pci_bus *child;
	struct pci_host_bridge *bridge;
	struct device *dev = &pdev->dev;
	resource_size_t iobase;
	int ret;

	/* allocate the PCIe port */
	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);

	pcie->pdev = pdev;

	ret = mobiveil_pcie_parse_dt(pcie);
	if (ret) {
		dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
		return ret;
	}

	INIT_LIST_HEAD(&pcie->resources);

	/* parse the host bridge base addresses from the device tree file */
	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
						    &pcie->resources, &iobase);
	if (ret) {
		dev_err(dev, "Getting bridge resources failed\n");
		return ret;
	}

	/*
	 * configure all inbound and outbound windows and prepare the RC for
	 * config access
	 */
	ret = mobiveil_host_init(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize host\n");
		goto error;
	}

	/* initialize the IRQ domains */
	ret = mobiveil_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed creating IRQ Domain\n");
		goto error;
	}

	/* route the controller interrupt through our demux handler */
	irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);

	ret = devm_request_pci_bus_resources(dev, &pcie->resources);
	if (ret)
		goto error;

	/* Initialize bridge */
	list_splice_init(&pcie->resources, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->sysdata = pcie;
	bridge->busnr = pcie->root_bus_nr;
	bridge->ops = &mobiveil_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = mobiveil_bringup_link(pcie);
	if (ret) {
		dev_info(dev, "link bring-up failed\n");
		goto error;
	}

	/* setup the kernel resources for the newly added PCIe root bus */
	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto error;

	bus = bridge->bus;

	pci_assign_unassigned_bus_resources(bus);
	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);
	pci_bus_add_devices(bus);

	return 0;
error:
	/* release any bridge window resources still held on pcie->resources */
	pci_free_resource_list(&pcie->resources);
	return ret;
}
943 
/* Device-tree compatibles handled by this driver */
static const struct of_device_id mobiveil_pcie_of_match[] = {
	{.compatible = "mbvl,gpex40-pcie",},
	{},
};
948 
949 MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
950 
/* Built-in platform driver; manual sysfs bind/unbind is suppressed */
static struct platform_driver mobiveil_pcie_driver = {
	.probe = mobiveil_pcie_probe,
	.driver = {
		.name = "mobiveil-pcie",
		.of_match_table = mobiveil_pcie_of_match,
		.suppress_bind_attrs = true,
	},
};
959 
960 builtin_platform_driver(mobiveil_pcie_driver);
961 
962 MODULE_LICENSE("GPL v2");
963 MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
964 MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
965