// SPDX-License-Identifier: GPL-2.0+
/*
 * APM X-Gene PCIe Driver
 *
 * Copyright (c) 2014 Applied Micro Circuits Corporation.
 *
 * Author: Tanmay Inamdar <tinamdar@apm.com>.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "../pci.h"

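/* Controller CSR register offsets, relative to csr_base */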
#define PCIECORE_CTLANDSTATUS		0x50
#define PIM1_1L				0x80
#define IBAR2				0x98
#define IR2MSK				0x9c
#define PIM2_1L				0xa0
#define IBAR3L				0xb4
#define IR3MSKL				0xbc
#define PIM3_1L				0xc4
#define OMR1BARL			0x100
#define OMR2BARL			0x118
#define OMR3BARL			0x130
#define CFGBARL				0x154
#define CFGBARH				0x158
#define CFGCTL				0x15c
#define RTDID				0x160
#define BRIDGE_CFG_0			0x2000
#define BRIDGE_CFG_4			0x2010
#define BRIDGE_STATUS_0			0x2600

#define LINK_UP_MASK			0x00000100
#define AXI_EP_CFG_ACCESS		0x10000
#define EN_COHERENCY			0xF0000000
#define EN_REG				0x00000001
#define OB_LO_IO			0x00000002
#define XGENE_PCIE_VENDORID		0x10E8
#define XGENE_PCIE_DEVICEID		0xE004
#define SZ_1T				(SZ_1G*1024ULL)
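/* PHY rate field of PCIECORE_CTLANDSTATUS, bits [15:14]: link speed as gen - 1 */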
#define PIPE_PHY_RATE_RD(src)		((0xc000 & (u32)(src)) >> 0xe)

#define XGENE_V1_PCI_EXP_CAP		0x40

/* PCIe IP version */
#define XGENE_PCIE_IP_VER_UNKN		0
#define XGENE_PCIE_IP_VER_1		1
#define XGENE_PCIE_IP_VER_2		2

#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
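/*
 * struct xgene_pcie_port - per-port driver private data
 * @node:	device node of the port (DT probe path)
 * @dev:	underlying device
 * @clk:	port clock (DT probe path)
 * @csr_base:	mapped controller CSR registers
 * @cfg_base:	mapped configuration space
 * @cfg_addr:	bus address of the configuration space
 * @link_up:	link state latched by xgene_pcie_linkup()
 * @version:	PCIe IP version (XGENE_PCIE_IP_VER_*)
 */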
struct xgene_pcie_port {
	struct device_node	*node;
	struct device		*dev;
	struct clk		*clk;
	void __iomem		*csr_base;
	void __iomem		*cfg_base;
	unsigned long		cfg_addr;
	bool			link_up;
	u32			version;
};

static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
{
	return readl(port->csr_base + reg);
}

static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
{
	writel(val, port->csr_base + reg);
}

static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
{
	return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
}

static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
{
	struct pci_config_window *cfg;

	if (acpi_disabled)
		return (struct xgene_pcie_port *)(bus->sysdata);

	cfg = bus->sysdata;
	return (struct xgene_pcie_port *)(cfg->priv);
}

/*
 * When address bits [17:16] are 2'b01 (i.e. AXI_EP_CFG_ACCESS is set in
 * the AXI address), the configuration access is treated as Type 1 and
 * is forwarded to the external PCIe device.
 */
static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
{
	struct xgene_pcie_port *port = pcie_bus_to_port(bus);

	if (bus->number >= (bus->primary + 1))
		return port->cfg_base + AXI_EP_CFG_ACCESS;

	return port->cfg_base;
}

/*
 * For a configuration request, the RTDID register supplies the Bus
 * Number, Device Number and Function Number fields of the request
 * header.
 */
static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
{
	struct xgene_pcie_port *port = pcie_bus_to_port(bus);
	unsigned int b, d, f;
	u32 rtdid_val = 0;

	b = bus->number;
	d = PCI_SLOT(devfn);
	f = PCI_FUNC(devfn);

	if (!pci_is_root_bus(bus))
		rtdid_val = (b << 8) | (d << 3) | f;
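	/* e.g. device 01:00.0 encodes as (1 << 8) | (0 << 3) | 0 = 0x100 */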

	xgene_pcie_writel(port, RTDID, rtdid_val);
	/* read the register back to ensure flush */
	xgene_pcie_readl(port, RTDID);
}

/*
 * The X-Gene PCIe port uses BAR0-BAR1 of the RC's configuration space
 * for the inbound translation from PCI bus addresses to native bus
 * addresses.  The entire DDR region is mapped into PCIe space through
 * these registers so that it can be reached by DMA from EP devices.
 * BAR0/1 of the bridge must therefore be hidden during enumeration to
 * avoid sizing and resource allocation by the PCI core.
 */
static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
{
	if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) ||
				     (offset == PCI_BASE_ADDRESS_1)))
		return true;

	return false;
}

static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
					int offset)
{
	if ((pci_is_root_bus(bus) && devfn != 0) ||
	    xgene_pcie_hide_rc_bars(bus, offset))
		return NULL;

	xgene_pcie_set_rtdid_reg(bus, devfn);
	return xgene_pcie_get_cfg_base(bus) + offset;
}

static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 *val)
{
	struct xgene_pcie_port *port = pcie_bus_to_port(bus);

	if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
	    PCIBIOS_SUCCESSFUL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/*
	 * The v1 controller has a bug in its Configuration Request
	 * Retry Status (CRS) logic: when CRS is enabled and we read the
	 * Vendor and Device ID of a non-existent device, the controller
	 * fabricates return data of 0xFFFF0001 ("device exists but is not
	 * ready") instead of 0xFFFFFFFF ("device does not exist").  This
	 * causes the PCI core to retry the read until it times out.
	 * Avoid this by not claiming to support CRS.
	 */
	if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
	    ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
		*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);

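	/*
	 * Extract the requested sub-word from the aligned 32-bit read,
	 * e.g. a 16-bit read at where = 0x2 returns bits [31:16].
	 */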
	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
static int xgene_get_csr_resource(struct acpi_device *adev,
				  struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no IO and memory resources present in _CRS\n");
		return -EINVAL;
	}

	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
{
	struct device *dev = cfg->parent;
	struct acpi_device *adev = to_acpi_device(dev);
	struct xgene_pcie_port *port;
	struct resource csr;
	int ret;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = xgene_get_csr_resource(adev, &csr);
	if (ret) {
		dev_err(dev, "can't get CSR resource\n");
		return ret;
	}
	port->csr_base = devm_pci_remap_cfg_resource(dev, &csr);
	if (IS_ERR(port->csr_base))
		return PTR_ERR(port->csr_base);

	port->cfg_base = cfg->win;
	port->version = ipversion;

	cfg->priv = port;
	return 0;
}

static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
{
	return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
}

struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
	.bus_shift	= 16,
	.init		= xgene_v1_pcie_ecam_init,
	.pci_ops	= {
		.map_bus	= xgene_pcie_map_bus,
		.read		= xgene_pcie_config_read32,
		.write		= pci_generic_config_write,
	}
};

static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
{
	return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
}

struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
	.bus_shift	= 16,
	.init		= xgene_v2_pcie_ecam_init,
	.pci_ops	= {
		.map_bus	= xgene_pcie_map_bus,
		.read		= xgene_pcie_config_read32,
		.write		= pci_generic_config_write,
	}
};
#endif

#if defined(CONFIG_PCI_XGENE)
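/*
 * Program the 64-bit inbound mask for a BAR pair.  The controller keeps
 * the mask shifted up by 16 bits, spanning bits [31:16] of the register
 * at @addr through bits [15:0] of the register at @addr + 0x8.
 */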
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
				  u32 flags, u64 size)
{
	u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
	u32 val32 = 0;
	u32 val;

	val32 = xgene_pcie_readl(port, addr);
	val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
	xgene_pcie_writel(port, addr, val);

	val32 = xgene_pcie_readl(port, addr + 0x04);
	val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
	xgene_pcie_writel(port, addr + 0x04, val);

	val32 = xgene_pcie_readl(port, addr + 0x04);
	val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
	xgene_pcie_writel(port, addr + 0x04, val);

	val32 = xgene_pcie_readl(port, addr + 0x08);
	val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
	xgene_pcie_writel(port, addr + 0x08, val);

	return mask;
}

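/* Latch the link state; on link-up, the lane count is in BRIDGE_STATUS_0[31:26]. */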
static void xgene_pcie_linkup(struct xgene_pcie_port *port,
			      u32 *lanes, u32 *speed)
{
	u32 val32;

	port->link_up = false;
	val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
	if (val32 & LINK_UP_MASK) {
		port->link_up = true;
		*speed = PIPE_PHY_RATE_RD(val32);
		val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
		*lanes = val32 >> 26;
	}
}

static int xgene_pcie_init_port(struct xgene_pcie_port *port)
{
	struct device *dev = port->dev;
	int rc;

	port->clk = clk_get(dev, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "clock not available\n");
		return -ENODEV;
	}

	rc = clk_prepare_enable(port->clk);
	if (rc) {
		dev_err(dev, "clock enable failed\n");
		return rc;
	}

	return 0;
}

static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
			      struct platform_device *pdev)
{
	struct device *dev = port->dev;
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
	port->csr_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(port->csr_base))
		return PTR_ERR(port->csr_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	port->cfg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(port->cfg_base))
		return PTR_ERR(port->cfg_base);
	port->cfg_addr = res->start;

	return 0;
}

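/*
 * Program one outbound translation region: the registers at @offset hold,
 * in order, the 64-bit CPU address, the 64-bit address mask (with enable
 * flags in the low bits) and the 64-bit PCI address.
 */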
static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
				    struct resource *res, u32 offset,
				    u64 cpu_addr, u64 pci_addr)
{
	struct device *dev = port->dev;
	resource_size_t size = resource_size(res);
	u64 restype = resource_type(res);
	u64 mask = 0;
	u32 min_size;
	u32 flag = EN_REG;

	if (restype == IORESOURCE_MEM) {
		min_size = SZ_128M;
	} else {
		min_size = 128;
		flag |= OB_LO_IO;
	}

	if (size >= min_size)
		mask = ~(size - 1) | flag;
	else
		dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
			 (u64)size, min_size);

	xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
	xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
	xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
	xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
	xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
	xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
}
398 
xgene_pcie_setup_cfg_reg(struct xgene_pcie_port * port)399 static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
400 {
401 	u64 addr = port->cfg_addr;
402 
403 	xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
404 	xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
405 	xgene_pcie_writel(port, CFGCTL, EN_REG);
406 }
407 
xgene_pcie_map_ranges(struct xgene_pcie_port * port,struct list_head * res,resource_size_t io_base)408 static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
409 				 struct list_head *res,
410 				 resource_size_t io_base)
411 {
412 	struct resource_entry *window;
413 	struct device *dev = port->dev;
414 	int ret;
415 
416 	resource_list_for_each_entry(window, res) {
417 		struct resource *res = window->res;
418 		u64 restype = resource_type(res);
419 
420 		dev_dbg(dev, "%pR\n", res);
421 
422 		switch (restype) {
423 		case IORESOURCE_IO:
424 			xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
425 						res->start - window->offset);
426 			ret = devm_pci_remap_iospace(dev, res, io_base);
427 			if (ret < 0)
428 				return ret;
429 			break;
430 		case IORESOURCE_MEM:
431 			if (res->flags & IORESOURCE_PREFETCH)
432 				xgene_pcie_setup_ob_reg(port, res, OMR2BARL,
433 							res->start,
434 							res->start -
435 							window->offset);
436 			else
437 				xgene_pcie_setup_ob_reg(port, res, OMR1BARL,
438 							res->start,
439 							res->start -
440 							window->offset);
441 			break;
442 		case IORESOURCE_BUS:
443 			break;
444 		default:
445 			dev_err(dev, "invalid resource %pR\n", res);
446 			return -EINVAL;
447 		}
448 	}
449 	xgene_pcie_setup_cfg_reg(port);
450 	return 0;
451 }
452 
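/* Program an inbound translation (PIM) target address and region size. */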
static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
				  u64 pim, u64 size)
{
	xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
	xgene_pcie_writel(port, pim_reg + 0x04,
			  upper_32_bits(pim) | EN_COHERENCY);
	xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
	xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
}

/*
 * The X-Gene PCIe controller supports a maximum of 3 inbound memory
 * regions.  This function selects a region based on the region's size.
 */
static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
{
	if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) {
		*ib_reg_mask |= (1 << 1);
		return 1;
	}

	if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
		*ib_reg_mask |= (1 << 0);
		return 0;
	}

	if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) {
		*ib_reg_mask |= (1 << 2);
		return 2;
	}

	return -EINVAL;
}

static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
				    struct of_pci_range *range, u8 *ib_reg_mask)
{
	void __iomem *cfg_base = port->cfg_base;
	struct device *dev = port->dev;
	void *bar_addr;
	u32 pim_reg;
	u64 cpu_addr = range->cpu_addr;
	u64 pci_addr = range->pci_addr;
	u64 size = range->size;
	u64 mask = ~(size - 1) | EN_REG;
	u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
	u32 bar_low;
	int region;

	region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
	if (region < 0) {
		dev_warn(dev, "invalid pcie dma-range config\n");
		return;
	}

	if (range->flags & IORESOURCE_PREFETCH)
		flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;

	bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
	switch (region) {
	case 0:
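		/* region 0 is mapped through the bridge's own BAR0/1 */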
		xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
		bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
		writel(bar_low, bar_addr);
		writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
		pim_reg = PIM1_1L;
		break;
	case 1:
		xgene_pcie_writel(port, IBAR2, bar_low);
		xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
		pim_reg = PIM2_1L;
		break;
	case 2:
		xgene_pcie_writel(port, IBAR3L, bar_low);
		xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
		xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
		xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
		pim_reg = PIM3_1L;
		break;
	}

	xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}

static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
	struct device_node *np = port->node;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	struct device *dev = port->dev;
	u8 ib_reg_mask = 0;

	if (of_pci_dma_range_parser_init(&parser, np)) {
		dev_err(dev, "missing dma-ranges property\n");
		return -EINVAL;
	}

	/* Get the dma-ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		u64 end = range.cpu_addr + range.size - 1;

		dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
			range.flags, range.cpu_addr, end, range.pci_addr);
		xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
	}
	return 0;
}

/* Clear the BAR configuration that was set up by firmware */
static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
{
	int i;

	for (i = PIM1_1L; i <= CFGCTL; i += 4)
		xgene_pcie_writel(port, i, 0);
}

static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
			    resource_size_t io_base)
{
	struct device *dev = port->dev;
	u32 val, lanes = 0, speed = 0;
	int ret;

	xgene_pcie_clear_config(port);

	/* setup the vendor and device IDs correctly */
	val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
	xgene_pcie_writel(port, BRIDGE_CFG_0, val);

	ret = xgene_pcie_map_ranges(port, res, io_base);
	if (ret)
		return ret;

	ret = xgene_pcie_parse_map_dma_ranges(port);
	if (ret)
		return ret;

	xgene_pcie_linkup(port, &lanes, &speed);
	if (!port->link_up)
		dev_info(dev, "(rc) link down\n");
	else
		dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
	return 0;
}

static struct pci_ops xgene_pcie_ops = {
	.map_bus = xgene_pcie_map_bus,
	.read = xgene_pcie_config_read32,
	.write = pci_generic_config_write32,
};

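/*
 * A minimal, illustrative device-tree node for this driver (addresses
 * and sizes are placeholders; see the "apm,xgene-pcie" binding document
 * for the authoritative example).  The "csr" and "cfg" reg-names match
 * what xgene_pcie_map_reg() looks up above:
 *
 *	pcie0: pcie@1f2b0000 {
 *		compatible = "apm,xgene-pcie";
 *		reg = <0x0 0x1f2b0000 0x0 0x00010000>,
 *		      <0xe0 0xd0000000 0x0 0x00040000>;
 *		reg-names = "csr", "cfg";
 *		device_type = "pci";
 *		...
 *	};
 */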
static int xgene_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node;
	struct xgene_pcie_port *port;
	resource_size_t iobase = 0;
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	int ret;
	LIST_HEAD(res);

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENOMEM;

	port = pci_host_bridge_priv(bridge);

	port->node = of_node_get(dn);
	port->dev = dev;

	port->version = XGENE_PCIE_IP_VER_UNKN;
	if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
		port->version = XGENE_PCIE_IP_VER_1;

	ret = xgene_pcie_map_reg(port, pdev);
	if (ret)
		return ret;

	ret = xgene_pcie_init_port(port);
	if (ret)
		return ret;

	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
						    &iobase);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &res);
	if (ret)
		goto error;

	ret = xgene_pcie_setup(port, &res, iobase);
	if (ret)
		goto error;

	list_splice_init(&res, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->sysdata = port;
	bridge->busnr = 0;
	bridge->ops = &xgene_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret < 0)
		goto error;

	bus = bridge->bus;

	pci_assign_unassigned_bus_resources(bus);
	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);
	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_resource_list(&res);
	return ret;
}

static const struct of_device_id xgene_pcie_match_table[] = {
	{.compatible = "apm,xgene-pcie",},
	{},
};

static struct platform_driver xgene_pcie_driver = {
	.driver = {
		.name = "xgene-pcie",
		.of_match_table = of_match_ptr(xgene_pcie_match_table),
		.suppress_bind_attrs = true,
	},
	.probe = xgene_pcie_probe,
};
builtin_platform_driver(xgene_pcie_driver);
#endif