// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contains common PCI routines for ALL ppc platforms
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <linux/io.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>

static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* XXX kill that some day ... */
static int global_phb_number;		/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

unsigned long isa_io_base;
EXPORT_SYMBOL(isa_io_base);

static int pci_bus_count;

struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (!phb)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	phb->is_dynamic = mem_init_done;
	return phb;
}

void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}

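/*
 * Minimal usage sketch for the two helpers above (illustrative only;
 * "np" is a hypothetical host-bridge device-tree node, not code from
 * this file):
 *
 *	struct pci_controller *phb = pcibios_alloc_controller(np);
 *	if (!phb)
 *		return -ENOMEM;
 *	phb->ops = ...;			// platform config accessors
 *	// ... parse ranges, scan the bus ...
 *	// and on teardown:
 *	pcibios_free_controller(phb);
 */
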
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
	return resource_size(&hose->io_resource);
}

int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);

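/*
 * Worked example for pci_address_to_pio() (hypothetical numbers, purely
 * illustrative): with io_base_phys = 0xe8000000 and io_base_virt mapped
 * at _IO_BASE + 0x1000, physical address 0xe8000020 translates to the
 * PIO token 0x1000 + 0x20 = 0x1020, i.e. the cookie that inb()/outb()
 * expect relative to _IO_BASE.
 */
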
/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s.
 */

int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	resource_size_t ioaddr = pci_resource_start(pdev, bar);

	if (!hose)
		return -EINVAL;		/* should never happen */

	/* Convert to an offset within this PCI controller */
	ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;

	vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
	return 0;
}

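/*
 * In pci_iobar_pfn() above, pci_resource_start() yields a PIO token
 * (an offset from _IO_BASE), not a physical address: subtracting
 * (io_base_virt - _IO_BASE) gives the offset within this hose's IO
 * window, and adding io_base_phys gives the physical address the
 * mmap() must target; this is the inverse of pci_address_to_pio().
 */
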
/*
 * This one is used by /dev/mem and fbdev, which have no clue about the
 * PCI device; it looks up the matching PCI device and resource first
 * and derives the mapping protection from the resource flags.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}

/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2
	 */
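	/* Concretely (a sketch of the case the shifts below handle): the
	 * generic layer reads the caller's 1-byte quantity as a u32, so
	 * on a big-endian CPU a write of 0x12 arrives here as 0x12000000
	 * and "val >> 24" recovers the byte; a 2-byte value lands in the
	 * upper half, hence "val >> 16".
	 */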
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error
		 * trying to mmap legacy_mem (instead of just moving on without
		 * legacy memory access) we fake it here by giving it anonymous
		 * memory, effectively behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
#ifdef CONFIG_MMU
			pr_debug("Process %s (pid:%d) mapped non-existing PCI",
				current->comm, current->pid);
			pr_debug("legacy memory for 0%04x:%02x\n",
				pci_domain_nr(bus), bus->number);
#endif
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		unsigned long io_offset = (unsigned long)hose->io_base_virt -
								_IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_bus_region region;

	if (rsrc->flags & IORESOURCE_IO) {
		pcibios_resource_to_bus(dev->bus, &region,
					(struct resource *) rsrc);
		*start = region.start;
		*end = region.end;
		return;
	}

	/* We pass a CPU physical address to userland for MMIO instead of a
	 * BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem!
	 *
	 * That means we may have 64-bit values where some apps only expect
	 * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
	 */
	*start = rsrc->start;
	*end = rsrc->end;
}

/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and set up the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges. However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges. So we have to coalesce.
 *
 *   - We can only cope with all memory ranges having the same offset
 *     between CPU addresses and PCI addresses. Unfortunately, some bridges
 *     are set up for a large 1:1 mapping along with a small "window" which
 *     maps PCI address 0 to some arbitrary high address of the CPU space in
 *     order to give access to the ISA memory hole.
 *     The way out of here that I've chosen for now is to always set the
 *     offset based on the first resource found, then override it if we
 *     have a different offset and the previous one was set by an ISA hole.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering. The code somewhat deals with
 *     it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32-bit platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64-bit values for the parsing.
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0, isa_hole = -1;
	unsigned long long isa_mb = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	pr_info("PCI host bridge %pOF %s ranges:\n",
	       dev, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	pr_debug("Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
				range.pci_space, range.pci_addr);
		pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
					range.cpu_addr, range.size);

		/* Skip the range if we failed translation or got a
		 * zero-sized region (some firmware tries to feed us
		 * nonsensical zero-sized regions, such as power3, which
		 * look like some kind of attempt at exposing the VGA
		 * memory hole).
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pr_info("  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						range.size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;

			break;
		case IORESOURCE_MEM:
			pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr,
				(range.pci_space & 0x40000000) ?
				"Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handle the ISA memory hole here */
			if (range.pci_addr == 0) {
				isa_mb = range.cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* We get the PCI/Mem offset from the first range or
			 * the current one if the offset came from an ISA
			 * hole. If they don't match, bugger.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && range.pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = range.cpu_addr -
							range.pci_addr;
			else if (range.pci_addr != 0 &&
				 hose->pci_mem_offset != range.cpu_addr -
							range.pci_addr) {
				pr_info(" \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}

	/* If there's an ISA hole and the pci_mem_offset is -not- matching
	 * the ISA hole offset, then we need to remove the ISA hole from
	 * the resource list for that bridge
	 */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb);
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}

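/*
 * Illustrative "ranges" input for the parser above (hypothetical
 * addresses, shown only to make the 3-cell PCI address / CPU address /
 * 2-cell size layout concrete):
 *
 *	ranges = <0x01000000 0x0 0x00000000  0xe8000000  0x0 0x00010000
 *		  0x02000000 0x0 0x80000000  0x80000000  0x0 0x20000000>;
 *
 * The first entry describes a 64KB IO window at CPU address 0xe8000000,
 * the second a 512MB 1:1 memory window at 0x80000000.
 */
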
/* Display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}

/* This header fixup will do the resource fixup for all devices as they are
 * probed, but not for bridge ranges
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		pr_err("No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		if (res->start == 0) {
			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
				 pci_name(dev), i,
				 (unsigned long long)res->start,
				 (unsigned long long)res->end,
				 (unsigned int)res->flags);
			pr_debug("is unassigned\n");
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);

int pcibios_add_device(struct pci_dev *dev)
{
	dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);

	return 0;
}
EXPORT_SYMBOL(pcibios_add_device);

/*
 * Reparent resource children of parent that conflict with res
 * under res, and make res replace those children.
 */
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}

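/*
 * Sketch of what reparent_resources() does (hypothetical ranges): given
 * a parent with children A [0x100-0x1ff] and B [0x200-0x2ff] and a res
 * spanning [0x100-0x2ff], both children are fully contained in res, so
 * the tree changes from parent -> {A, B} to parent -> res -> {A, B}.
 * Any child that only partially overlaps res makes the function bail
 * out with -1.
 */
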
/*
 *  Handle resources of PCI devices.  If the world were perfect, we could
 *  just allocate all the resource regions and do nothing more.  It isn't.
 *  On the other hand, we cannot just re-allocate all devices, as it would
 *  require us to know lots of host bridge internals.  So we attempt to
 *  keep as much of the original configuration as possible, but tweak it
 *  when it's found to be wrong.
 *
 *  Known BIOS problems we have to work around:
 *	- I/O or memory regions not configured
 *	- regions configured, but not enabled in the command register
 *	- bogus I/O addresses above 64K used
 *	- expansion ROMs left enabled (this may sound harmless, but given
 *	  the fact the PCI specs explicitly allow address decoders to be
 *	  shared between expansion ROMs and other resource regions, it's
 *	  at least dangerous)
 *
 *  Our solution:
 *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
 *	    This gives us fixed barriers on where we can allocate.
 *	(2) Allocate resources for all enabled devices.  If there is
 *	    a collision, just mark the resource as unallocated. Also
 *	    disable expansion ROMs during this step.
 *	(3) Try to allocate resources for disabled devices.  If the
 *	    resources were assigned correctly, everything goes well,
 *	    if they weren't, they won't disturb allocation of other
 *	    resources.
 *	(4) Assign new addresses to resources which were either
 *	    not configured at all or misconfigured.  If explicitly
 *	    requested by the user, configure expansion ROM address
 *	    as well.
 */

static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags
		    || res->start > res->end || res->parent)
			continue;
		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			/* Don't bother with non-root busses when
			 * re-assigning all resources. We clear the
			 * resource flags as if they were colliding
			 * and as such ensure proper re-allocation
			 * later.
			 */
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent  -- paulus
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end);
		pr_debug("[0x%x], parent %p (%s)\n",
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						 i + PCI_BRIDGE_RESOURCES) == 0)
				continue;

		}
		pr_warn("PCI: Cannot allocate resource region ");
		pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
		res->start = res->end = 0;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}

static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
		 pci_name(dev), idx,
		 (unsigned long long)r->start,
		 (unsigned long long)r->end,
		 (unsigned int)r->flags);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		pr_warn("PCI: Cannot allocate resource region %d ", idx);
		pr_cont("of device %s, will remap\n", pci_name(dev));
		if (pr)
			pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
				 pr,
				 (unsigned long long)pr->start,
				 (unsigned long long)pr->end,
				 (unsigned int)pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}

static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
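			/* Pass semantics: "disabled" ends up 0 or 1 and a
			 * resource is claimed when it matches "pass", so
			 * pass 0 takes resources whose decoder is enabled
			 * in the command register and pass 1 takes the
			 * disabled ones (ROMs are forced to pass 1, see
			 * below).
			 */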
			/* We only allocate ROMs on pass 1 just in case they
			 * have been screwed up by firmware
			 */
			if (idx == PCI_ROM_RESOURCE)
				disabled = 1;
			else if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}

static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n",
							pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}

void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources. If we re-assign everything, then
	 * we skip the allocate phase
	 */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);

	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);

	/* Before we start assigning unassigned resources, we try to reserve
	 * the low IO area and the VGA memory area if they intersect the
	 * bus available resources to avoid allocating things on top of them
	 */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_reserve_legacy_regions(b);

	/* Now proceed to assigning things that were left unassigned */
	pr_debug("PCI: Assigning unassigned resources...\n");
	pci_assign_unassigned_resources();
}

static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	unsigned long io_offset;
	struct resource *res;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	/* Fixup IO space offset */
	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	res->start = (res->start + io_offset) & 0xffffffffu;
	res->end = (res->end + io_offset) & 0xffffffffu;

	if (!res->flags) {
		pr_warn("PCI: I/O resource not set for host ");
		pr_cont("bridge %pOF (domain %d)\n",
			hose->dn, hose->global_number);
		/* Workaround for lack of IO resource only on 32-bit */
		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}
	pci_add_resource_offset(resources, res,
		(__force resource_size_t)(hose->io_base_virt - _IO_BASE));

	pr_debug("PCI: PHB IO resource    = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i > 0)
				continue;
			pr_err("PCI: Memory resource 0 not set for ");
			pr_cont("host bridge %pOF (domain %d)\n",
				hose->dn, hose->global_number);

			/* Workaround for lack of MEM resource only on 32-bit */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;

		}
		pci_add_resource_offset(resources, res, hose->pci_mem_offset);

		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
			i, (unsigned long long)res->start,
			(unsigned long long)res->end,
			(unsigned long)res->flags);
	}

	pr_debug("PCI: PHB MEM offset     = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO  offset     = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);
}

static void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;

	pr_debug("PCI: Scanning PHB %pOF\n", node);

	pcibios_setup_phb_resources(hose, &resources);

	bus = pci_scan_root_bus(hose->parent, hose->first_busno,
				hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	bus->busn_res.start = hose->first_busno;
	hose->bus = bus;

	hose->last_busno = bus->busn_res.end;
}

static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;
	int next_busno = 0;

	pr_info("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers.  */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		hose->last_busno = 0xff;
		pcibios_scan_phb(hose);
		if (next_busno <= hose->last_busno)
			next_busno = hose->last_busno + 1;
	}
	pci_bus_count = next_busno;

	/* Call common code to handle resource allocation */
	pcibios_resource_survey();
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		if (hose->bus)
			pci_bus_add_devices(hose->bus);
	}

	return 0;
}

subsys_initcall(pcibios_init);

static struct pci_controller *pci_bus_to_hose(int bus)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		if (bus >= hose->first_busno && bus <= hose->last_busno)
			return hose;
	return NULL;
}

/* Provide information on locations of various I/O regions in physical
 * memory.  Do this on a per-card basis so that we choose the right
 * root bridge.
 * Note that the returned IO or memory base is a physical address
 */

long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
	struct pci_controller *hose;
	long result = -EOPNOTSUPP;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return -ENODEV;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return (long)isa_mem_base;
	}

	return result;
}

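/*
 * Userspace usage sketch for the syscall above (illustrative only; the
 * syscall number macro depends on the architecture's uapi headers):
 *
 *	long io_phys = syscall(SYS_pciconfig_iobase, IOBASE_IO, bus, 0);
 *
 * which returns the physical base of the hose's IO window, suitable
 * for locating the region in /dev/mem.
 */
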
/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}

static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops = {
	.read = null_read_config,
	.write = null_write_config,
};

/*
 * These functions are used early on before PCI scanning is done
 * and all of the pci_dev and pci_bus structures have been created.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (!hose)
		pr_err("Can't find hose for PCI bus %d!\n", busnr);

	bus.number = busnr;
	bus.sysdata = hose;
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}

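/*
 * Note that fake_pci_bus() above hands out a single static struct
 * pci_bus, so it is not re-entrant; that is acceptable because the
 * early accessors below run single-threaded at boot, before the real
 * pci_bus structures exist.
 */
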
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

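/*
 * For example, EARLY_PCI_OP(read, byte, u8 *) below expands to:
 *
 *	int early_read_config_byte(struct pci_controller *hose, int bus,
 *				   int devfn, int offset, u8 *value)
 *	{
 *		return pci_bus_read_config_byte(fake_pci_bus(hose, bus),
 *						devfn, offset, value);
 *	}
 */
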
EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}