1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Contains common pci routines for ALL ppc platforms
4 * (based on pci_32.c and pci_64.c)
5 *
6 * Port for PPC64 David Engebretsen, IBM Corp.
7 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
8 *
9 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
10 * Rework, based on alpha PCI code.
11 *
12 * Common pmac/prep/chrp pci routines. -- Cort
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/pci.h>
17 #include <linux/string.h>
18 #include <linux/init.h>
19 #include <linux/delay.h>
20 #include <linux/export.h>
21 #include <linux/of_address.h>
22 #include <linux/of_pci.h>
23 #include <linux/mm.h>
24 #include <linux/shmem_fs.h>
25 #include <linux/list.h>
26 #include <linux/syscalls.h>
27 #include <linux/irq.h>
28 #include <linux/vmalloc.h>
29 #include <linux/slab.h>
30 #include <linux/vgaarb.h>
31 #include <linux/numa.h>
32
33 #include <asm/processor.h>
34 #include <asm/io.h>
35 #include <asm/prom.h>
36 #include <asm/pci-bridge.h>
37 #include <asm/byteorder.h>
38 #include <asm/machdep.h>
39 #include <asm/ppc-pci.h>
40 #include <asm/eeh.h>
41
42 #include "../../../drivers/pci/pci.h"
43
44 /* hose_spinlock protects accesses to the phb_bitmap. */
45 static DEFINE_SPINLOCK(hose_spinlock);
46 LIST_HEAD(hose_list);
47
48 /* For dynamic PHB numbering on get_phb_number(): max number of PHBs. */
49 #define MAX_PHBS 0x10000
50
51 /*
52 * For dynamic PHB numbering: used/free PHBs tracking bitmap.
53 * Accesses to this bitmap should be protected by hose_spinlock.
54 */
55 static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);
56
57 /* ISA Memory physical address */
58 resource_size_t isa_mem_base;
59 EXPORT_SYMBOL(isa_mem_base);
60
61
62 static const struct dma_map_ops *pci_dma_ops;
63
64 void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
65 {
66 pci_dma_ops = dma_ops;
67 }
68
69 /*
70 * This function should run under locking protection, specifically
71 * hose_spinlock.
72 */
73 static int get_phb_number(struct device_node *dn)
74 {
75 int ret, phb_id = -1;
76 u32 prop_32;
77 u64 prop;
78
79 /*
80 * Try fixed PHB numbering first, by checking archs and reading
81 * the respective device-tree properties. Firstly, try powernv by
82 * reading "ibm,opal-phbid", only present in OPAL environment.
83 */
84 ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
85 if (ret) {
86 ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
87 prop = prop_32;
88 }
89
90 if (!ret)
91 phb_id = (int)(prop & (MAX_PHBS - 1));
92
93 /* We need to be sure to not use the same PHB number twice. */
94 if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
95 return phb_id;
96
97 /*
98 * If not pseries nor powernv, or if fixed PHB numbering tried to add
99 * the same PHB number twice, then fall back to dynamic PHB numbering.
100 */
101 phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
102 BUG_ON(phb_id >= MAX_PHBS);
103 set_bit(phb_id, phb_bitmap);
104
105 return phb_id;
106 }
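/*
 * Illustrative example (not part of the original source): on an OPAL
 * (powernv) system the PHB node carries a fixed id, e.g. a node with
 *
 *	ibm,opal-phbid = <0x0 0x1>;
 *
 * would make get_phb_number() return 1, provided bit 1 of phb_bitmap
 * was still clear; otherwise it falls back to the first free bit.
 */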
107
108 struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
109 {
110 struct pci_controller *phb;
111
112 phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
113 if (phb == NULL)
114 return NULL;
115 spin_lock(&hose_spinlock);
116 phb->global_number = get_phb_number(dev);
117 list_add_tail(&phb->list_node, &hose_list);
118 spin_unlock(&hose_spinlock);
119 phb->dn = dev;
120 phb->is_dynamic = slab_is_available();
121 #ifdef CONFIG_PPC64
122 if (dev) {
123 int nid = of_node_to_nid(dev);
124
125 if (nid < 0 || !node_online(nid))
126 nid = NUMA_NO_NODE;
127
128 PHB_SET_NODE(phb, nid);
129 }
130 #endif
131 return phb;
132 }
133 EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
134
135 void pcibios_free_controller(struct pci_controller *phb)
136 {
137 spin_lock(&hose_spinlock);
138
139 /* Clear bit of phb_bitmap to allow reuse of this PHB number. */
140 if (phb->global_number < MAX_PHBS)
141 clear_bit(phb->global_number, phb_bitmap);
142
143 list_del(&phb->list_node);
144 spin_unlock(&hose_spinlock);
145
146 if (phb->is_dynamic)
147 kfree(phb);
148 }
149 EXPORT_SYMBOL_GPL(pcibios_free_controller);
150
151 /*
152 * This function is used to call pcibios_free_controller()
153 * in a deferred manner: a callback from the PCI subsystem.
154 *
155 * _*DO NOT*_ call pcibios_free_controller() explicitly if
156 * this is used (or it may access an invalid *phb pointer).
157 *
158 * The callback occurs when all references to the root bus
159 * are dropped (e.g., child buses/devices and their users).
160 *
161 * It's called as .release_fn() of 'struct pci_host_bridge'
162 * which is associated with the 'struct pci_controller.bus'
163 * (root bus) - it expects .release_data to hold a pointer
164 * to 'struct pci_controller'.
165 *
166 * In order to use it, register .release_fn()/release_data
167 * like this:
168 *
169 * pci_set_host_bridge_release(bridge,
170 * pcibios_free_controller_deferred,
171 * (void *) phb);
172 *
173 * e.g. in the pcibios_root_bridge_prepare() callback from
174 * pci_create_root_bus().
175 */
176 void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
177 {
178 struct pci_controller *phb = (struct pci_controller *)
179 bridge->release_data;
180
181 pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
182
183 pcibios_free_controller(phb);
184 }
185 EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
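/*
 * Illustrative sketch (not part of the original source): a platform's
 * ppc_md.pcibios_root_bridge_prepare() hook could wire up the deferred
 * release roughly like this (the function name is made up; looking up
 * the phb via pci_bus_to_host() is an assumption):
 *
 *	static int example_root_bridge_prepare(struct pci_host_bridge *bridge)
 *	{
 *		struct pci_controller *phb = pci_bus_to_host(bridge->bus);
 *
 *		pci_set_host_bridge_release(bridge,
 *					    pcibios_free_controller_deferred,
 *					    (void *) phb);
 *		return 0;
 *	}
 */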
186
187 /*
188 * The function is used to return the minimal alignment
189 * for memory or I/O windows of the associated P2P bridge.
190 * By default, 4KiB alignment for I/O windows and 1MiB for
191 * memory windows.
192 */
193 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
194 unsigned long type)
195 {
196 struct pci_controller *phb = pci_bus_to_host(bus);
197
198 if (phb->controller_ops.window_alignment)
199 return phb->controller_ops.window_alignment(bus, type);
200
201 /*
202 * PCI core will figure out the default
203 * alignment: 4KiB for I/O and 1MiB for
204 * memory window.
205 */
206 return 1;
207 }
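/*
 * Illustrative sketch (not part of the original source): a platform that
 * needs, say, 64KB-aligned bridge memory windows could provide the hook
 * like this (the helper name is made up):
 *
 *	static resource_size_t example_window_alignment(struct pci_bus *bus,
 *							unsigned long type)
 *	{
 *		return (type & IORESOURCE_MEM) ? 0x10000 : 1;
 *	}
 *
 * and point controller_ops.window_alignment at it; returning 1 keeps
 * the PCI core defaults mentioned above.
 */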
208
209 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
210 {
211 struct pci_controller *hose = pci_bus_to_host(bus);
212
213 if (hose->controller_ops.setup_bridge)
214 hose->controller_ops.setup_bridge(bus, type);
215 }
216
217 void pcibios_reset_secondary_bus(struct pci_dev *dev)
218 {
219 struct pci_controller *phb = pci_bus_to_host(dev->bus);
220
221 if (phb->controller_ops.reset_secondary_bus) {
222 phb->controller_ops.reset_secondary_bus(dev);
223 return;
224 }
225
226 pci_reset_secondary_bus(dev);
227 }
228
229 resource_size_t pcibios_default_alignment(void)
230 {
231 if (ppc_md.pcibios_default_alignment)
232 return ppc_md.pcibios_default_alignment();
233
234 return 0;
235 }
236
237 #ifdef CONFIG_PCI_IOV
238 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
239 {
240 if (ppc_md.pcibios_iov_resource_alignment)
241 return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
242
243 return pci_iov_resource_size(pdev, resno);
244 }
245
246 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
247 {
248 if (ppc_md.pcibios_sriov_enable)
249 return ppc_md.pcibios_sriov_enable(pdev, num_vfs);
250
251 return 0;
252 }
253
254 int pcibios_sriov_disable(struct pci_dev *pdev)
255 {
256 if (ppc_md.pcibios_sriov_disable)
257 return ppc_md.pcibios_sriov_disable(pdev);
258
259 return 0;
260 }
261
262 #endif /* CONFIG_PCI_IOV */
263
264 void pcibios_bus_add_device(struct pci_dev *pdev)
265 {
266 if (ppc_md.pcibios_bus_add_device)
267 ppc_md.pcibios_bus_add_device(pdev);
268 }
269
270 static resource_size_t pcibios_io_size(const struct pci_controller *hose)
271 {
272 #ifdef CONFIG_PPC64
273 return hose->pci_io_size;
274 #else
275 return resource_size(&hose->io_resource);
276 #endif
277 }
278
279 int pcibios_vaddr_is_ioport(void __iomem *address)
280 {
281 int ret = 0;
282 struct pci_controller *hose;
283 resource_size_t size;
284
285 spin_lock(&hose_spinlock);
286 list_for_each_entry(hose, &hose_list, list_node) {
287 size = pcibios_io_size(hose);
288 if (address >= hose->io_base_virt &&
289 address < (hose->io_base_virt + size)) {
290 ret = 1;
291 break;
292 }
293 }
294 spin_unlock(&hose_spinlock);
295 return ret;
296 }
297
298 unsigned long pci_address_to_pio(phys_addr_t address)
299 {
300 struct pci_controller *hose;
301 resource_size_t size;
302 unsigned long ret = ~0;
303
304 spin_lock(&hose_spinlock);
305 list_for_each_entry(hose, &hose_list, list_node) {
306 size = pcibios_io_size(hose);
307 if (address >= hose->io_base_phys &&
308 address < (hose->io_base_phys + size)) {
309 unsigned long base =
310 (unsigned long)hose->io_base_virt - _IO_BASE;
311 ret = base + (address - hose->io_base_phys);
312 break;
313 }
314 }
315 spin_unlock(&hose_spinlock);
316
317 return ret;
318 }
319 EXPORT_SYMBOL_GPL(pci_address_to_pio);
320
321 /*
322 * Return the domain number for this bus.
323 */
324 int pci_domain_nr(struct pci_bus *bus)
325 {
326 struct pci_controller *hose = pci_bus_to_host(bus);
327
328 return hose->global_number;
329 }
330 EXPORT_SYMBOL(pci_domain_nr);
331
332 /* This routine is meant to be used early during boot, when the
333 * PCI bus numbers have not yet been assigned, and you need to
334 * issue PCI config cycles to an OF device.
335 * It could also be used to "fix" RTAS config cycles if you want
336 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
337 * config cycles.
338 */
339 struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
340 {
341 while(node) {
342 struct pci_controller *hose, *tmp;
343 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
344 if (hose->dn == node)
345 return hose;
346 node = node->parent;
347 }
348 return NULL;
349 }
350
351 struct pci_controller *pci_find_controller_for_domain(int domain_nr)
352 {
353 struct pci_controller *hose;
354
355 list_for_each_entry(hose, &hose_list, list_node)
356 if (hose->global_number == domain_nr)
357 return hose;
358
359 return NULL;
360 }
361
362 /*
363 * Reads the interrupt pin to determine if the interrupt is used by the
364 * card. If the interrupt is used, then gets the interrupt line from
365 * Open Firmware and sets it in the pci_dev and pci_config line.
366 */
367 static int pci_read_irq_line(struct pci_dev *pci_dev)
368 {
369 int virq;
370
371 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
372
373 /* Try to get a mapping from the device-tree */
374 virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
375 if (virq <= 0) {
376 u8 line, pin;
377
378 /* If that fails, let's fall back to what is in the config
379 * space and map that through the default controller. We
380 * also set the type to level low since that's what PCI
381 * interrupts are. If your platform does differently, then
382 * either provide a proper interrupt tree or don't use this
383 * function.
384 */
385 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
386 return -1;
387 if (pin == 0)
388 return -1;
389 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
390 line == 0xff || line == 0) {
391 return -1;
392 }
393 pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
394 line, pin);
395
396 virq = irq_create_mapping(NULL, line);
397 if (virq)
398 irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
399 }
400
401 if (!virq) {
402 pr_debug(" Failed to map !\n");
403 return -1;
404 }
405
406 pr_debug(" Mapped to linux irq %d\n", virq);
407
408 pci_dev->irq = virq;
409
410 return 0;
411 }
412
413 /*
414 * Platform support for /proc/bus/pci/X/Y mmap()s.
415 * -- paulus.
416 */
417 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
418 {
419 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
420 resource_size_t ioaddr = pci_resource_start(pdev, bar);
421
422 if (!hose)
423 return -EINVAL;
424
425 /* Convert to an offset within this PCI controller */
426 ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;
427
428 vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
429 return 0;
430 }
431
432 /*
433 * This one is used by /dev/mem and fbdev, which have no clue about the
434 * PCI device; it tries to find the PCI device first and calls the
435 * above routine.
436 */
437 pgprot_t pci_phys_mem_access_prot(struct file *file,
438 unsigned long pfn,
439 unsigned long size,
440 pgprot_t prot)
441 {
442 struct pci_dev *pdev = NULL;
443 struct resource *found = NULL;
444 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
445 int i;
446
447 if (page_is_ram(pfn))
448 return prot;
449
450 prot = pgprot_noncached(prot);
451 for_each_pci_dev(pdev) {
452 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
453 struct resource *rp = &pdev->resource[i];
454 int flags = rp->flags;
455
456 /* Active and same type? */
457 if ((flags & IORESOURCE_MEM) == 0)
458 continue;
459 /* In the range of this resource? */
460 if (offset < (rp->start & PAGE_MASK) ||
461 offset > rp->end)
462 continue;
463 found = rp;
464 break;
465 }
466 if (found)
467 break;
468 }
469 if (found) {
470 if (found->flags & IORESOURCE_PREFETCH)
471 prot = pgprot_noncached_wc(prot);
472 pci_dev_put(pdev);
473 }
474
475 pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
476 (unsigned long long)offset, pgprot_val(prot));
477
478 return prot;
479 }
480
481 /* This provides legacy IO read access on a bus */
482 int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
483 {
484 unsigned long offset;
485 struct pci_controller *hose = pci_bus_to_host(bus);
486 struct resource *rp = &hose->io_resource;
487 void __iomem *addr;
488
489 /* Check if port can be supported by that bus. We only check
490 * the ranges of the PHB though, not the bus itself as the rules
491 * for forwarding legacy cycles down bridges are not our problem
492 * here. So if the host bridge supports it, we do it.
493 */
494 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
495 offset += port;
496
497 if (!(rp->flags & IORESOURCE_IO))
498 return -ENXIO;
499 if (offset < rp->start || (offset + size) > rp->end)
500 return -ENXIO;
501 addr = hose->io_base_virt + port;
502
503 switch(size) {
504 case 1:
505 *((u8 *)val) = in_8(addr);
506 return 1;
507 case 2:
508 if (port & 1)
509 return -EINVAL;
510 *((u16 *)val) = in_le16(addr);
511 return 2;
512 case 4:
513 if (port & 3)
514 return -EINVAL;
515 *((u32 *)val) = in_le32(addr);
516 return 4;
517 }
518 return -EINVAL;
519 }
520
521 /* This provides legacy IO write access on a bus */
522 int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
523 {
524 unsigned long offset;
525 struct pci_controller *hose = pci_bus_to_host(bus);
526 struct resource *rp = &hose->io_resource;
527 void __iomem *addr;
528
529 /* Check if port can be supported by that bus. We only check
530 * the ranges of the PHB though, not the bus itself as the rules
531 * for forwarding legacy cycles down bridges are not our problem
532 * here. So if the host bridge supports it, we do it.
533 */
534 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
535 offset += port;
536
537 if (!(rp->flags & IORESOURCE_IO))
538 return -ENXIO;
539 if (offset < rp->start || (offset + size) > rp->end)
540 return -ENXIO;
541 addr = hose->io_base_virt + port;
542
543 /* WARNING: The generic code is idiotic. It gets passed a pointer
544 * to what can be a 1, 2 or 4 byte quantity and always reads that
545 * as a u32, which means that we have to correct the location of
546 * the data read within those 32 bits for size 1 and 2
547 */
548 switch(size) {
549 case 1:
550 out_8(addr, val >> 24);
551 return 1;
552 case 2:
553 if (port & 1)
554 return -EINVAL;
555 out_le16(addr, val >> 16);
556 return 2;
557 case 4:
558 if (port & 3)
559 return -EINVAL;
560 out_le32(addr, val);
561 return 4;
562 }
563 return -EINVAL;
564 }
565
566 /* This provides legacy IO or memory mmap access on a bus */
567 int pci_mmap_legacy_page_range(struct pci_bus *bus,
568 struct vm_area_struct *vma,
569 enum pci_mmap_state mmap_state)
570 {
571 struct pci_controller *hose = pci_bus_to_host(bus);
572 resource_size_t offset =
573 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
574 resource_size_t size = vma->vm_end - vma->vm_start;
575 struct resource *rp;
576
577 pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
578 pci_domain_nr(bus), bus->number,
579 mmap_state == pci_mmap_mem ? "MEM" : "IO",
580 (unsigned long long)offset,
581 (unsigned long long)(offset + size - 1));
582
583 if (mmap_state == pci_mmap_mem) {
584 /* Hack alert !
585 *
586 * Because X is lame and can fail starting if it gets an error trying
587 * to mmap legacy_mem (instead of just moving on without legacy memory
588 * access) we fake it here by giving it anonymous memory, effectively
589 * behaving just like /dev/zero
590 */
591 if ((offset + size) > hose->isa_mem_size) {
592 printk(KERN_DEBUG
593 "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
594 current->comm, current->pid, pci_domain_nr(bus), bus->number);
595 if (vma->vm_flags & VM_SHARED)
596 return shmem_zero_setup(vma);
597 return 0;
598 }
599 offset += hose->isa_mem_phys;
600 } else {
601 unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
602 unsigned long roffset = offset + io_offset;
603 rp = &hose->io_resource;
604 if (!(rp->flags & IORESOURCE_IO))
605 return -ENXIO;
606 if (roffset < rp->start || (roffset + size) > rp->end)
607 return -ENXIO;
608 offset += hose->io_base_phys;
609 }
610 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
611
612 vma->vm_pgoff = offset >> PAGE_SHIFT;
613 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
614 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
615 vma->vm_end - vma->vm_start,
616 vma->vm_page_prot);
617 }
618
619 void pci_resource_to_user(const struct pci_dev *dev, int bar,
620 const struct resource *rsrc,
621 resource_size_t *start, resource_size_t *end)
622 {
623 struct pci_bus_region region;
624
625 if (rsrc->flags & IORESOURCE_IO) {
626 pcibios_resource_to_bus(dev->bus, &region,
627 (struct resource *) rsrc);
628 *start = region.start;
629 *end = region.end;
630 return;
631 }
632
633 /* We pass a CPU physical address to userland for MMIO instead of a
634 * BAR value because X is lame and expects to be able to use that
635 * to pass to /dev/mem!
636 *
637 * That means we may have 64-bit values where some apps only expect
638 * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
639 */
640 *start = rsrc->start;
641 *end = rsrc->end;
642 }
643
644 /**
645 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
646 * @hose: newly allocated pci_controller to be setup
647 * @dev: device node of the host bridge
648 * @primary: set if primary bus (32 bits only, soon to be deprecated)
649 *
650 * This function will parse the "ranges" property of a PCI host bridge device
651 * node and setup the resource mapping of a pci controller based on its
652 * content.
653 *
654 * Life would be boring if it wasn't for a few issues that we have to deal
655 * with here:
656 *
657 * - We can only cope with one IO space range and up to 3 Memory space
658 * ranges. However, some machines (thanks Apple !) tend to split their
659 * space into lots of small contiguous ranges. So we have to coalesce.
660 *
661 * - Some busses have IO space not starting at 0, which causes trouble with
662 * the way we do our IO resource renumbering. The code somewhat deals with
663 * it for 64 bits but I would expect problems on 32 bits.
664 *
665 * - Some 32 bits platforms such as 4xx can have physical space larger than
666 * 32 bits so we need to use 64 bits values for the parsing
667 */
668 void pci_process_bridge_OF_ranges(struct pci_controller *hose,
669 struct device_node *dev, int primary)
670 {
671 int memno = 0;
672 struct resource *res;
673 struct of_pci_range range;
674 struct of_pci_range_parser parser;
675
676 printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
677 dev, primary ? "(primary)" : "");
678
679 /* Check for ranges property */
680 if (of_pci_range_parser_init(&parser, dev))
681 return;
682
683 /* Parse it */
684 for_each_of_pci_range(&parser, &range) {
685 /* If we failed translation or got a zero-sized region
686 * (some FW try to feed us with nonsensical zero-sized regions
687 * such as power3 which look like some kind of attempt at exposing
688 * the VGA memory hole)
689 */
690 if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
691 continue;
692
693 /* Act based on address space type */
694 res = NULL;
695 switch (range.flags & IORESOURCE_TYPE_BITS) {
696 case IORESOURCE_IO:
697 printk(KERN_INFO
698 " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
699 range.cpu_addr, range.cpu_addr + range.size - 1,
700 range.pci_addr);
701
702 /* We support only one IO range */
703 if (hose->pci_io_size) {
704 printk(KERN_INFO
705 " \\--> Skipped (too many) !\n");
706 continue;
707 }
708 #ifdef CONFIG_PPC32
709 /* On 32 bits, limit I/O space to 16MB */
710 if (range.size > 0x01000000)
711 range.size = 0x01000000;
712
713 /* 32 bits needs to map IOs here */
714 hose->io_base_virt = ioremap(range.cpu_addr,
715 range.size);
716
717 /* Expect trouble if pci_addr is not 0 */
718 if (primary)
719 isa_io_base =
720 (unsigned long)hose->io_base_virt;
721 #endif /* CONFIG_PPC32 */
722 /* pci_io_size and io_base_phys always represent IO
723 * space starting at 0 so we factor in pci_addr
724 */
725 hose->pci_io_size = range.pci_addr + range.size;
726 hose->io_base_phys = range.cpu_addr - range.pci_addr;
727
728 /* Build resource */
729 res = &hose->io_resource;
730 range.cpu_addr = range.pci_addr;
731 break;
732 case IORESOURCE_MEM:
733 printk(KERN_INFO
734 " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
735 range.cpu_addr, range.cpu_addr + range.size - 1,
736 range.pci_addr,
737 (range.pci_space & 0x40000000) ?
738 "Prefetch" : "");
739
740 /* We support only 3 memory ranges */
741 if (memno >= 3) {
742 printk(KERN_INFO
743 " \\--> Skipped (too many) !\n");
744 continue;
745 }
746 /* Handles ISA memory hole space here */
747 if (range.pci_addr == 0) {
748 if (primary || isa_mem_base == 0)
749 isa_mem_base = range.cpu_addr;
750 hose->isa_mem_phys = range.cpu_addr;
751 hose->isa_mem_size = range.size;
752 }
753
754 /* Build resource */
755 hose->mem_offset[memno] = range.cpu_addr -
756 range.pci_addr;
757 res = &hose->mem_resources[memno++];
758 break;
759 }
760 if (res != NULL) {
761 res->name = dev->full_name;
762 res->flags = range.flags;
763 res->start = range.cpu_addr;
764 res->end = range.cpu_addr + range.size - 1;
765 res->parent = res->child = res->sibling = NULL;
766 }
767 }
768 }
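/*
 * Illustrative example (not part of the original source): a host bridge
 * node that this parser would accept could carry ranges such as
 * (addresses invented for the example, the parent's #address-cells
 * assumed to be 1):
 *
 *	ranges = <0x01000000 0x0 0x00000000  0xe8000000  0x0 0x00010000
 *		  0x02000000 0x0 0x80000000  0x80000000  0x0 0x40000000>;
 *
 * The first entry becomes the (single) 64KB IO window at CPU address
 * 0xe8000000 with PCI IO starting at 0, the second a 1GB 32-bit memory
 * window mapped 1:1 at 0x80000000 (mem_offset of 0).
 */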
769
770 /* Decide whether to display the domain number in /proc */
771 int pci_proc_domain(struct pci_bus *bus)
772 {
773 struct pci_controller *hose = pci_bus_to_host(bus);
774
775 if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
776 return 0;
777 if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
778 return hose->global_number != 0;
779 return 1;
780 }
781
782 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
783 {
784 if (ppc_md.pcibios_root_bridge_prepare)
785 return ppc_md.pcibios_root_bridge_prepare(bridge);
786
787 return 0;
788 }
789
790 /* This header fixup will do the resource fixup for all devices as they are
791 * probed, but not for bridge ranges
792 */
793 static void pcibios_fixup_resources(struct pci_dev *dev)
794 {
795 struct pci_controller *hose = pci_bus_to_host(dev->bus);
796 int i;
797
798 if (!hose) {
799 printk(KERN_ERR "No host bridge for PCI dev %s !\n",
800 pci_name(dev));
801 return;
802 }
803
804 if (dev->is_virtfn)
805 return;
806
807 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
808 struct resource *res = dev->resource + i;
809 struct pci_bus_region reg;
810 if (!res->flags)
811 continue;
812
813 /* If we're going to re-assign everything, we mark all resources
814 * as unset (and 0-base them). In addition, we mark BARs starting
815 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
816 * since in that case, we don't want to re-assign anything
817 */
818 pcibios_resource_to_bus(dev->bus, &reg, res);
819 if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
820 (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
821 /* Only print message if not re-assigning */
822 if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
823 pr_debug("PCI:%s Resource %d %pR is unassigned\n",
824 pci_name(dev), i, res);
825 res->end -= res->start;
826 res->start = 0;
827 res->flags |= IORESOURCE_UNSET;
828 continue;
829 }
830
831 pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
832 }
833
834 /* Call machine specific resource fixup */
835 if (ppc_md.pcibios_fixup_resources)
836 ppc_md.pcibios_fixup_resources(dev);
837 }
838 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
839
840 /* This function tries to figure out if a bridge resource has been initialized
841 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
842 * things go more smoothly when it gets it right. It should cover cases such
843 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
844 */
845 static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
846 struct resource *res)
847 {
848 struct pci_controller *hose = pci_bus_to_host(bus);
849 struct pci_dev *dev = bus->self;
850 resource_size_t offset;
851 struct pci_bus_region region;
852 u16 command;
853 int i;
854
855 /* We don't do anything if PCI_PROBE_ONLY is set */
856 if (pci_has_flag(PCI_PROBE_ONLY))
857 return 0;
858
859 /* Job is a bit different between memory and IO */
860 if (res->flags & IORESOURCE_MEM) {
861 pcibios_resource_to_bus(dev->bus, &region, res);
862
863 /* If the BAR is non-0 then it's probably been initialized */
864 if (region.start != 0)
865 return 0;
866
867 /* The BAR is 0, let's check if memory decoding is enabled on
868 * the bridge. If not, we consider it unassigned
869 */
870 pci_read_config_word(dev, PCI_COMMAND, &command);
871 if ((command & PCI_COMMAND_MEMORY) == 0)
872 return 1;
873
874 /* Memory decoding is enabled and the BAR is 0. If any of the bridge
875 * resources covers that starting address (0), then it's good enough
876 * for us for memory space.
877 */
878 for (i = 0; i < 3; i++) {
879 if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
880 hose->mem_resources[i].start == hose->mem_offset[i])
881 return 0;
882 }
883
884 /* Well, it starts at 0 and we know it will collide so we may as
885 * well consider it as unassigned. That covers the Apple case.
886 */
887 return 1;
888 } else {
889 /* If the BAR is non-0, then we consider it assigned */
890 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
891 if (((res->start - offset) & 0xfffffffful) != 0)
892 return 0;
893
894 /* Here, we are a bit different than memory as typically IO space
895 * starting at low addresses -is- valid. What we do instead is that
896 * we consider as unassigned anything that doesn't have IO enabled
897 * in the PCI command register, and that's it.
898 */
899 pci_read_config_word(dev, PCI_COMMAND, &command);
900 if (command & PCI_COMMAND_IO)
901 return 0;
902
903 /* It's starting at 0 and IO is disabled in the bridge, consider
904 * it unassigned
905 */
906 return 1;
907 }
908 }
909
910 /* Fixup resources of a PCI<->PCI bridge */
911 static void pcibios_fixup_bridge(struct pci_bus *bus)
912 {
913 struct resource *res;
914 int i;
915
916 struct pci_dev *dev = bus->self;
917
918 pci_bus_for_each_resource(bus, res, i) {
919 if (!res || !res->flags)
920 continue;
921 if (i >= 3 && bus->self->transparent)
922 continue;
923
924 /* If we're going to reassign everything, we can
925 * shrink the P2P resource to have size as being
926 * of 0 in order to save space.
927 */
928 if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
929 res->flags |= IORESOURCE_UNSET;
930 res->start = 0;
931 res->end = -1;
932 continue;
933 }
934
935 pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);
936
937 /* Try to detect uninitialized P2P bridge resources,
938 * and clear them out so they get re-assigned later
939 */
940 if (pcibios_uninitialized_bridge_resource(bus, res)) {
941 res->flags = 0;
942 pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
943 }
944 }
945 }
946
947 void pcibios_setup_bus_self(struct pci_bus *bus)
948 {
949 struct pci_controller *phb;
950
951 /* Fix up the bus resources for P2P bridges */
952 if (bus->self != NULL)
953 pcibios_fixup_bridge(bus);
954
955 /* Platform specific bus fixups. This is currently only used
956 * by fsl_pci and I'm hoping to get rid of it at some point
957 */
958 if (ppc_md.pcibios_fixup_bus)
959 ppc_md.pcibios_fixup_bus(bus);
960
961 /* Setup bus DMA mappings */
962 phb = pci_bus_to_host(bus);
963 if (phb->controller_ops.dma_bus_setup)
964 phb->controller_ops.dma_bus_setup(bus);
965 }
966
967 static void pcibios_setup_device(struct pci_dev *dev)
968 {
969 struct pci_controller *phb;
970 /* Fixup NUMA node as it may not be setup yet by the generic
971 * code and is needed by the DMA init
972 */
973 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
974
975 /* Hook up default DMA ops */
976 set_dma_ops(&dev->dev, pci_dma_ops);
977 dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET;
978
979 /* Additional platform DMA/iommu setup */
980 phb = pci_bus_to_host(dev->bus);
981 if (phb->controller_ops.dma_dev_setup)
982 phb->controller_ops.dma_dev_setup(dev);
983
984 /* Read default IRQs and fixup if necessary */
985 pci_read_irq_line(dev);
986 if (ppc_md.pci_irq_fixup)
987 ppc_md.pci_irq_fixup(dev);
988 }
989
990 int pcibios_add_device(struct pci_dev *dev)
991 {
992 /*
993 * We can only call pcibios_setup_device() after bus setup is complete,
994 * since some of the platform specific DMA setup code depends on it.
995 */
996 if (dev->bus->is_added)
997 pcibios_setup_device(dev);
998
999 #ifdef CONFIG_PCI_IOV
1000 if (ppc_md.pcibios_fixup_sriov)
1001 ppc_md.pcibios_fixup_sriov(dev);
1002 #endif /* CONFIG_PCI_IOV */
1003
1004 return 0;
1005 }
1006
1007 void pcibios_setup_bus_devices(struct pci_bus *bus)
1008 {
1009 struct pci_dev *dev;
1010
1011 pr_debug("PCI: Fixup bus devices %d (%s)\n",
1012 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1013
1014 list_for_each_entry(dev, &bus->devices, bus_list) {
1015 /* Cardbus can call us to add new devices to a bus, so ignore
1016 * those who are already fully discovered
1017 */
1018 if (pci_dev_is_added(dev))
1019 continue;
1020
1021 pcibios_setup_device(dev);
1022 }
1023 }
1024
1025 void pcibios_set_master(struct pci_dev *dev)
1026 {
1027 /* No special bus mastering setup handling */
1028 }
1029
1030 void pcibios_fixup_bus(struct pci_bus *bus)
1031 {
1032 /* When called from the generic PCI probe, read PCI<->PCI bridge
1033 * bases. This is -not- called when generating the PCI tree from
1034 * the OF device-tree.
1035 */
1036 pci_read_bridge_bases(bus);
1037
1038 /* Now fixup the bus itself */
1039 pcibios_setup_bus_self(bus);
1040
1041 /* Now fixup devices on that bus */
1042 pcibios_setup_bus_devices(bus);
1043 }
1044 EXPORT_SYMBOL(pcibios_fixup_bus);
1045
1046 void pci_fixup_cardbus(struct pci_bus *bus)
1047 {
1048 /* Now fixup devices on that bus */
1049 pcibios_setup_bus_devices(bus);
1050 }
1051
1052
1053 static int skip_isa_ioresource_align(struct pci_dev *dev)
1054 {
1055 if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
1056 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1057 return 1;
1058 return 0;
1059 }
1060
1061 /*
1062 * We need to avoid collisions with `mirrored' VGA ports
1063 * and other strange ISA hardware, so we always want the
1064 * addresses to be allocated in the 0x000-0x0ff region
1065 * modulo 0x400.
1066 *
1067 * Why? Because some silly external IO cards only decode
1068 * the low 10 bits of the IO address. The 0x00-0xff region
1069 * is reserved for motherboard devices that decode all 16
1070 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1071 * but we want to try to avoid allocating at 0x2900-0x2bff
1072 * which might have been mirrored at 0x0100-0x03ff..
1073 */
1074 resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1075 resource_size_t size, resource_size_t align)
1076 {
1077 struct pci_dev *dev = data;
1078 resource_size_t start = res->start;
1079
1080 if (res->flags & IORESOURCE_IO) {
1081 if (skip_isa_ioresource_align(dev))
1082 return start;
1083 if (start & 0x300)
1084 start = (start + 0x3ff) & ~0x3ff;
1085 }
1086
1087 return start;
1088 }
1089 EXPORT_SYMBOL(pcibios_align_resource);
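/*
 * Illustrative example (not part of the original source): with the rule
 * above, an IO allocation proposed at 0x2900 (its low bits fall in the
 * 0x100-0x3ff mirror range) is rounded up to 0x2c00, while one proposed
 * at 0x2800 is left untouched since 0x2800 & 0x300 == 0.
 */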
1090
1091 /*
1092 * Reparent resource children of pr that conflict with res
1093 * under res, and make res replace those children.
1094 */
1095 static int reparent_resources(struct resource *parent,
1096 struct resource *res)
1097 {
1098 struct resource *p, **pp;
1099 struct resource **firstpp = NULL;
1100
1101 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
1102 if (p->end < res->start)
1103 continue;
1104 if (res->end < p->start)
1105 break;
1106 if (p->start < res->start || p->end > res->end)
1107 return -1; /* not completely contained */
1108 if (firstpp == NULL)
1109 firstpp = pp;
1110 }
1111 if (firstpp == NULL)
1112 return -1; /* didn't find any conflicting entries? */
1113 res->parent = parent;
1114 res->child = *firstpp;
1115 res->sibling = *pp;
1116 *firstpp = res;
1117 *pp = NULL;
1118 for (p = res->child; p != NULL; p = p->sibling) {
1119 p->parent = res;
1120 pr_debug("PCI: Reparented %s %pR under %s\n",
1121 p->name, p, res->name);
1122 }
1123 return 0;
1124 }
1125
1126 /*
1127 * Handle resources of PCI devices. If the world were perfect, we could
1128 * just allocate all the resource regions and do nothing more. It isn't.
1129 * On the other hand, we cannot just re-allocate all devices, as it would
1130 * require us to know lots of host bridge internals. So we attempt to
1131 * keep as much of the original configuration as possible, but tweak it
1132 * when it's found to be wrong.
1133 *
1134 * Known BIOS problems we have to work around:
1135 * - I/O or memory regions not configured
1136 * - regions configured, but not enabled in the command register
1137 * - bogus I/O addresses above 64K used
1138 * - expansion ROMs left enabled (this may sound harmless, but given
1139 * the fact the PCI specs explicitly allow address decoders to be
1140 * shared between expansion ROMs and other resource regions, it's
1141 * at least dangerous)
1142 *
1143 * Our solution:
1144 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
1145 * This gives us fixed barriers on where we can allocate.
1146 * (2) Allocate resources for all enabled devices. If there is
1147 * a collision, just mark the resource as unallocated. Also
1148 * disable expansion ROMs during this step.
1149 * (3) Try to allocate resources for disabled devices. If the
1150 * resources were assigned correctly, everything goes well,
1151 * if they weren't, they won't disturb allocation of other
1152 * resources.
1153 * (4) Assign new addresses to resources which were either
1154 * not configured at all or misconfigured. If explicitly
1155 * requested by the user, configure expansion ROM address
1156 * as well.
1157 */
1158
1159 static void pcibios_allocate_bus_resources(struct pci_bus *bus)
1160 {
1161 struct pci_bus *b;
1162 int i;
1163 struct resource *res, *pr;
1164
1165 pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1166 pci_domain_nr(bus), bus->number);
1167
1168 pci_bus_for_each_resource(bus, res, i) {
1169 if (!res || !res->flags || res->start > res->end || res->parent)
1170 continue;
1171
1172 /* If the resource was left unset at this point, we clear it */
1173 if (res->flags & IORESOURCE_UNSET)
1174 goto clear_resource;
1175
1176 if (bus->parent == NULL)
1177 pr = (res->flags & IORESOURCE_IO) ?
1178 &ioport_resource : &iomem_resource;
1179 else {
1180 pr = pci_find_parent_resource(bus->self, res);
1181 if (pr == res) {
1182 /* this happens when the generic PCI
1183 * code (wrongly) decides that this
1184 * bridge is transparent -- paulus
1185 */
1186 continue;
1187 }
1188 }
1189
1190 pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
1191 bus->self ? pci_name(bus->self) : "PHB", bus->number,
1192 i, res, pr, (pr && pr->name) ? pr->name : "nil");
1193
1194 if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1195 struct pci_dev *dev = bus->self;
1196
1197 if (request_resource(pr, res) == 0)
1198 continue;
1199 /*
1200 * Must be a conflict with an existing entry.
1201 * Move that entry (or entries) under the
1202 * bridge resource and try again.
1203 */
1204 if (reparent_resources(pr, res) == 0)
1205 continue;
1206
1207 if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
1208 pci_claim_bridge_resource(dev,
1209 i + PCI_BRIDGE_RESOURCES) == 0)
1210 continue;
1211 }
1212 pr_warn("PCI: Cannot allocate resource region %d of PCI bridge %d, will remap\n",
1213 i, bus->number);
1214 clear_resource:
1215 /* The resource might be figured out when doing
1216 * reassignment based on the resources required
1217 * by the downstream PCI devices. Here we set
1218 * the size of the resource to be 0 in order to
1219 * save more space.
1220 */
1221 res->start = 0;
1222 res->end = -1;
1223 res->flags = 0;
1224 }
1225
1226 list_for_each_entry(b, &bus->children, node)
1227 pcibios_allocate_bus_resources(b);
1228 }
1229
1230 static inline void alloc_resource(struct pci_dev *dev, int idx)
1231 {
1232 struct resource *pr, *r = &dev->resource[idx];
1233
1234 pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
1235 pci_name(dev), idx, r);
1236
1237 pr = pci_find_parent_resource(dev, r);
1238 if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1239 request_resource(pr, r) < 0) {
1240 printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1241 " of device %s, will remap\n", idx, pci_name(dev));
1242 if (pr)
1243 pr_debug("PCI: parent is %p: %pR\n", pr, pr);
1244 /* We'll assign a new address later */
1245 r->flags |= IORESOURCE_UNSET;
1246 r->end -= r->start;
1247 r->start = 0;
1248 }
1249 }
1250
1251 static void __init pcibios_allocate_resources(int pass)
1252 {
1253 struct pci_dev *dev = NULL;
1254 int idx, disabled;
1255 u16 command;
1256 struct resource *r;
1257
1258 for_each_pci_dev(dev) {
1259 pci_read_config_word(dev, PCI_COMMAND, &command);
1260 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1261 r = &dev->resource[idx];
1262 if (r->parent) /* Already allocated */
1263 continue;
1264 if (!r->flags || (r->flags & IORESOURCE_UNSET))
1265 continue; /* Not assigned at all */
1266 /* We only allocate ROMs on pass 1 just in case they
1267 * have been screwed up by firmware
1268 */
1269 if (idx == PCI_ROM_RESOURCE )
1270 disabled = 1;
1271 if (r->flags & IORESOURCE_IO)
1272 disabled = !(command & PCI_COMMAND_IO);
1273 else
1274 disabled = !(command & PCI_COMMAND_MEMORY);
1275 if (pass == disabled)
1276 alloc_resource(dev, idx);
1277 }
1278 if (pass)
1279 continue;
1280 r = &dev->resource[PCI_ROM_RESOURCE];
1281 if (r->flags) {
1282 /* Turn the ROM off, leave the resource region,
1283 * but keep it unregistered.
1284 */
1285 u32 reg;
1286 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1287 if (reg & PCI_ROM_ADDRESS_ENABLE) {
1288 pr_debug("PCI: Switching off ROM of %s\n",
1289 pci_name(dev));
1290 r->flags &= ~IORESOURCE_ROM_ENABLE;
1291 pci_write_config_dword(dev, dev->rom_base_reg,
1292 reg & ~PCI_ROM_ADDRESS_ENABLE);
1293 }
1294 }
1295 }
1296 }
1297
1298 static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
1299 {
1300 struct pci_controller *hose = pci_bus_to_host(bus);
1301 resource_size_t offset;
1302 struct resource *res, *pres;
1303 int i;
1304
1305 pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));
1306
1307 /* Check for IO */
1308 if (!(hose->io_resource.flags & IORESOURCE_IO))
1309 goto no_io;
1310 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1311 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1312 BUG_ON(res == NULL);
1313 res->name = "Legacy IO";
1314 res->flags = IORESOURCE_IO;
1315 res->start = offset;
1316 res->end = (offset + 0xfff) & 0xfffffffful;
1317 pr_debug("Candidate legacy IO: %pR\n", res);
1318 if (request_resource(&hose->io_resource, res)) {
1319 printk(KERN_DEBUG
1320 "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
1321 pci_domain_nr(bus), bus->number, res);
1322 kfree(res);
1323 }
1324
1325 no_io:
1326 /* Check for memory */
1327 for (i = 0; i < 3; i++) {
1328 pres = &hose->mem_resources[i];
1329 offset = hose->mem_offset[i];
1330 if (!(pres->flags & IORESOURCE_MEM))
1331 continue;
1332 pr_debug("hose mem res: %pR\n", pres);
1333 if ((pres->start - offset) <= 0xa0000 &&
1334 (pres->end - offset) >= 0xbffff)
1335 break;
1336 }
1337 if (i >= 3)
1338 return;
1339 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1340 BUG_ON(res == NULL);
1341 res->name = "Legacy VGA memory";
1342 res->flags = IORESOURCE_MEM;
1343 res->start = 0xa0000 + offset;
1344 res->end = 0xbffff + offset;
1345 pr_debug("Candidate VGA memory: %pR\n", res);
1346 if (request_resource(pres, res)) {
1347 printk(KERN_DEBUG
1348 "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
1349 pci_domain_nr(bus), bus->number, res);
1350 kfree(res);
1351 }
1352 }
1353
1354 void __init pcibios_resource_survey(void)
1355 {
1356 struct pci_bus *b;
1357
1358 /* Allocate and assign resources */
1359 list_for_each_entry(b, &pci_root_buses, node)
1360 pcibios_allocate_bus_resources(b);
1361 if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
1362 pcibios_allocate_resources(0);
1363 pcibios_allocate_resources(1);
1364 }
1365
1366 /* Before we start assigning unassigned resource, we try to reserve
1367 * the low IO area and the VGA memory area if they intersect the
1368 * bus available resources to avoid allocating things on top of them
1369 */
1370 if (!pci_has_flag(PCI_PROBE_ONLY)) {
1371 list_for_each_entry(b, &pci_root_buses, node)
1372 pcibios_reserve_legacy_regions(b);
1373 }
1374
1375 /* Now, if the platform didn't decide to blindly trust the firmware,
1376 * we proceed to assigning things that were left unassigned
1377 */
1378 if (!pci_has_flag(PCI_PROBE_ONLY)) {
1379 pr_debug("PCI: Assigning unassigned resources...\n");
1380 pci_assign_unassigned_resources();
1381 }
1382 }
1383
1384 /* This is used by the PCI hotplug driver to allocate resources
1385 * of newly plugged busses. We can try to consolidate with the
1386 * rest of the code later; for now, keep it as-is as our main
1387 * resource allocation function doesn't deal with sub-trees yet.
1388 */
1389 void pcibios_claim_one_bus(struct pci_bus *bus)
1390 {
1391 struct pci_dev *dev;
1392 struct pci_bus *child_bus;
1393
1394 list_for_each_entry(dev, &bus->devices, bus_list) {
1395 int i;
1396
1397 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1398 struct resource *r = &dev->resource[i];
1399
1400 if (r->parent || !r->start || !r->flags)
1401 continue;
1402
1403 pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
1404 pci_name(dev), i, r);
1405
1406 if (pci_claim_resource(dev, i) == 0)
1407 continue;
1408
1409 pci_claim_bridge_resource(dev, i);
1410 }
1411 }
1412
1413 list_for_each_entry(child_bus, &bus->children, node)
1414 pcibios_claim_one_bus(child_bus);
1415 }
1416 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1417
1418
1419 /* pcibios_finish_adding_to_bus
1420 *
1421 * This is to be called by the hotplug code after devices have been
1422 * added to a bus; this includes calling it for a PHB that is just
1423 * being added
1424 */
1425 void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1426 {
1427 pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1428 pci_domain_nr(bus), bus->number);
1429
1430 /* Allocate bus and devices resources */
1431 pcibios_allocate_bus_resources(bus);
1432 pcibios_claim_one_bus(bus);
1433 if (!pci_has_flag(PCI_PROBE_ONLY)) {
1434 if (bus->self)
1435 pci_assign_unassigned_bridge_resources(bus->self);
1436 else
1437 pci_assign_unassigned_bus_resources(bus);
1438 }
1439
1440 /* Fixup EEH */
1441 eeh_add_device_tree_late(bus);
1442
1443 /* Add new devices to global lists. Register in proc, sysfs. */
1444 pci_bus_add_devices(bus);
1445
1446 /* sysfs files should only be added after devices are added */
1447 eeh_add_sysfs_files(bus);
1448 }
1449 EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1450
1451 int pcibios_enable_device(struct pci_dev *dev, int mask)
1452 {
1453 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1454
1455 if (phb->controller_ops.enable_device_hook)
1456 if (!phb->controller_ops.enable_device_hook(dev))
1457 return -EINVAL;
1458
1459 return pci_enable_resources(dev, mask);
1460 }
1461
1462 void pcibios_disable_device(struct pci_dev *dev)
1463 {
1464 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1465
1466 if (phb->controller_ops.disable_device)
1467 phb->controller_ops.disable_device(dev);
1468 }
1469
1470 resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
1471 {
1472 return (unsigned long) hose->io_base_virt - _IO_BASE;
1473 }
1474
1475 static void pcibios_setup_phb_resources(struct pci_controller *hose,
1476 struct list_head *resources)
1477 {
1478 struct resource *res;
1479 resource_size_t offset;
1480 int i;
1481
1482 /* Hookup PHB IO resource */
1483 res = &hose->io_resource;
1484
1485 if (!res->flags) {
1486 pr_debug("PCI: I/O resource not set for host"
1487 " bridge %pOF (domain %d)\n",
1488 hose->dn, hose->global_number);
1489 } else {
1490 offset = pcibios_io_space_offset(hose);
1491
1492 pr_debug("PCI: PHB IO resource = %pR off 0x%08llx\n",
1493 res, (unsigned long long)offset);
1494 pci_add_resource_offset(resources, res, offset);
1495 }
1496
1497 /* Hookup PHB Memory resources */
1498 for (i = 0; i < 3; ++i) {
1499 res = &hose->mem_resources[i];
1500 if (!res->flags)
1501 continue;
1502
1503 offset = hose->mem_offset[i];
1504 pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
1505 res, (unsigned long long)offset);
1506
1507 pci_add_resource_offset(resources, res, offset);
1508 }
1509 }
1510
1511 /*
1512 * Null PCI config access functions, for the case when we can't
1513 * find a hose.
1514 */
1515 #define NULL_PCI_OP(rw, size, type) \
1516 static int \
1517 null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
1518 { \
1519 return PCIBIOS_DEVICE_NOT_FOUND; \
1520 }
1521
1522 static int
1523 null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1524 int len, u32 *val)
1525 {
1526 return PCIBIOS_DEVICE_NOT_FOUND;
1527 }
1528
1529 static int
1530 null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1531 int len, u32 val)
1532 {
1533 return PCIBIOS_DEVICE_NOT_FOUND;
1534 }
1535
1536 static struct pci_ops null_pci_ops =
1537 {
1538 .read = null_read_config,
1539 .write = null_write_config,
1540 };
1541
1542 /*
1543 * These functions are used early on before PCI scanning is done
1544 * and all of the pci_dev and pci_bus structures have been created.
1545 */
1546 static struct pci_bus *
1547 fake_pci_bus(struct pci_controller *hose, int busnr)
1548 {
1549 static struct pci_bus bus;
1550
1551 if (hose == NULL) {
1552 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1553 }
1554 bus.number = busnr;
1555 bus.sysdata = hose;
1556 bus.ops = hose? hose->ops: &null_pci_ops;
1557 return &bus;
1558 }
1559
1560 #define EARLY_PCI_OP(rw, size, type) \
1561 int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
1562 int devfn, int offset, type value) \
1563 { \
1564 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
1565 devfn, offset, value); \
1566 }
1567
1568 EARLY_PCI_OP(read, byte, u8 *)
1569 EARLY_PCI_OP(read, word, u16 *)
1570 EARLY_PCI_OP(read, dword, u32 *)
1571 EARLY_PCI_OP(write, byte, u8)
1572 EARLY_PCI_OP(write, word, u16)
1573 EARLY_PCI_OP(write, dword, u32)
1574
1575 int early_find_capability(struct pci_controller *hose, int bus, int devfn,
1576 int cap)
1577 {
1578 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
1579 }
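/*
 * Illustrative usage (not part of the original source): before the PCI
 * tree exists, platform setup code can poke config space through a hose
 * directly, e.g. (device and register choice invented for the example):
 *
 *	u16 vendor;
 *
 *	early_read_config_word(hose, 0, 0, PCI_VENDOR_ID, &vendor);
 */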
1580
1581 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1582 {
1583 struct pci_controller *hose = bus->sysdata;
1584
1585 return of_node_get(hose->dn);
1586 }
1587
1588 /**
1589 * pcibios_scan_phb - Given a pci_controller, setup and scan the PCI bus
1590 * @hose: Pointer to the PCI host controller instance structure
1591 */
1592 void pcibios_scan_phb(struct pci_controller *hose)
1593 {
1594 LIST_HEAD(resources);
1595 struct pci_bus *bus;
1596 struct device_node *node = hose->dn;
1597 int mode;
1598
1599 pr_debug("PCI: Scanning PHB %pOF\n", node);
1600
1601 /* Get some IO space for the new PHB */
1602 pcibios_setup_phb_io_space(hose);
1603
1604 /* Wire up PHB bus resources */
1605 pcibios_setup_phb_resources(hose, &resources);
1606
1607 hose->busn.start = hose->first_busno;
1608 hose->busn.end = hose->last_busno;
1609 hose->busn.flags = IORESOURCE_BUS;
1610 pci_add_resource(&resources, &hose->busn);
1611
1612 /* Create an empty bus for the toplevel */
1613 bus = pci_create_root_bus(hose->parent, hose->first_busno,
1614 hose->ops, hose, &resources);
1615 if (bus == NULL) {
1616 pr_err("Failed to create bus for PCI domain %04x\n",
1617 hose->global_number);
1618 pci_free_resource_list(&resources);
1619 return;
1620 }
1621 hose->bus = bus;
1622
1623 /* Get probe mode and perform scan */
1624 mode = PCI_PROBE_NORMAL;
1625 if (node && hose->controller_ops.probe_mode)
1626 mode = hose->controller_ops.probe_mode(bus);
1627 pr_debug(" probe mode: %d\n", mode);
1628 if (mode == PCI_PROBE_DEVTREE)
1629 of_scan_bus(node, bus);
1630
1631 if (mode == PCI_PROBE_NORMAL) {
1632 pci_bus_update_busn_res_end(bus, 255);
1633 hose->last_busno = pci_scan_child_bus(bus);
1634 pci_bus_update_busn_res_end(bus, hose->last_busno);
1635 }
1636
1637 /* Platform gets a chance to do some global fixups before
1638 * we proceed to resource allocation
1639 */
1640 if (ppc_md.pcibios_fixup_phb)
1641 ppc_md.pcibios_fixup_phb(hose);
1642
1643 /* Configure PCI Express settings */
1644 if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
1645 struct pci_bus *child;
1646 list_for_each_entry(child, &bus->children, node)
1647 pcie_bus_configure_settings(child);
1648 }
1649 }
1650 EXPORT_SYMBOL_GPL(pcibios_scan_phb);
1651
1652 static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
1653 {
1654 int i, class = dev->class >> 8;
1655 /* When configured as agent, programming interface = 1 */
1656 int prog_if = dev->class & 0xf;
1657
1658 if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
1659 class == PCI_CLASS_BRIDGE_OTHER) &&
1660 (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
1661 (prog_if == 0) &&
1662 (dev->bus->parent == NULL)) {
1663 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1664 dev->resource[i].start = 0;
1665 dev->resource[i].end = 0;
1666 dev->resource[i].flags = 0;
1667 }
1668 }
1669 }
1670 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1671 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1672