// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}
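
/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 * get_zdev_by_fid() takes a reference via zpci_zdev_get(), so a caller is
 * expected to drop it with zpci_zdev_put() once done, e.g.:
 *
 *	struct zpci_dev *zdev = get_zdev_by_fid(fid);
 *
 *	if (zdev) {
 *		... use zdev ...
 *		zpci_zdev_put(zdev);
 *	}
 */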

void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota, u8 *status)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, *status);
	return cc;
}
EXPORT_SYMBOL_GPL(zpci_register_ioat);

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	fib.gd = zdev->gisa;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}
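
/*
 * Worked example (illustrative only, describing the shifts above): after the
 * le64_to_cpu() conversion in zpci_cfg_load() the payload sits in the
 * most-significant bytes of 'data', so for a 2-byte read (len = 2) the right
 * shift by (8 - len) * 8 = 48 bits right-justifies the value before the
 * (u32) cast. zpci_cfg_store() performs the mirror-image left shift before
 * converting back for the store.
 */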

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	/*
	 * When PCI MIO instructions are unavailable the "physical" address
	 * encodes a hint for accessing the PCI memory space it represents.
	 * Just pass it unchanged such that ioread/iowrite can decode it.
	 */
	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *)phys_addr;

	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
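
/*
 * Illustrative driver-side usage (hypothetical, not part of this file):
 * whether the returned cookie is a real MIO mapping or an fh/BAR-encoded
 * token, it must only be accessed through the ioread/iowrite helpers, e.g.:
 *
 *	void __iomem *base = pci_iomap(pdev, 0, 0);
 *
 *	if (base) {
 *		u32 reg = ioread32(base + 0x10);
 *		...
 *		pci_iounmap(pdev, base);
 *	}
 */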

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
	int bar, idx;

	spin_lock(&zpci_iomap_lock);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!zdev->bars[bar].size)
			continue;
		idx = zdev->bars[bar].map_idx;
		if (!zpci_iomap_start[idx].count)
			continue;
		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
	}
	spin_unlock(&zpci_iomap_lock);
}

void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
	if (!fh || zdev->fh == fh)
		return;

	zdev->fh = fh;
	if (zpci_use_mio(zdev))
		return;
	if (zdev->has_resources && zdev_enabled(zdev))
		zpci_do_update_iomap_fh(zdev, fh);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
	}
	zdev->has_resources = 1;

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	struct resource *res;
	int i;

	pci_lock_rescan_remove();
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = zdev->bars[i].res;
		if (!res)
			continue;

		release_resource(res);
		pci_bus_remove_resource(zdev->zbus->bus, res);
		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		zdev->bars[i].res = NULL;
		kfree(res);
	}
	zdev->has_resources = 0;
	pci_unlock_rescan_remove();
}

int pcibios_device_add(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}


int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zpci_update_fh(zdev, fh);
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zpci_update_fh(zdev, fh);
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zpci_update_fh(zdev, fh);
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

/**
 * zpci_hot_reset_device - perform a reset of the given zPCI function
 * @zdev: the slot which should be reset
 *
 * Performs a low level reset of the zPCI function. The reset is low level in
 * the sense that the zPCI function can be reset without detaching it from the
 * common PCI subsystem. The reset may be performed while under control of
 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
 * table is reinstated at the end of the reset.
 *
 * After the reset the function's internal state is reset to an initial state
 * equivalent to its state during boot when first probing a driver.
 * Consequently, after reset the PCI function requires re-initialization via
 * the common PCI code including re-enabling IRQs via pci_alloc_irq_vectors()
 * and enabling the function via e.g. pci_enable_device_flags(). The caller
 * must guard against concurrent reset attempts.
 *
 * In most cases this function should not be called directly but through
 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
 * locking.
 *
 * Return: 0 on success and an error value otherwise
 */
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
	u8 status;
	int rc;

	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
	if (zdev_enabled(zdev)) {
		/* Disables device access, DMAs and IRQs (reset state) */
		rc = zpci_disable_device(zdev);
		/*
		 * Due to a z/VM vs LPAR inconsistency in the error state the
		 * FH may indicate an enabled device but disable says the
		 * device is already disabled. Don't treat that as an error
		 * here.
		 */
		if (rc == -EINVAL)
			rc = 0;
		if (rc)
			return rc;
	}

	rc = zpci_enable_device(zdev);
	if (rc)
		return rc;

	if (zdev->dma_table)
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
					virt_to_phys(zdev->dma_table), &status);
	else
		rc = zpci_dma_init_device(zdev);
	if (rc) {
		zpci_disable_device(zdev);
		return rc;
	}

	return 0;
}
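
/*
 * Illustrative usage (hypothetical, not part of this file): as the kernel-doc
 * above notes, drivers normally trigger a reset through the common PCI layer,
 * which handles the config save/restore and locking, e.g.:
 *
 *	if (pci_reset_function(pdev))
 *		dev_warn(&pdev->dev, "reset failed\n");
 *
 * Direct callers of zpci_hot_reset_device() must provide equivalent
 * serialization against concurrent reset attempts themselves.
 */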

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->lock);
	mutex_init(&zdev->kzdev_lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return zdev;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
		state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If any failure occurs,
 * the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	zpci_update_fh(zdev, fh);
	return zpci_bus_scan_device(zdev);
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
	int rc;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev->dma_table) {
		rc = zpci_dma_exit_device(zdev);
		if (rc)
			return rc;
	}
	if (zdev_enabled(zdev)) {
		rc = zpci_disable_device(zdev);
		if (rc)
			return rc;
	}

	rc = sclp_pci_deconfigure(zdev->fid);
	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
	if (rc)
		return rc;
	zdev->state = ZPCI_FN_STATE_STANDBY;

	return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev can no longer be found via
 * get_zdev_by_fid() but may still be accessible via existing references,
 * though it will not be functional anymore.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);
	/*
	 * Remove device from zpci_list as it is going away. This also
	 * makes sure we ignore subsequent zPCI events for this device.
	 */
	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	zpci_zdev_put(zdev);
}

void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
	int ret;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev->dma_table)
		zpci_dma_exit_device(zdev);
	if (zdev_enabled(zdev))
		zpci_disable_device(zdev);

	switch (zdev->state) {
	case ZPCI_FN_STATE_CONFIGURED:
		ret = sclp_pci_deconfigure(zdev->fid);
		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
		fallthrough;
	case ZPCI_FN_STATE_STANDBY:
		if (zdev->has_hp_slot)
			zpci_exit_slot(zdev);
		spin_lock(&zpci_list_lock);
		list_del(&zdev->entry);
		spin_unlock(&zpci_list_lock);
		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
		fallthrough;
	case ZPCI_FN_STATE_RESERVED:
		if (zdev->has_resources)
			zpci_cleanup_bus_resources(zdev);
		zpci_bus_device_unregister(zdev);
		zpci_destroy_iommu(zdev);
		fallthrough;
	default:
		break;
	}
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree_rcu(zdev, rcu);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

/**
 * zpci_clear_error_state() - Clears the zPCI error state of the device
 * @zdev: The zdev for which the zPCI error state should be reset
 *
 * Clear the zPCI error state of the device. If clearing the zPCI error state
 * fails the device is left in the error state. In this case it may make sense
 * to call zpci_io_perm_failure() on the associated pdev if it exists.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_clear_error_state(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

/**
 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
 * @zdev: The zdev for which to unblock load/store access
 *
 * Re-enables load/store access for a PCI function in the error state while
 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
 * if error recovery is possible while catching any rogue DMA access from the
 * device.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}
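
/*
 * Illustrative recovery ordering (hypothetical, for orientation only): a
 * recovery path would typically first unblock load/store while DMA stays
 * blocked, probe the device via MMIO, and only then clear the full error
 * state, e.g.:
 *
 *	if (!zpci_reset_load_store_blocked(zdev)) {
 *		... poke MMIO to judge whether recovery is possible ...
 *		if (recoverable)
 *			rc = zpci_clear_error_state(zdev);
 *	}
 */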

static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (MACHINE_HAS_PCI_MIO) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;
	zpci_bus_scan_busses();

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);
