Lines matching refs:viodev (identifier cross-reference; the matches below come from the powerpc VIO bus code, arch/powerpc/kernel/vio.c, which implements the Cooperative Memory Overcommitment (CMO) entitlement accounting for virtual I/O devices)

71 	struct vio_dev *viodev;  member
146 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size) in vio_cmo_alloc() argument
156 if (viodev->cmo.entitled > viodev->cmo.allocated) in vio_cmo_alloc()
157 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated; in vio_cmo_alloc()
168 viodev->cmo.allocated += size; in vio_cmo_alloc()
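
The vio_cmo_alloc() fragments above show the core of the allocation policy: a request is satisfied first from the device's unused reserved entitlement, then from the shared excess pool. A minimal userspace model of that arithmetic, assuming the toy types below as stand-ins for the kernel's struct vio_dev and the global vio_cmo state (the field names mirror the listing; the VIO_CMO_MIN_ENT value and everything else not shown above is an assumption):

#include <stddef.h>

#define VIO_CMO_MIN_ENT 1562624  /* assumed value of the kernel constant */

/* Toy stand-ins for the kernel structures; only fields visible in the
 * listing are modeled. */
struct vio_cmo_dev_entitlement {
	size_t desired;    /* entitlement the driver would like, in bytes */
	size_t entitled;   /* entitlement currently granted to the device */
	size_t allocated;  /* entitlement currently in use */
};

struct vio_dev {
	struct vio_cmo_dev_entitlement cmo;
};

struct vio_cmo_pool { size_t size, free; };

/* Global accounting, mirroring the kernel's vio_cmo singleton (locking
 * is omitted throughout these sketches). */
static struct {
	struct vio_cmo_pool reserve, excess;
	size_t spare, curr, high, desired;
} vio_cmo;

/* Charge `size` bytes against the device: use its own free reserve
 * first, spill the remainder into the excess pool, or fail. */
static int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	size_t reserve_free = 0, excess_free = 0;

	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* The excess pool is only usable while the spare is fully funded. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	if (reserve_free + excess_free < size)
		return -1;  /* the kernel returns -ENOMEM */

	vio_cmo.curr += size;
	if (vio_cmo.curr > vio_cmo.high)
		vio_cmo.high = vio_cmo.curr;  /* high-water mark */
	viodev->cmo.allocated += size;
	if (size > reserve_free)
		vio_cmo.excess.free -= size - reserve_free;
	return 0;
}
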
190 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size) in vio_cmo_dealloc() argument
203 if (viodev->cmo.allocated > viodev->cmo.entitled) { in vio_cmo_dealloc()
204 excess_freed = min(reserve_freed, (viodev->cmo.allocated - in vio_cmo_dealloc()
205 viodev->cmo.entitled)); in vio_cmo_dealloc()
210 viodev->cmo.allocated -= (reserve_freed + excess_freed); in vio_cmo_dealloc()
236 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT)); in vio_cmo_dealloc()
239 viodev->cmo.entitled -= tmp; in vio_cmo_dealloc()
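
The matching vio_cmo_dealloc() fragments undo that charge. A sketch of the same arithmetic, reusing the toy types from the vio_cmo_alloc() sketch above; the ordering (refund excess first, then replenish the spare from the device's entitlement, never dipping below VIO_CMO_MIN_ENT) follows the lines shown, while the kernel's further rebalancing steps are elided:

#define MIN(a, b)      ((a) < (b) ? (a) : (b))
#define MIN3(a, b, c)  MIN(MIN((a), (b)), (c))

static void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	size_t reserve_freed = size, excess_freed = 0;
	size_t spare_needed, tmp;

	vio_cmo.curr -= size;

	/* Anything allocated above the entitlement came from the excess
	 * pool and is refunded there. */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = MIN(reserve_freed,
				   viodev->cmo.allocated - viodev->cmo.entitled);
		reserve_freed -= excess_freed;
	}

	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Replenish an underfunded spare from the device's entitlement,
	 * but never push the device below its minimum. */
	spare_needed = vio_cmo.spare < VIO_CMO_MIN_ENT ?
		       VIO_CMO_MIN_ENT - vio_cmo.spare : 0;
	if (spare_needed && reserve_freed) {
		tmp = MIN3(spare_needed, reserve_freed,
			   viodev->cmo.entitled - VIO_CMO_MIN_ENT);
		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
	}

	vio_cmo.excess.free += excess_freed;
}
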
281 struct vio_dev *viodev; in vio_cmo_entitlement_update() local
320 viodev = dev_ent->viodev; in vio_cmo_entitlement_update()
321 if ((viodev->cmo.entitled > viodev->cmo.allocated) && in vio_cmo_entitlement_update()
322 (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) in vio_cmo_entitlement_update()
323 avail += viodev->cmo.entitled - in vio_cmo_entitlement_update()
324 max_t(size_t, viodev->cmo.allocated, in vio_cmo_entitlement_update()
345 viodev = dev_ent->viodev; in vio_cmo_entitlement_update()
347 if ((viodev->cmo.entitled > viodev->cmo.allocated) && in vio_cmo_entitlement_update()
348 (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) in vio_cmo_entitlement_update()
349 tmp = viodev->cmo.entitled - in vio_cmo_entitlement_update()
350 max_t(size_t, viodev->cmo.allocated, in vio_cmo_entitlement_update()
352 viodev->cmo.entitled -= min(tmp, delta); in vio_cmo_entitlement_update()
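
vio_cmo_entitlement_update() handles a hypervisor-driven change in total entitlement. The fragments show the shrink path: sum up how much unused entitlement could be withdrawn, then withdraw it device by device. A sketch of that pass, reusing the toy types above; the hypothetical dev_list[] array stands in for the kernel's vio_cmo.device_list:

#define MAX(a, b)  ((a) > (b) ? (a) : (b))

static struct vio_dev *dev_list[16];  /* toy device list */
static int dev_count;

static int vio_cmo_entitlement_shrink(size_t delta)
{
	size_t avail = vio_cmo.excess.free;
	size_t tmp;
	int i;

	/* Pass 1: is the free excess pool plus the devices' withdrawable
	 * entitlement (everything above both their current allocation and
	 * the per-device minimum) enough to cover the decrease? */
	for (i = 0; i < dev_count; i++) {
		struct vio_dev *viodev = dev_list[i];

		if (viodev->cmo.entitled > viodev->cmo.allocated &&
		    viodev->cmo.entitled > VIO_CMO_MIN_ENT)
			avail += viodev->cmo.entitled -
				 MAX(viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}
	if (avail < delta)
		return -1;  /* the kernel returns -ENOMEM */

	/* Pass 2: withdraw unused entitlement device by device until the
	 * decrease is fully absorbed. */
	for (i = 0; i < dev_count && delta; i++) {
		struct vio_dev *viodev = dev_list[i];

		tmp = 0;
		if (viodev->cmo.entitled > viodev->cmo.allocated &&
		    viodev->cmo.entitled > VIO_CMO_MIN_ENT)
			tmp = viodev->cmo.entitled -
			      MAX(viodev->cmo.allocated, VIO_CMO_MIN_ENT);
		tmp = MIN(tmp, delta);
		viodev->cmo.entitled -= tmp;
		delta -= tmp;
	}
	return 0;
}
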
390 struct vio_dev *viodev; in vio_cmo_balance() local
413 viodev = dev_ent->viodev; in vio_cmo_balance()
415 viodev->cmo.entitled = VIO_CMO_MIN_ENT; in vio_cmo_balance()
416 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT); in vio_cmo_balance()
417 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT); in vio_cmo_balance()
429 viodev = dev_ent->viodev; in vio_cmo_balance()
431 if (viodev->cmo.desired <= level) { in vio_cmo_balance()
442 chunk = min(chunk, (viodev->cmo.desired - in vio_cmo_balance()
443 viodev->cmo.entitled)); in vio_cmo_balance()
444 viodev->cmo.entitled += chunk; in vio_cmo_balance()
451 need = max(viodev->cmo.allocated, viodev->cmo.entitled)- in vio_cmo_balance()
452 max(viodev->cmo.allocated, level); in vio_cmo_balance()
467 viodev = dev_ent->viodev; in vio_cmo_balance()
469 if (viodev->cmo.entitled) in vio_cmo_balance()
470 cmo->reserve.size += (viodev->cmo.entitled - in vio_cmo_balance()
473 if (viodev->cmo.allocated > viodev->cmo.entitled) in vio_cmo_balance()
474 need += viodev->cmo.allocated - viodev->cmo.entitled; in vio_cmo_balance()
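
vio_cmo_balance() redistributes entitlement after any change: the fragments show every device being reset to VIO_CMO_MIN_ENT, then topped up in chunks toward its desired level, with whatever the devices cannot justify flowing back to the excess pool. A simplified model of the distribution loop, reusing the toy types and helpers above; the chunk size is an assumption, and the kernel's "need"-based accounting against the excess pool is reduced here to a plain surplus counter:

#define VIO_CMO_BALANCE_CHUNK 131072  /* assumed chunk granularity */

static void vio_cmo_balance(size_t total_entitlement)
{
	size_t avail = total_entitlement;
	size_t level = VIO_CMO_MIN_ENT;
	size_t chunk;
	int i, fulfilled;

	/* Pass 1: every device is guaranteed its minimum entitlement;
	 * what it has actually allocated is already spoken for. */
	for (i = 0; i < dev_count; i++) {
		dev_list[i]->cmo.entitled = VIO_CMO_MIN_ENT;
		avail -= MAX(dev_list[i]->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/* Pass 2: raise a common "level" chunk by chunk, topping each
	 * device up toward its desired entitlement, until the surplus
	 * runs out or every device is satisfied. */
	while (avail) {
		fulfilled = 0;
		for (i = 0; i < dev_count && avail; i++) {
			struct vio_dev *viodev = dev_list[i];

			if (viodev->cmo.desired <= level ||
			    viodev->cmo.entitled >= viodev->cmo.desired) {
				fulfilled++;
				continue;
			}
			chunk = MIN(avail, (size_t)VIO_CMO_BALANCE_CHUNK);
			chunk = MIN(chunk, viodev->cmo.desired -
					   viodev->cmo.entitled);
			viodev->cmo.entitled += chunk;
			avail -= chunk;
		}
		if (fulfilled == dev_count)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}
}
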
487 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_alloc_coherent() local
490 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) { in vio_dma_iommu_alloc_coherent()
491 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_alloc_coherent()
497 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); in vio_dma_iommu_alloc_coherent()
498 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_alloc_coherent()
508 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_free_coherent() local
512 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); in vio_dma_iommu_free_coherent()
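
The DMA ops fragments all follow one pattern: charge the CMO pool before doing the real work, refund the charge if the work fails, and count the failure in cmo.allocs_failed. For coherent buffers the charge is rounded up to whole CPU pages. A model of the allocate path, reusing the toy types above; toy_alloc_coherent() is a hypothetical stand-in for the kernel's IOMMU-backed allocator:

#include <stdlib.h>

#define PAGE_SIZE 4096                /* assumed CPU page size */
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

static int allocs_failed;  /* stands in for atomic_t cmo.allocs_failed */

static void *toy_alloc_coherent(size_t size) { return malloc(size); }

static void *vio_alloc_coherent_model(struct vio_dev *viodev, size_t size)
{
	void *ret;

	/* Charge first; fail the allocation outright if the device's
	 * entitlement cannot cover it. */
	if (vio_cmo_alloc(viodev, ROUNDUP(size, PAGE_SIZE))) {
		allocs_failed++;
		return NULL;
	}
	ret = toy_alloc_coherent(size);
	if (ret == NULL) {
		/* The real allocation failed: give the charge back. */
		vio_cmo_dealloc(viodev, ROUNDUP(size, PAGE_SIZE));
		allocs_failed++;
	}
	return ret;
}

The free path (vio_dma_iommu_free_coherent, line 508 above) is the mirror image: release the buffer, then vio_cmo_dealloc() the same page-rounded size.
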
520 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_map_page() local
525 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { in vio_dma_iommu_map_page()
526 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_map_page()
532 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); in vio_dma_iommu_map_page()
533 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_map_page()
544 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_unmap_page() local
550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); in vio_dma_iommu_unmap_page()
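
Single-page map/unmap uses the identical charge/refund pattern, except the rounding granularity is the IOMMU page size of the device's table, IOMMU_PAGE_SIZE(tbl) in the fragments, rather than the CPU PAGE_SIZE. A sketch, with iommu_page_size() as a hypothetical stand-in for that macro:

static size_t iommu_page_size(void) { return 4096; }  /* assumed */

static int vio_map_page_model(struct vio_dev *viodev, size_t size)
{
	if (vio_cmo_alloc(viodev, ROUNDUP(size, iommu_page_size()))) {
		allocs_failed++;
		return -1;  /* the kernel path reports a DMA mapping error */
	}
	/* ... perform the mapping; if it fails, refund the charge with
	 * vio_cmo_dealloc() and bump allocs_failed, exactly as in the
	 * coherent-buffer sketch above ... */
	return 0;
}
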
557 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_map_sg() local
567 if (vio_cmo_alloc(viodev, alloc_size)) { in vio_dma_iommu_map_sg()
568 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_map_sg()
575 vio_cmo_dealloc(viodev, alloc_size); in vio_dma_iommu_map_sg()
576 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_map_sg()
583 vio_cmo_dealloc(viodev, alloc_size); in vio_dma_iommu_map_sg()
593 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_unmap_sg() local
605 vio_cmo_dealloc(viodev, alloc_size); in vio_dma_iommu_unmap_sg()
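
For scatter/gather maps the charge is the sum of every segment rounded to the IOMMU page size; after a successful mapping the kernel refunds whatever the (possibly merged) result did not consume, and on failure it refunds the whole charge. A sketch of the charge computation, with a toy segment type standing in for struct scatterlist:

struct toy_sg { size_t length; };

static int vio_map_sg_model(struct vio_dev *viodev,
			    struct toy_sg *sgl, int nelems)
{
	size_t alloc_size = 0;
	int i;

	for (i = 0; i < nelems; i++)
		alloc_size += ROUNDUP(sgl[i].length, iommu_page_size());

	if (vio_cmo_alloc(viodev, alloc_size)) {
		allocs_failed++;
		return 0;  /* zero mapped segments signals failure */
	}
	/* ... map the list; on failure vio_cmo_dealloc(viodev, alloc_size)
	 * in full, on success refund the unused remainder ... */
	return nelems;
}
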
641 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) in vio_cmo_set_dev_desired() argument
660 if (viodev == dev_ent->viodev) { in vio_cmo_set_dev_desired()
670 if (desired >= viodev->cmo.desired) { in vio_cmo_set_dev_desired()
672 vio_cmo.desired += desired - viodev->cmo.desired; in vio_cmo_set_dev_desired()
673 viodev->cmo.desired = desired; in vio_cmo_set_dev_desired()
676 vio_cmo.desired -= viodev->cmo.desired - desired; in vio_cmo_set_dev_desired()
677 viodev->cmo.desired = desired; in vio_cmo_set_dev_desired()
682 if (viodev->cmo.entitled > desired) { in vio_cmo_set_dev_desired()
683 vio_cmo.reserve.size -= viodev->cmo.entitled - desired; in vio_cmo_set_dev_desired()
684 vio_cmo.excess.size += viodev->cmo.entitled - desired; in vio_cmo_set_dev_desired()
690 if (viodev->cmo.allocated < viodev->cmo.entitled) in vio_cmo_set_dev_desired()
691 vio_cmo.excess.free += viodev->cmo.entitled - in vio_cmo_set_dev_desired()
692 max(viodev->cmo.allocated, desired); in vio_cmo_set_dev_desired()
693 viodev->cmo.entitled = desired; in vio_cmo_set_dev_desired()
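
vio_cmo_set_dev_desired() is the entry point for changing a device's desired entitlement at runtime (it backs the sysfs cmo_desired attribute further down). Growing the target is pure bookkeeping; shrinking it may strand entitlement above the new target, which gets reclassified from the reserve pool to the excess pool. A sketch, reusing the toy types above; the device-list lookup, locking, and the rebalance the kernel schedules afterwards are omitted:

static void vio_cmo_set_dev_desired_model(struct vio_dev *viodev,
					  size_t desired)
{
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;  /* never below the floor */

	if (desired >= viodev->cmo.desired) {
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
		return;
	}

	vio_cmo.desired -= viodev->cmo.desired - desired;
	viodev->cmo.desired = desired;

	/* Entitlement above the new target moves from reserve to excess;
	 * the unallocated part of it becomes immediately free. */
	if (viodev->cmo.entitled > desired) {
		vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
		vio_cmo.excess.size += viodev->cmo.entitled - desired;
		if (viodev->cmo.allocated < viodev->cmo.entitled)
			vio_cmo.excess.free += viodev->cmo.entitled -
					       MAX(viodev->cmo.allocated, desired);
		viodev->cmo.entitled = desired;
	}
}
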
714 static int vio_cmo_bus_probe(struct vio_dev *viodev) in vio_cmo_bus_probe() argument
717 struct device *dev = &viodev->dev; in vio_cmo_bus_probe()
727 switch (viodev->family) { in vio_cmo_bus_probe()
729 if (of_get_property(viodev->dev.of_node, in vio_cmo_bus_probe()
737 dev_warn(dev, "unknown device family: %d\n", viodev->family); in vio_cmo_bus_probe()
751 viodev->cmo.desired = in vio_cmo_bus_probe()
752 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl); in vio_cmo_bus_probe()
753 if (viodev->cmo.desired < VIO_CMO_MIN_ENT) in vio_cmo_bus_probe()
754 viodev->cmo.desired = VIO_CMO_MIN_ENT; in vio_cmo_bus_probe()
762 dev_ent->viodev = viodev; in vio_cmo_bus_probe()
766 viodev->cmo.desired = 0; in vio_cmo_bus_probe()
781 vio_cmo.desired += (viodev->cmo.desired - in vio_cmo_bus_probe()
807 vio_cmo.desired += viodev->cmo.desired; in vio_cmo_bus_probe()
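
vio_cmo_bus_probe() sizes a new device at driver-bind time: the driver's get_desired_dma() callback reports how much DMA entitlement it wants, which is rounded to the IOMMU page size and clamped to the per-device floor (a device without CMO needs gets desired = 0 instead, per line 766). A sketch of just that sizing step; the callback type is simplified from the kernel's struct vio_driver:

static size_t probe_desired_model(struct vio_dev *viodev,
				  size_t (*get_desired_dma)(struct vio_dev *))
{
	size_t desired = ROUNDUP(get_desired_dma(viodev), iommu_page_size());

	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;
	return desired;
}
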
823 static void vio_cmo_bus_remove(struct vio_dev *viodev) in vio_cmo_bus_remove() argument
830 if (viodev->cmo.allocated) { in vio_cmo_bus_remove()
831 dev_err(&viodev->dev, "%s: device had %lu bytes of IO " in vio_cmo_bus_remove()
833 __func__, viodev->cmo.allocated); in vio_cmo_bus_remove()
842 if (viodev == dev_ent->viodev) { in vio_cmo_bus_remove()
853 if (viodev->cmo.entitled) { in vio_cmo_bus_remove()
859 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT); in vio_cmo_bus_remove()
866 viodev->cmo.entitled -= VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
869 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) { in vio_cmo_bus_remove()
870 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT - in vio_cmo_bus_remove()
873 viodev->cmo.entitled -= tmp; in vio_cmo_bus_remove()
877 vio_cmo.excess.size += viodev->cmo.entitled; in vio_cmo_bus_remove()
878 vio_cmo.excess.free += viodev->cmo.entitled; in vio_cmo_bus_remove()
879 vio_cmo.reserve.size -= viodev->cmo.entitled; in vio_cmo_bus_remove()
886 viodev->cmo.entitled = VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
887 viodev->cmo.desired = VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
888 atomic_set(&viodev->cmo.allocs_failed, 0); in vio_cmo_bus_remove()
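
vio_cmo_bus_remove() is the unwind: complain if the device still holds allocations, return everything above the per-device floor to the pools (topping up an underfunded spare first), and reset the counters so a module reload starts from a clean minimum. A sketch, reusing the toy types above:

#include <stdio.h>

static void vio_cmo_bus_remove_model(struct vio_dev *viodev)
{
	size_t tmp;

	if (viodev->cmo.allocated)
		fprintf(stderr, "device still had %zu bytes of IO allocated\n",
			viodev->cmo.allocated);

	if (viodev->cmo.entitled) {
		/* The bus-wide desired total drops by what this device
		 * wanted above the floor. */
		vio_cmo.desired -= viodev->cmo.desired - VIO_CMO_MIN_ENT;

		/* Only the portion above the floor is given back; the
		 * device keeps VIO_CMO_MIN_ENT until it is unregistered. */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Top up an underfunded spare from the returned amount. */
		if (viodev->cmo.entitled && vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = MIN(viodev->cmo.entitled,
				  VIO_CMO_MIN_ENT - vio_cmo.spare);
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Whatever is left migrates from reserve to excess. */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;
	}

	viodev->cmo.entitled = VIO_CMO_MIN_ENT;
	viodev->cmo.desired = VIO_CMO_MIN_ENT;
	allocs_failed = 0;  /* atomic_set(..., 0) in the kernel */
}
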
894 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) in vio_cmo_set_dma_ops() argument
896 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops); in vio_cmo_set_dma_ops()
962 struct vio_dev *viodev = to_vio_dev(dev); in cmo_allocs_failed_show() local
963 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed)); in cmo_allocs_failed_show()
969 struct vio_dev *viodev = to_vio_dev(dev); in cmo_allocs_failed_store() local
970 atomic_set(&viodev->cmo.allocs_failed, 0); in cmo_allocs_failed_store()
977 struct vio_dev *viodev = to_vio_dev(dev); in cmo_desired_store() local
985 vio_cmo_set_dev_desired(viodev, new_desired); in cmo_desired_store()
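
The cmo_* fragments here are the sysfs surface for all of the above: reading cmo_allocs_failed prints the failure counter, writing it clears the counter, and writing cmo_desired parses the value and feeds it to vio_cmo_set_dev_desired(). A userspace model of the three callbacks, reusing the toy state above (in the kernel these are DEVICE_ATTR show/store methods, and the parse uses kstrtoul rather than strtoul):

static int cmo_allocs_failed_show_model(char *buf)
{
	return sprintf(buf, "%d\n", allocs_failed);
}

static void cmo_allocs_failed_store_model(void)
{
	allocs_failed = 0;  /* any write clears the counter */
}

static void cmo_desired_store_model(struct vio_dev *viodev, const char *buf)
{
	size_t new_desired = strtoul(buf, NULL, 10);

	vio_cmo_set_dev_desired_model(viodev, new_desired);
}
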
1085 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} in vio_cmo_set_dev_desired() argument
1086 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } in vio_cmo_bus_probe() argument
1087 static void vio_cmo_bus_remove(struct vio_dev *viodev) {} in vio_cmo_bus_remove() argument
1088 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {} in vio_cmo_set_dma_ops() argument
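
Lines 1085-1088 are the other half of a build-time switch: when CMO support is not configured, the hooks collapse to no-ops so the generic bus code can call them unconditionally. The shape of the conditional (the guard in vio.c is CONFIG_PPC_SMLPAR):

#ifdef CONFIG_PPC_SMLPAR
/* the full entitlement-accounting implementations shown above */
#else
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
#endif
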
1248 struct vio_dev *viodev = to_vio_dev(dev); in vio_bus_probe() local
1256 id = vio_match_device(viodrv->id_table, viodev); in vio_bus_probe()
1258 memset(&viodev->cmo, 0, sizeof(viodev->cmo)); in vio_bus_probe()
1260 error = vio_cmo_bus_probe(viodev); in vio_bus_probe()
1264 error = viodrv->probe(viodev, id); in vio_bus_probe()
1266 vio_cmo_bus_remove(viodev); in vio_bus_probe()
1275 struct vio_dev *viodev = to_vio_dev(dev); in vio_bus_remove() local
1287 ret = viodrv->remove(viodev); in vio_bus_remove()
1290 vio_cmo_bus_remove(viodev); in vio_bus_remove()
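
vio_bus_probe() and vio_bus_remove() are the driver-core glue: CMO accounting brackets the driver's own probe/remove, so a failed probe unwinds the entitlement that was just granted. A sketch of that ordering, with a toy driver type standing in for struct vio_driver and a trivially-succeeding grant standing in for the real vio_cmo_bus_probe():

#include <string.h>

struct toy_vio_driver {
	int (*probe)(struct vio_dev *);
	int (*remove)(struct vio_dev *);
};

static int vio_cmo_bus_probe_model(struct vio_dev *viodev)
{
	viodev->cmo.entitled = VIO_CMO_MIN_ENT;  /* simplified grant */
	viodev->cmo.desired = VIO_CMO_MIN_ENT;
	return 0;
}

static int vio_bus_probe_model(struct toy_vio_driver *viodrv,
			       struct vio_dev *viodev)
{
	int error;

	memset(&viodev->cmo, 0, sizeof(viodev->cmo));  /* clean slate */
	error = vio_cmo_bus_probe_model(viodev);       /* grant entitlement */
	if (error)
		return error;

	error = viodrv->probe(viodev);
	if (error)
		vio_cmo_bus_remove_model(viodev);      /* unwind on failure */
	return error;
}

vio_bus_remove() is the mirror: the driver's remove() runs first, then vio_cmo_bus_remove() reclaims the entitlement (line 1290 above).
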
1348 struct vio_dev *viodev; in vio_register_device_node() local
1388 viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL); in vio_register_device_node()
1389 if (viodev == NULL) { in vio_register_device_node()
1395 viodev->family = family; in vio_register_device_node()
1396 if (viodev->family == VDEVICE) { in vio_register_device_node()
1400 viodev->type = of_node->type; in vio_register_device_node()
1414 dev_set_name(&viodev->dev, "%x", unit_address); in vio_register_device_node()
1415 viodev->irq = irq_of_parse_and_map(of_node, 0); in vio_register_device_node()
1416 viodev->unit_address = unit_address; in vio_register_device_node()
1423 viodev->resource_id = of_read_number(prop, 1); in vio_register_device_node()
1425 dev_set_name(&viodev->dev, "%s", of_node_name); in vio_register_device_node()
1426 viodev->type = of_node_name; in vio_register_device_node()
1427 viodev->irq = 0; in vio_register_device_node()
1430 viodev->name = of_node->name; in vio_register_device_node()
1431 viodev->dev.of_node = of_node_get(of_node); in vio_register_device_node()
1433 set_dev_node(&viodev->dev, of_node_to_nid(of_node)); in vio_register_device_node()
1436 viodev->dev.parent = &vio_bus_device.dev; in vio_register_device_node()
1437 viodev->dev.bus = &vio_bus_type; in vio_register_device_node()
1438 viodev->dev.release = vio_dev_release; in vio_register_device_node()
1440 if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) { in vio_register_device_node()
1442 vio_cmo_set_dma_ops(viodev); in vio_register_device_node()
1444 set_dma_ops(&viodev->dev, &dma_iommu_ops); in vio_register_device_node()
1446 set_iommu_table_base(&viodev->dev, in vio_register_device_node()
1447 vio_build_iommu_table(viodev)); in vio_register_device_node()
1451 viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64); in vio_register_device_node()
1452 viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask; in vio_register_device_node()
1456 if (device_register(&viodev->dev)) { in vio_register_device_node()
1458 __func__, dev_name(&viodev->dev)); in vio_register_device_node()
1459 put_device(&viodev->dev); in vio_register_device_node()
1463 return viodev; in vio_register_device_node()
1466 kfree(viodev); in vio_register_device_node()
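
vio_register_device_node() ties the whole file together, and the fragments trace its flow. A condensed outline in kernel style, kept close to the calls visible above (the firmware_has_feature(FW_FEATURE_CMO) branch condition and the warning printk are filled in from the kernel source); it is not compilable as-is, since family, unit_address, prop, and of_node_name come from device-tree parsing that the listing, and hence this outline, elides:

struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;

	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL)
		goto out;  /* allocation failure */

	/* /vdevice children are named by unit address and get an IRQ;
	 * platform-facilities nodes are named by node name instead. */
	viodev->family = family;
	if (viodev->family == VDEVICE) {
		viodev->type = of_node->type;
		dev_set_name(&viodev->dev, "%x", unit_address);
		viodev->irq = irq_of_parse_and_map(of_node, 0);
		viodev->unit_address = unit_address;
	} else {
		viodev->resource_id = of_read_number(prop, 1);
		dev_set_name(&viodev->dev, "%s", of_node_name);
		viodev->type = of_node_name;
		viodev->irq = 0;
	}

	viodev->name = of_node->name;
	viodev->dev.of_node = of_node_get(of_node);
	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;

	/* DMA setup only for nodes with a DMA window: CMO-aware ops when
	 * CMO firmware support is active, plain dma_iommu_ops otherwise. */
	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);
		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));
		viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
		viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
	}

	/* device_register() takes over the lifetime: on failure the
	 * put_device() drops the reference and release() frees viodev. */
	if (device_register(&viodev->dev)) {
		pr_warn("%s: failed to register device %s\n",
			__func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}
	return viodev;

out:
	kfree(viodev);
	return NULL;
}

The unregister path at lines 1592-1596 below is the converse: device_unregister() drops the device, and VDEVICE-family devices also dispose of the IRQ mapping acquired here.
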
1592 void vio_unregister_device(struct vio_dev *viodev) in vio_unregister_device() argument
1594 device_unregister(&viodev->dev); in vio_unregister_device()
1595 if (viodev->family == VDEVICE) in vio_unregister_device()
1596 irq_dispose_mapping(viodev->irq); in vio_unregister_device()