Lines Matching refs:viodev

69 	struct vio_dev *viodev;  member
144 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size) in vio_cmo_alloc() argument
154 if (viodev->cmo.entitled > viodev->cmo.allocated) in vio_cmo_alloc()
155 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated; in vio_cmo_alloc()
166 viodev->cmo.allocated += size; in vio_cmo_alloc()
188 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size) in vio_cmo_dealloc() argument
201 if (viodev->cmo.allocated > viodev->cmo.entitled) { in vio_cmo_dealloc()
202 excess_freed = min(reserve_freed, (viodev->cmo.allocated - in vio_cmo_dealloc()
203 viodev->cmo.entitled)); in vio_cmo_dealloc()
208 viodev->cmo.allocated -= (reserve_freed + excess_freed); in vio_cmo_dealloc()
234 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT)); in vio_cmo_dealloc()
237 viodev->cmo.entitled -= tmp; in vio_cmo_dealloc()
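
The vio_cmo_alloc()/vio_cmo_dealloc() hits above are the per-device accounting core: an allocation is charged against the device's unused reserved entitlement first and only then against the shared excess pool, and a free credits the excess pool back for anything that was allocated beyond the reserve. A minimal standalone sketch of that accounting, with the pool structures collapsed to single counters and all locking and spare handling omitted (field names follow the listing, everything else is simplified):

```c
#include <stddef.h>
#include <stdio.h>

struct cmo_dev {
	size_t entitled;   /* bytes reserved for this device */
	size_t allocated;  /* bytes currently charged to it  */
};

static size_t excess_free;     /* shared pool beyond per-device reserves */

/* Charge 'size' bytes: use the device's unused reserve first, then excess. */
static int cmo_alloc(struct cmo_dev *dev, size_t size)
{
	size_t reserve_free = 0;

	if (dev->entitled > dev->allocated)
		reserve_free = dev->entitled - dev->allocated;

	if (reserve_free >= size) {
		dev->allocated += size;
		return 0;
	}
	if (excess_free >= size - reserve_free) {
		excess_free -= size - reserve_free;
		dev->allocated += size;
		return 0;
	}
	return -1;              /* neither pool can cover the request */
}

/* Return 'size' bytes: anything above the reserve goes back to excess. */
static void cmo_dealloc(struct cmo_dev *dev, size_t size)
{
	size_t excess_freed = 0;

	if (dev->allocated > dev->entitled) {
		excess_freed = dev->allocated - dev->entitled;
		if (excess_freed > size)
			excess_freed = size;
	}
	excess_free += excess_freed;
	dev->allocated -= size;
}

int main(void)
{
	struct cmo_dev dev = { .entitled = 4096, .allocated = 0 };

	excess_free = 8192;
	printf("alloc 6k: %d\n", cmo_alloc(&dev, 6144)); /* 4k reserve + 2k excess */
	cmo_dealloc(&dev, 6144);
	printf("excess back to %zu\n", excess_free);
	return 0;
}
```
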
279 struct vio_dev *viodev; in vio_cmo_entitlement_update() local
318 viodev = dev_ent->viodev; in vio_cmo_entitlement_update()
319 if ((viodev->cmo.entitled > viodev->cmo.allocated) && in vio_cmo_entitlement_update()
320 (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) in vio_cmo_entitlement_update()
321 avail += viodev->cmo.entitled - in vio_cmo_entitlement_update()
322 max_t(size_t, viodev->cmo.allocated, in vio_cmo_entitlement_update()
343 viodev = dev_ent->viodev; in vio_cmo_entitlement_update()
345 if ((viodev->cmo.entitled > viodev->cmo.allocated) && in vio_cmo_entitlement_update()
346 (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) in vio_cmo_entitlement_update()
347 tmp = viodev->cmo.entitled - in vio_cmo_entitlement_update()
348 max_t(size_t, viodev->cmo.allocated, in vio_cmo_entitlement_update()
350 viodev->cmo.entitled -= min(tmp, delta); in vio_cmo_entitlement_update()
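
vio_cmo_entitlement_update() reacts to the platform raising or lowering the total entitlement; the references above are its shrink path, which first sums how much each device could give up (entitlement above both its current allocation and the VIO_CMO_MIN_ENT floor) and then walks the device list trimming each device toward that floor until the shortfall is covered. A simplified sketch of the trimming walk, assuming a flat device array; the floor value is a stand-in and the surrounding pool bookkeeping is left out:

```c
#include <stddef.h>

#define MIN_ENT (16 * 4096)	/* stand-in for the real VIO_CMO_MIN_ENT floor */

struct cmo_dev {
	size_t entitled;	/* bytes of entitlement held */
	size_t allocated;	/* bytes currently in use    */
};

/*
 * Reclaim up to 'delta' bytes of entitlement from the devices, never
 * trimming a device below what it has allocated or below the floor.
 * Returns the amount that could not be reclaimed.
 */
static size_t shrink_entitlement(struct cmo_dev *devs, int n, size_t delta)
{
	for (int i = 0; i < n && delta; i++) {
		struct cmo_dev *d = &devs[i];
		size_t floor, give;

		if (d->entitled <= MIN_ENT || d->entitled <= d->allocated)
			continue;

		floor = d->allocated > MIN_ENT ? d->allocated : MIN_ENT;
		give = d->entitled - floor;
		if (give > delta)
			give = delta;
		d->entitled -= give;
		delta -= give;
	}
	return delta;
}
```
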
388 struct vio_dev *viodev; in vio_cmo_balance() local
411 viodev = dev_ent->viodev; in vio_cmo_balance()
413 viodev->cmo.entitled = VIO_CMO_MIN_ENT; in vio_cmo_balance()
414 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT); in vio_cmo_balance()
415 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT); in vio_cmo_balance()
427 viodev = dev_ent->viodev; in vio_cmo_balance()
429 if (viodev->cmo.desired <= level) { in vio_cmo_balance()
440 chunk = min(chunk, (viodev->cmo.desired - in vio_cmo_balance()
441 viodev->cmo.entitled)); in vio_cmo_balance()
442 viodev->cmo.entitled += chunk; in vio_cmo_balance()
449 need = max(viodev->cmo.allocated, viodev->cmo.entitled)- in vio_cmo_balance()
450 max(viodev->cmo.allocated, level); in vio_cmo_balance()
465 viodev = dev_ent->viodev; in vio_cmo_balance()
467 if (viodev->cmo.entitled) in vio_cmo_balance()
468 cmo->reserve.size += (viodev->cmo.entitled - in vio_cmo_balance()
471 if (viodev->cmo.allocated > viodev->cmo.entitled) in vio_cmo_balance()
472 need += viodev->cmo.allocated - viodev->cmo.entitled; in vio_cmo_balance()
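
vio_cmo_balance() is the redistribution pass: every device is first reset to the VIO_CMO_MIN_ENT floor, the remaining budget is then handed out in fixed-size chunks, round after round, to devices whose desired entitlement still exceeds the current level, and a final loop rebuilds the reserve/excess split from the result. A compressed sketch of the hand-out rounds; the chunk size and floor are stand-ins, and the allocated-bytes handling and pool recomputation are simplified away:

```c
#include <stddef.h>

#define MIN_ENT  (16 * 4096)	/* stand-in floor, as in the previous sketch */
#define CHUNK    (16 * 4096)	/* stand-in for the per-round hand-out size  */

struct cmo_dev {
	size_t entitled;
	size_t desired;
};

/* Reset every device to the floor, then grow toward 'desired' in rounds. */
static void balance(struct cmo_dev *devs, int n, size_t avail)
{
	size_t level = MIN_ENT;
	int progress = 1;

	for (int i = 0; i < n; i++)
		devs[i].entitled = MIN_ENT;

	while (avail && progress) {
		progress = 0;
		for (int i = 0; i < n && avail; i++) {
			struct cmo_dev *d = &devs[i];
			size_t chunk = CHUNK;

			if (d->desired <= level)   /* satisfied at this level */
				continue;
			if (chunk > avail)
				chunk = avail;
			if (chunk > d->desired - d->entitled)
				chunk = d->desired - d->entitled;
			d->entitled += chunk;
			avail -= chunk;
			if (chunk)
				progress = 1;
		}
		level += CHUNK;
	}
}
```
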
485 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_alloc_coherent() local
488 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) { in vio_dma_iommu_alloc_coherent()
489 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_alloc_coherent()
497 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); in vio_dma_iommu_alloc_coherent()
498 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_alloc_coherent()
508 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_free_coherent() local
511 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); in vio_dma_iommu_free_coherent()
519 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_map_page() local
523 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) in vio_dma_iommu_map_page()
532 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); in vio_dma_iommu_map_page()
534 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_map_page()
543 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_unmap_page() local
547 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); in vio_dma_iommu_unmap_page()
554 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_map_sg() local
563 ret = vio_cmo_alloc(viodev, alloc_size); in vio_dma_iommu_map_sg()
574 vio_cmo_dealloc(viodev, alloc_size); in vio_dma_iommu_map_sg()
578 vio_cmo_dealloc(viodev, alloc_size); in vio_dma_iommu_map_sg()
580 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_map_sg()
589 struct vio_dev *viodev = to_vio_dev(dev); in vio_dma_iommu_unmap_sg() local
599 vio_cmo_dealloc(viodev, alloc_size); in vio_dma_iommu_unmap_sg()
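
The vio_dma_iommu_* references above wrap the normal IOMMU DMA operations with CMO accounting: each mapping call charges the size rounded up to the IOMMU page size via vio_cmo_alloc() before doing the real work, refunds the charge with vio_cmo_dealloc() if the mapping fails, and bumps cmo.allocs_failed so the failure is visible later. A toy standalone model of that charge-before-map, refund-on-failure pattern (all names below are stand-ins, not the kernel API):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define IO_PAGE_SIZE 4096UL
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct dev { size_t entitled, allocated; unsigned allocs_failed; };

/* Toy stand-ins for vio_cmo_alloc()/vio_cmo_dealloc() and the IOMMU map. */
static int cmo_alloc(struct dev *d, size_t n)
{
	if (d->allocated + n > d->entitled)
		return -1;
	d->allocated += n;
	return 0;
}
static void cmo_dealloc(struct dev *d, size_t n) { d->allocated -= n; }
static bool iommu_map_stub(struct dev *d, size_t n) { (void)d; return n != 0; }

/*
 * Charge-before-map pattern used by the DMA wrappers above: reserve the
 * rounded-up size, undo the charge if the mapping itself fails, and bump
 * the failure counter either way so the problem is visible later.
 */
static bool map_with_accounting(struct dev *d, size_t size)
{
	size_t charged = ROUND_UP(size, IO_PAGE_SIZE);

	if (cmo_alloc(d, charged)) {
		d->allocs_failed++;		/* out of entitlement */
		return false;
	}
	if (!iommu_map_stub(d, charged)) {
		cmo_dealloc(d, charged);	/* refund the charge  */
		d->allocs_failed++;
		return false;
	}
	return true;
}

int main(void)
{
	struct dev d = { .entitled = 8192 };

	printf("map 5000: %d\n", map_with_accounting(&d, 5000)); /* 8192 charged */
	printf("map 1:    %d\n", map_with_accounting(&d, 1));    /* fails, refunded */
	return 0;
}
```
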
627 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) in vio_cmo_set_dev_desired() argument
646 if (viodev == dev_ent->viodev) { in vio_cmo_set_dev_desired()
656 if (desired >= viodev->cmo.desired) { in vio_cmo_set_dev_desired()
658 vio_cmo.desired += desired - viodev->cmo.desired; in vio_cmo_set_dev_desired()
659 viodev->cmo.desired = desired; in vio_cmo_set_dev_desired()
662 vio_cmo.desired -= viodev->cmo.desired - desired; in vio_cmo_set_dev_desired()
663 viodev->cmo.desired = desired; in vio_cmo_set_dev_desired()
668 if (viodev->cmo.entitled > desired) { in vio_cmo_set_dev_desired()
669 vio_cmo.reserve.size -= viodev->cmo.entitled - desired; in vio_cmo_set_dev_desired()
670 vio_cmo.excess.size += viodev->cmo.entitled - desired; in vio_cmo_set_dev_desired()
676 if (viodev->cmo.allocated < viodev->cmo.entitled) in vio_cmo_set_dev_desired()
677 vio_cmo.excess.free += viodev->cmo.entitled - in vio_cmo_set_dev_desired()
678 max(viodev->cmo.allocated, desired); in vio_cmo_set_dev_desired()
679 viodev->cmo.entitled = desired; in vio_cmo_set_dev_desired()
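
vio_cmo_set_dev_desired() lets a driver change its desired entitlement at runtime. The interesting branch is the one shown above, where desired drops below the current entitlement: the surplus migrates from the reserve pool to the excess pool, and the part the device is not actually using becomes immediately free excess. A sketch of just that branch, assuming a simplified pool layout and omitting the clamping to the floor and the global desired bookkeeping:

```c
#include <stddef.h>

struct cmo_dev { size_t entitled, allocated, desired; };
struct pool    { size_t size, free; };

static size_t max_sz(size_t a, size_t b) { return a > b ? a : b; }

/*
 * "Desired shrank" branch: entitlement above the new desired value moves
 * from the reserve pool to the excess pool, and whatever the device is
 * not actually using becomes free excess right away.
 */
static void lower_desired(struct cmo_dev *d, struct pool *reserve,
			  struct pool *excess, size_t desired)
{
	if (d->entitled > desired) {
		reserve->size -= d->entitled - desired;
		excess->size  += d->entitled - desired;
		if (d->allocated < d->entitled)
			excess->free += d->entitled -
					max_sz(d->allocated, desired);
		d->entitled = desired;
	}
	d->desired = desired;
}
```
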
700 static int vio_cmo_bus_probe(struct vio_dev *viodev) in vio_cmo_bus_probe() argument
703 struct device *dev = &viodev->dev; in vio_cmo_bus_probe()
713 switch (viodev->family) { in vio_cmo_bus_probe()
715 if (of_get_property(viodev->dev.of_node, in vio_cmo_bus_probe()
723 dev_warn(dev, "unknown device family: %d\n", viodev->family); in vio_cmo_bus_probe()
737 viodev->cmo.desired = in vio_cmo_bus_probe()
738 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl); in vio_cmo_bus_probe()
739 if (viodev->cmo.desired < VIO_CMO_MIN_ENT) in vio_cmo_bus_probe()
740 viodev->cmo.desired = VIO_CMO_MIN_ENT; in vio_cmo_bus_probe()
748 dev_ent->viodev = viodev; in vio_cmo_bus_probe()
752 viodev->cmo.desired = 0; in vio_cmo_bus_probe()
767 vio_cmo.desired += (viodev->cmo.desired - in vio_cmo_bus_probe()
793 vio_cmo.desired += viodev->cmo.desired; in vio_cmo_bus_probe()
809 static void vio_cmo_bus_remove(struct vio_dev *viodev) in vio_cmo_bus_remove() argument
816 if (viodev->cmo.allocated) { in vio_cmo_bus_remove()
817 dev_err(&viodev->dev, "%s: device had %lu bytes of IO " in vio_cmo_bus_remove()
819 __func__, viodev->cmo.allocated); in vio_cmo_bus_remove()
828 if (viodev == dev_ent->viodev) { in vio_cmo_bus_remove()
839 if (viodev->cmo.entitled) { in vio_cmo_bus_remove()
845 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT); in vio_cmo_bus_remove()
852 viodev->cmo.entitled -= VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
855 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) { in vio_cmo_bus_remove()
856 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT - in vio_cmo_bus_remove()
859 viodev->cmo.entitled -= tmp; in vio_cmo_bus_remove()
863 vio_cmo.excess.size += viodev->cmo.entitled; in vio_cmo_bus_remove()
864 vio_cmo.excess.free += viodev->cmo.entitled; in vio_cmo_bus_remove()
865 vio_cmo.reserve.size -= viodev->cmo.entitled; in vio_cmo_bus_remove()
872 viodev->cmo.entitled = VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
873 viodev->cmo.desired = VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
874 atomic_set(&viodev->cmo.allocs_failed, 0); in vio_cmo_bus_remove()
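
vio_cmo_bus_probe() and vio_cmo_bus_remove() bracket a CMO device's lifetime: probe derives the desired entitlement from the driver's get_desired_dma() callback (IOMMU-page aligned, never below the floor), adds the device to the accounted list and grows the global desired total; remove gives the entitlement back, topping the spare pool up toward the floor first and pushing the remainder into excess before resetting the per-device counters. A teardown-only sketch of that return path, assuming desired was clamped to at least the floor at probe time and that the device's entitlement is counted in the reserve pool; the list handling and probe side are left out:

```c
#include <stddef.h>

#define MIN_ENT (16 * 4096)	/* stand-in floor, as in the earlier sketches */

struct cmo_dev { size_t entitled, desired, allocated; };
struct pool    { size_t size, free; };

/*
 * Teardown sketch: repay the floor-sized reserve the device was given at
 * probe time, top the spare pool back up toward the floor, hand whatever
 * is left to the excess pool, then reset the device's counters.
 */
static void bus_remove(struct cmo_dev *d, struct pool *reserve,
		       struct pool *excess, size_t *spare,
		       size_t *global_desired)
{
	size_t tmp;

	if (d->desired > MIN_ENT)
		*global_desired -= d->desired - MIN_ENT;

	if (d->entitled >= MIN_ENT)
		d->entitled -= MIN_ENT;	/* the probe-time reserve goes back */

	if (d->entitled && *spare < MIN_ENT) {
		tmp = d->entitled < MIN_ENT - *spare ? d->entitled
						     : MIN_ENT - *spare;
		*spare += tmp;
		d->entitled -= tmp;
	}

	excess->size  += d->entitled;
	excess->free  += d->entitled;
	reserve->size -= d->entitled;

	d->entitled = MIN_ENT;
	d->desired  = MIN_ENT;
	d->allocated = 0;
}
```
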
880 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) in vio_cmo_set_dma_ops() argument
882 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops); in vio_cmo_set_dma_ops()
948 struct vio_dev *viodev = to_vio_dev(dev); in cmo_allocs_failed_show() local
949 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed)); in cmo_allocs_failed_show()
955 struct vio_dev *viodev = to_vio_dev(dev); in cmo_allocs_failed_store() local
956 atomic_set(&viodev->cmo.allocs_failed, 0); in cmo_allocs_failed_store()
963 struct vio_dev *viodev = to_vio_dev(dev); in cmo_desired_store() local
971 vio_cmo_set_dev_desired(viodev, new_desired); in cmo_desired_store()
1071 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} in vio_cmo_set_dev_desired() argument
1072 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } in vio_cmo_bus_probe() argument
1073 static void vio_cmo_bus_remove(struct vio_dev *viodev) {} in vio_cmo_bus_remove() argument
1074 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {} in vio_cmo_set_dma_ops() argument
1236 struct vio_dev *viodev = to_vio_dev(dev); in vio_bus_probe() local
1244 id = vio_match_device(viodrv->id_table, viodev); in vio_bus_probe()
1246 memset(&viodev->cmo, 0, sizeof(viodev->cmo)); in vio_bus_probe()
1248 error = vio_cmo_bus_probe(viodev); in vio_bus_probe()
1252 error = viodrv->probe(viodev, id); in vio_bus_probe()
1254 vio_cmo_bus_remove(viodev); in vio_bus_probe()
1263 struct vio_dev *viodev = to_vio_dev(dev); in vio_bus_remove() local
1274 viodrv->remove(viodev); in vio_bus_remove()
1277 vio_cmo_bus_remove(viodev); in vio_bus_remove()
1284 struct vio_dev *viodev = to_vio_dev(dev); in vio_bus_shutdown() local
1290 viodrv->shutdown(viodev); in vio_bus_shutdown()
1352 struct vio_dev *viodev; in vio_register_device_node() local
1391 viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL); in vio_register_device_node()
1392 if (viodev == NULL) { in vio_register_device_node()
1398 viodev->family = family; in vio_register_device_node()
1399 if (viodev->family == VDEVICE) { in vio_register_device_node()
1402 viodev->type = of_node_get_device_type(of_node); in vio_register_device_node()
1403 if (!viodev->type) { in vio_register_device_node()
1416 dev_set_name(&viodev->dev, "%x", unit_address); in vio_register_device_node()
1417 viodev->irq = irq_of_parse_and_map(of_node, 0); in vio_register_device_node()
1418 viodev->unit_address = unit_address; in vio_register_device_node()
1425 viodev->resource_id = of_read_number(prop, 1); in vio_register_device_node()
1427 dev_set_name(&viodev->dev, "%pOFn", of_node); in vio_register_device_node()
1428 viodev->type = dev_name(&viodev->dev); in vio_register_device_node()
1429 viodev->irq = 0; in vio_register_device_node()
1432 viodev->name = of_node->name; in vio_register_device_node()
1433 viodev->dev.of_node = of_node_get(of_node); in vio_register_device_node()
1435 set_dev_node(&viodev->dev, of_node_to_nid(of_node)); in vio_register_device_node()
1438 viodev->dev.parent = &vio_bus_device.dev; in vio_register_device_node()
1439 viodev->dev.bus = &vio_bus_type; in vio_register_device_node()
1440 viodev->dev.release = vio_dev_release; in vio_register_device_node()
1442 if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) { in vio_register_device_node()
1444 vio_cmo_set_dma_ops(viodev); in vio_register_device_node()
1446 set_dma_ops(&viodev->dev, &dma_iommu_ops); in vio_register_device_node()
1448 set_iommu_table_base(&viodev->dev, in vio_register_device_node()
1449 vio_build_iommu_table(viodev)); in vio_register_device_node()
1453 viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64); in vio_register_device_node()
1454 viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask; in vio_register_device_node()
1458 if (device_register(&viodev->dev)) { in vio_register_device_node()
1460 __func__, dev_name(&viodev->dev)); in vio_register_device_node()
1461 put_device(&viodev->dev); in vio_register_device_node()
1465 return viodev; in vio_register_device_node()
1468 kfree(viodev); in vio_register_device_node()
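
vio_register_device_node() accounts for most of the references near the bottom of the listing: the vio_dev is allocated, named from the unit address (VDEVICE nodes) or the node name (platform-facilities nodes), given CMO-aware or plain IOMMU DMA ops depending on whether an "ibm,my-dma-window" property exists, and then registered on the VIO bus. Note the error handling at the end: once the struct device has been handed to device_register(), a failure is unwound with put_device() rather than kfree(), because the release callback now owns the memory; the kfree() path is only for errors before that point. A toy userspace model of that last idiom (the type, names, and failure condition are invented for the sketch):

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy refcounted device, mirroring the register/put pattern above. */
struct toy_dev {
	char name[32];
	int  refs;
	void (*release)(struct toy_dev *);
};

static void toy_dev_release(struct toy_dev *d)
{
	printf("releasing %s\n", d->name);
	free(d);
}

static void toy_put(struct toy_dev *d)
{
	if (--d->refs == 0)
		d->release(d);	/* owns the memory, like vio_dev_release() */
}

static bool toy_register(struct toy_dev *d)
{
	/* pretend registration can fail, e.g. on a duplicate name */
	return strcmp(d->name, "duplicate") != 0;
}

/*
 * Mirrors the tail of vio_register_device_node(): once the device has a
 * release callback, failure is handled with a put, not a direct free.
 */
static struct toy_dev *make_device(const char *name)
{
	struct toy_dev *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	snprintf(d->name, sizeof(d->name), "%s", name);
	d->refs = 1;
	d->release = toy_dev_release;

	if (!toy_register(d)) {
		fprintf(stderr, "failed to register %s\n", d->name);
		toy_put(d);	/* not free(): the release callback cleans up */
		return NULL;
	}
	return d;
}

int main(void)
{
	struct toy_dev *ok = make_device("example-unit");
	struct toy_dev *bad = make_device("duplicate");

	(void)bad;
	if (ok)
		toy_put(ok);
	return 0;
}
```
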
1594 void vio_unregister_device(struct vio_dev *viodev) in vio_unregister_device() argument
1596 device_unregister(&viodev->dev); in vio_unregister_device()
1597 if (viodev->family == VDEVICE) in vio_unregister_device()
1598 irq_dispose_mapping(viodev->irq); in vio_unregister_device()