Lines matching refs: cmo
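These hits come from the Cooperative Memory Overcommitment (CMO) support in the powerpc VIO bus driver (vio.c, found under arch/powerpc/platforms/pseries/ in recent trees, formerly arch/powerpc/kernel/). Two structures share the `cmo` name: the global `vio_cmo`, which tracks bus-wide I/O entitlement split into a reserve pool, an excess pool, and a spare; and the per-device `viodev->cmo`, which tracks each device's desired, entitled, and allocated entitlement plus an `allocs_failed` counter. The sketches after each group of hits below reconstruct the surrounding code from kernel context; they are condensed paraphrases rather than verbatim source, and details vary between kernel versions.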
152 if (viodev->cmo.entitled > viodev->cmo.allocated) in vio_cmo_alloc()
153 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated; in vio_cmo_alloc()
164 viodev->cmo.allocated += size; in vio_cmo_alloc()
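Lines 152-164 all belong to the fast-path entitlement charge. A condensed sketch of vio_cmo_alloc(), reconstructed from kernel context (the `curr`/`high` high-water-mark bookkeeping is omitted):

/* Charge a DMA mapping of `size` bytes against CMO entitlement:
 * use the device's unused reserve entitlement first, then the
 * shared excess pool, which is only usable while the spare pool
 * is fully funded.  Fails with -ENOMEM rather than blocking. */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Unused entitlement in this device's reserve (lines 152-153) */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* Excess pool is off limits while the spare is underfunded */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	if ((reserve_free + excess_free) >= size) {
		viodev->cmo.allocated += size;		/* line 164 */
		size -= min(reserve_free, size);	/* remainder comes */
		vio_cmo.excess.free -= size;		/* from the excess */
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}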
199 if (viodev->cmo.allocated > viodev->cmo.entitled) { in vio_cmo_dealloc()
200 excess_freed = min(reserve_freed, (viodev->cmo.allocated - in vio_cmo_dealloc()
201 viodev->cmo.entitled)); in vio_cmo_dealloc()
206 viodev->cmo.allocated -= (reserve_freed + excess_freed); in vio_cmo_dealloc()
232 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT)); in vio_cmo_dealloc()
235 viodev->cmo.entitled -= tmp; in vio_cmo_dealloc()
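Lines 199-235 are the inverse path. On free, anything the device had allocated above its entitlement is returned to the excess pool and the rest to its reserve; an underfunded spare pool is then topped up, if necessary by shrinking the device's own entitlement toward VIO_CMO_MIN_ENT. A condensed sketch of the middle of vio_cmo_dealloc() (locking, the excess-to-spare transfer, and the rebalance trigger are trimmed):

/* Split the freed bytes between the pools they came from. */
size_t excess_freed = 0, reserve_freed = size, tmp;
size_t spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

if (viodev->cmo.allocated > viodev->cmo.entitled) {	/* line 199 */
	excess_freed = min(reserve_freed, (viodev->cmo.allocated -
	                                   viodev->cmo.entitled));
	reserve_freed -= excess_freed;
}
viodev->cmo.allocated -= (reserve_freed + excess_freed); /* line 206 */

/* Top up the spare from freed reserve, but never push the device
 * below its minimum entitlement (the third min3() argument). */
if (spare_needed && reserve_freed) {
	tmp = min3(spare_needed, reserve_freed,
	           viodev->cmo.entitled - VIO_CMO_MIN_ENT); /* line 232 */
	vio_cmo.spare += tmp;
	viodev->cmo.entitled -= tmp;			/* line 235 */
	reserve_freed -= tmp;
}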
317 if ((viodev->cmo.entitled > viodev->cmo.allocated) && in vio_cmo_entitlement_update()
318 (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) in vio_cmo_entitlement_update()
319 avail += viodev->cmo.entitled - in vio_cmo_entitlement_update()
320 max_t(size_t, viodev->cmo.allocated, in vio_cmo_entitlement_update()
343 if ((viodev->cmo.entitled > viodev->cmo.allocated) && in vio_cmo_entitlement_update()
344 (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) in vio_cmo_entitlement_update()
345 tmp = viodev->cmo.entitled - in vio_cmo_entitlement_update()
346 max_t(size_t, viodev->cmo.allocated, in vio_cmo_entitlement_update()
348 viodev->cmo.entitled -= min(tmp, delta); in vio_cmo_entitlement_update()
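Lines 317-348 come from the entitlement-decrease path of vio_cmo_entitlement_update(). Both conditionals compute the same per-device quantity: the surplus a device can surrender, i.e. its entitlement above whichever is larger, its current allocation or the VIO_CMO_MIN_ENT floor. A sketch of the two passes over vio_cmo.device_list (first count, then withdraw):

/* Pass 1 (lines 317-320): can the devices cover the decrease? */
avail = vio_cmo.excess.free;
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
	viodev = dev_ent->viodev;
	if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
	    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
		avail += viodev->cmo.entitled -
		         max_t(size_t, viodev->cmo.allocated,
		               VIO_CMO_MIN_ENT);
}
if (avail < delta)
	return -ENOMEM;	/* cannot shrink without breaking guarantees */

/* Pass 2 (lines 343-348): after taking what it can from the excess
 * pool (not shown), withdraw each device's surplus, clamped to
 * whatever part of delta is still unserved. */
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
	viodev = dev_ent->viodev;
	tmp = 0;
	if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
	    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
		tmp = viodev->cmo.entitled -
		      max_t(size_t, viodev->cmo.allocated,
		            VIO_CMO_MIN_ENT);
	viodev->cmo.entitled -= min(tmp, delta);	/* line 348 */
	delta -= min(tmp, delta);
}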
385 struct vio_cmo *cmo; in vio_cmo_balance() local
392 cmo = container_of(work, struct vio_cmo, balance_q.work); in vio_cmo_balance()
397 cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT; in vio_cmo_balance()
398 BUG_ON(cmo->min > cmo->entitled); in vio_cmo_balance()
399 cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min)); in vio_cmo_balance()
400 cmo->min += cmo->spare; in vio_cmo_balance()
401 cmo->desired = cmo->min; in vio_cmo_balance()
407 avail = cmo->entitled - cmo->spare; in vio_cmo_balance()
411 viodev->cmo.entitled = VIO_CMO_MIN_ENT; in vio_cmo_balance()
412 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT); in vio_cmo_balance()
413 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT); in vio_cmo_balance()
427 if (viodev->cmo.desired <= level) { in vio_cmo_balance()
438 chunk = min(chunk, (viodev->cmo.desired - in vio_cmo_balance()
439 viodev->cmo.entitled)); in vio_cmo_balance()
440 viodev->cmo.entitled += chunk; in vio_cmo_balance()
447 need = max(viodev->cmo.allocated, viodev->cmo.entitled) - in vio_cmo_balance()
448 max(viodev->cmo.allocated, level); in vio_cmo_balance()
458 cmo->reserve.size = cmo->min; in vio_cmo_balance()
459 cmo->excess.free = 0; in vio_cmo_balance()
460 cmo->excess.size = 0; in vio_cmo_balance()
465 if (viodev->cmo.entitled) in vio_cmo_balance()
466 cmo->reserve.size += (viodev->cmo.entitled - in vio_cmo_balance()
469 if (viodev->cmo.allocated > viodev->cmo.entitled) in vio_cmo_balance()
470 need += viodev->cmo.allocated - viodev->cmo.entitled; in vio_cmo_balance()
472 cmo->excess.size = cmo->entitled - cmo->reserve.size; in vio_cmo_balance()
473 cmo->excess.free = cmo->excess.size - need; in vio_cmo_balance()
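Lines 385-473 span all three phases of the balancer, which runs as delayed work whenever entitlement or the device population changes. A condensed sketch follows; the real code walks vio_cmo.device_list with list_for_each_entry(), abbreviated here as a hypothetical for_each_cmo_device() for brevity:

/* Phase 1 (lines 397-401): fund every device's minimum plus a
 * spare, and restart the bus-wide desired total from that floor. */
cmo = container_of(work, struct vio_cmo, balance_q.work);
cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, cmo->entitled - cmo->min);
cmo->min += cmo->spare;
cmo->desired = cmo->min;

/* Reset each device to the floor; what remains is portioned out. */
avail = cmo->entitled - cmo->spare;
for_each_cmo_device(viodev) {	/* hypothetical device_list walk */
	viodev->cmo.entitled = VIO_CMO_MIN_ENT;		/* line 411 */
	cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
	avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
}

/* Phase 2 (lines 427-448): raise a common `level` in
 * VIO_CMO_BALANCE_CHUNK steps, granting each still-unsatisfied
 * device up to one chunk per round. */
level = VIO_CMO_MIN_ENT;
while (avail) {
	for_each_cmo_device(viodev) {
		if (viodev->cmo.desired <= level)
			continue;			/* fulfilled */
		chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
		chunk = min(chunk, (viodev->cmo.desired -
		                    viodev->cmo.entitled));
		viodev->cmo.entitled += chunk;
		/* Entitlement already backed by an allocation was not
		 * part of `avail`, so only charge the new exposure. */
		need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
		       max(viodev->cmo.allocated, level);
		avail -= need;
	}
	level += VIO_CMO_BALANCE_CHUNK;	/* the real loop also exits
					   once every device is fulfilled */
}

/* Phase 3 (lines 458-473): rebuild the pools from the new
 * entitlements.  Reserve covers every device's entitlement; the
 * rest is excess, minus excess already in use (`need`). */
cmo->reserve.size = cmo->min;
need = 0;
for_each_cmo_device(viodev) {
	if (viodev->cmo.entitled)
		cmo->reserve.size += (viodev->cmo.entitled -
		                      VIO_CMO_MIN_ENT);
	if (viodev->cmo.allocated > viodev->cmo.entitled)
		need += viodev->cmo.allocated - viodev->cmo.entitled;
}
cmo->excess.size = cmo->entitled - cmo->reserve.size;
cmo->excess.free = cmo->excess.size - need;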
487 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_alloc_coherent()
496 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_alloc_coherent()
532 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_map_page()
577 atomic_inc(&viodev->cmo.allocs_failed); in vio_dma_iommu_map_sg()
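Lines 487-577 are four instances of one pattern: every vio_dma_iommu_* operation charges its IOMMU-page-rounded size against CMO entitlement before mapping, and bumps cmo.allocs_failed when either the entitlement charge or the underlying mapping fails; this counter is what the sysfs attribute further down exposes. Roughly, for the map_page case (signatures and error constants vary by kernel version):

/* Sketch of vio_dma_iommu_map_page(): account first, map second;
 * both failure paths funnel through one allocs_failed increment. */
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl = get_iommu_table_base(dev);
dma_addr_t ret;

if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
	goto out_fail;			/* no entitlement available */
ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
                     direction, attrs);
if (unlikely(ret == DMA_MAPPING_ERROR))
	goto out_deallocate;		/* mapping failed: refund */
return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);	/* line 532 */
	return DMA_MAPPING_ERROR;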
651 if (desired >= viodev->cmo.desired) { in vio_cmo_set_dev_desired()
653 vio_cmo.desired += desired - viodev->cmo.desired; in vio_cmo_set_dev_desired()
654 viodev->cmo.desired = desired; in vio_cmo_set_dev_desired()
657 vio_cmo.desired -= viodev->cmo.desired - desired; in vio_cmo_set_dev_desired()
658 viodev->cmo.desired = desired; in vio_cmo_set_dev_desired()
663 if (viodev->cmo.entitled > desired) { in vio_cmo_set_dev_desired()
664 vio_cmo.reserve.size -= viodev->cmo.entitled - desired; in vio_cmo_set_dev_desired()
665 vio_cmo.excess.size += viodev->cmo.entitled - desired; in vio_cmo_set_dev_desired()
671 if (viodev->cmo.allocated < viodev->cmo.entitled) in vio_cmo_set_dev_desired()
672 vio_cmo.excess.free += viodev->cmo.entitled - in vio_cmo_set_dev_desired()
673 max(viodev->cmo.allocated, desired); in vio_cmo_set_dev_desired()
674 viodev->cmo.entitled = desired; in vio_cmo_set_dev_desired()
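Lines 651-674 are the two halves of vio_cmo_set_dev_desired(), the API a CMO-aware driver uses to change its desired entitlement at runtime. Raising the target only bumps the bus-wide sum; lowering it may also hand entitlement the device no longer wants back from the reserve pool to the excess pool. A sketch (device-list lookup, locking, and the rebalance kick omitted):

if (desired < VIO_CMO_MIN_ENT)
	desired = VIO_CMO_MIN_ENT;	/* never below the floor */

if (desired >= viodev->cmo.desired) {
	/* Growing: just update the device and the bus-wide sum;
	 * the balancer hands out actual entitlement later. */
	vio_cmo.desired += desired - viodev->cmo.desired; /* line 653 */
	viodev->cmo.desired = desired;
} else {
	/* Shrinking: update the sums... */
	vio_cmo.desired -= viodev->cmo.desired - desired;
	viodev->cmo.desired = desired;
	/* ...and move entitlement above the new target out of the
	 * reserve pool into the excess pool (lines 663-674). */
	if (viodev->cmo.entitled > desired) {
		vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
		vio_cmo.excess.size += viodev->cmo.entitled - desired;
		/* Any part of that move not currently allocated is
		 * immediately free excess. */
		if (viodev->cmo.allocated < viodev->cmo.entitled)
			vio_cmo.excess.free += viodev->cmo.entitled -
			                       max(viodev->cmo.allocated,
			                           desired);
		viodev->cmo.entitled = desired;
	}
}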
732 viodev->cmo.desired = in vio_cmo_bus_probe()
734 if (viodev->cmo.desired < VIO_CMO_MIN_ENT) in vio_cmo_bus_probe()
735 viodev->cmo.desired = VIO_CMO_MIN_ENT; in vio_cmo_bus_probe()
747 viodev->cmo.desired = 0; in vio_cmo_bus_probe()
762 vio_cmo.desired += (viodev->cmo.desired - in vio_cmo_bus_probe()
788 vio_cmo.desired += viodev->cmo.desired; in vio_cmo_bus_probe()
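Lines 732-788 are from vio_cmo_bus_probe(), which sizes a new device's entitlement at probe time. A device with a DMA window must come from a CMO-aware driver, whose get_desired_dma() callback result is clamped to at least VIO_CMO_MIN_ENT; devices without a DMA window get no entitlement. A sketch of the two accounting branches that follow (error handling and the device-list bookkeeping trimmed; the IOMMU_PAGE_ALIGN() arity varies by version):

if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
	/* CMO-aware driver states its appetite; enforce the floor. */
	viodev->cmo.desired =
		IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
	if (viodev->cmo.desired < VIO_CMO_MIN_ENT)	/* line 734 */
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
	size = VIO_CMO_MIN_ENT;
} else {
	viodev->cmo.desired = 0;			/* line 747 */
	size = 0;
}

if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) * VIO_CMO_MIN_ENT)) {
	/* Device was already counted in the reserve; only the
	 * bus-wide desired total changes (line 762). */
	if (size)
		vio_cmo.desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
} else {
	/* Genuinely new device: fund its minimum from the excess
	 * pool first, then the spare, and grow min/desired. */
	tmp = min(size, vio_cmo.excess.free);
	vio_cmo.excess.free -= tmp;
	vio_cmo.excess.size -= tmp;
	vio_cmo.reserve.size += tmp;
	vio_cmo.spare -= size - tmp;
	vio_cmo.min += size;
	vio_cmo.desired += viodev->cmo.desired;		/* line 788 */
}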
811 if (viodev->cmo.allocated) { in vio_cmo_bus_remove()
814 __func__, viodev->cmo.allocated); in vio_cmo_bus_remove()
834 if (viodev->cmo.entitled) { in vio_cmo_bus_remove()
840 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT); in vio_cmo_bus_remove()
847 viodev->cmo.entitled -= VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
850 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) { in vio_cmo_bus_remove()
851 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT - in vio_cmo_bus_remove()
854 viodev->cmo.entitled -= tmp; in vio_cmo_bus_remove()
858 vio_cmo.excess.size += viodev->cmo.entitled; in vio_cmo_bus_remove()
859 vio_cmo.excess.free += viodev->cmo.entitled; in vio_cmo_bus_remove()
860 vio_cmo.reserve.size -= viodev->cmo.entitled; in vio_cmo_bus_remove()
867 viodev->cmo.entitled = VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
868 viodev->cmo.desired = VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
869 atomic_set(&viodev->cmo.allocs_failed, 0); in vio_cmo_bus_remove()
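Lines 811-869 are from vio_cmo_bus_remove(). A device must not leave with outstanding allocations (the driver's remove path has to unmap everything first); its entitlement is then redistributed: the spare pool is topped up first, the remainder joins the excess pool, and the device is parked at VIO_CMO_MIN_ENT so a later module reload cannot fail for lack of entitlement. A sketch:

if (viodev->cmo.allocated) {
	/* Leaking a mapping past remove is a bug (lines 811-814). */
	dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
	        "allocated after remove operation.\n",
	        __func__, viodev->cmo.allocated);
	BUG();
}

if (viodev->cmo.entitled) {
	/* Drop the device's share of the bus-wide desired total,
	 * keeping the VIO_CMO_MIN_ENT floor counted (line 840). */
	vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
	viodev->cmo.entitled -= VIO_CMO_MIN_ENT;	/* line 847 */

	/* Replenish the spare pool from the freed entitlement. */
	if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
		tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
		                                 vio_cmo.spare));
		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
	}

	/* Whatever is left moves from reserve to excess (858-860). */
	vio_cmo.excess.size += viodev->cmo.entitled;
	vio_cmo.excess.free += viodev->cmo.entitled;
	vio_cmo.reserve.size -= viodev->cmo.entitled;

	/* Park the device at the minimum so a reload succeeds. */
	viodev->cmo.entitled = VIO_CMO_MIN_ENT;
	viodev->cmo.desired = VIO_CMO_MIN_ENT;
	atomic_set(&viodev->cmo.allocs_failed, 0);	/* line 869 */
}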
937 return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
944 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed)); in cmo_allocs_failed_show()
951 atomic_set(&viodev->cmo.allocs_failed, 0); in cmo_allocs_failed_store()
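The `.name` on line 937 is not a struct field: that line sits inside a macro that pastes a field name in, stamping out one read-only sysfs show routine per plain counter, while the atomic allocs_failed counter gets hand-written show/store routines (lines 944 and 951, the store resetting it to zero). Reconstructed from kernel context:

#define viodev_cmo_rd_attr(name)					\
static ssize_t cmo_##name##_show(struct device *dev,			\
                                 struct device_attribute *attr,	\
                                 char *buf)				\
{									\
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);	\
}

/* Generates the cmo_desired, cmo_entitled, cmo_allocated files. */
viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

/* Writing anything to cmo_allocs_failed clears the counter. */
static ssize_t cmo_allocs_failed_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);	/* line 951 */
	return count;
}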
1239 memset(&viodev->cmo, 0, sizeof(viodev->cmo)); in vio_bus_probe()
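The last hit zeroes the per-device state at the top of the probe path, so every (re)bind starts from a clean slate before the CMO hooks run; in context (reconstructed the same way as the sketches above):

/* Inside vio_bus_probe(), once a driver match is found: */
memset(&viodev->cmo, 0, sizeof(viodev->cmo));		/* line 1239 */
if (firmware_has_feature(FW_FEATURE_CMO)) {
	error = vio_cmo_bus_probe(viodev);
	if (error)
		return error;
}
error = viodrv->probe(viodev, id);
if (error && firmware_has_feature(FW_FEATURE_CMO))
	vio_cmo_bus_remove(viodev);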