Lines Matching full:desired

84  * @desired: desired memory for system operation
97 size_t desired; member
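
The hits above come from the Cooperative Memory Overcommitment (CMO) accounting in the pseries VIO bus code, where "desired" exists both as a bus-wide total and as a per-device field. A minimal user-space sketch of that bookkeeping follows; the struct layout and the VIO_CMO_MIN_ENT value are simplified for illustration and are not the kernel's exact definitions.

#include <stddef.h>

/* Per-device entitlement floor (illustrative value, not the kernel's). */
#define VIO_CMO_MIN_ENT 1562624

/* One pool of I/O memory entitlement. */
struct vio_cmo_pool {
	size_t size;	/* bytes committed to this pool */
	size_t free;	/* bytes not yet handed out to a device */
};

/* Simplified bus-wide CMO state, mirroring the fields matched above. */
struct vio_cmo {
	size_t entitled;		/* total entitlement granted by the platform */
	struct vio_cmo_pool reserve;	/* guaranteed allocations */
	struct vio_cmo_pool excess;	/* best-effort allocations */
	size_t min;			/* reserve floor: VIO_CMO_MIN_ENT per device */
	size_t desired;			/* desired memory for system operation */
};

/* Simplified per-device CMO state. */
struct vio_cmo_dev {
	size_t desired;		/* what the driver (or sysfs) asked for */
	size_t entitled;	/* what balancing actually granted */
	size_t allocated;	/* what the device currently has mapped */
};
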
184 * pool is used to increase the reserve pool toward the desired entitlement
244 * Increase the reserve pool until the desired allocation is met. in vio_cmo_dealloc()
248 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) { in vio_cmo_dealloc()
249 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size)); in vio_cmo_dealloc()
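
Hits 244-249 are the tail of vio_cmo_dealloc(): entitlement freed back from the excess pool is used to grow the reserve toward the desired level before the remainder is returned as free excess. A hedged stand-alone sketch of that step, using the simplified structures above:

/* Redistribute entitlement freed from the excess pool (simplified model).
 * excess_freed is assumed not to exceed cmo->excess.size. */
static void cmo_dealloc_excess(struct vio_cmo *cmo, size_t excess_freed)
{
	/* Grow the reserve pool toward the desired entitlement first. */
	if (excess_freed && cmo->desired > cmo->reserve.size) {
		size_t tmp = excess_freed;

		if (tmp > cmo->desired - cmo->reserve.size)
			tmp = cmo->desired - cmo->reserve.size;

		cmo->excess.size -= tmp;
		cmo->reserve.size += tmp;
		cmo->reserve.free += tmp;
		excess_freed -= tmp;
	}

	/* Whatever is left simply becomes free excess again. */
	cmo->excess.free += excess_freed;
}
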
371 * The list of devices is iterated through to recalculate the desired
403 cmo->desired = cmo->min; in vio_cmo_balance()
414 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT); in vio_cmo_balance()
429 if (viodev->cmo.desired <= level) { in vio_cmo_balance()
437 * desired level of entitlement for the device. in vio_cmo_balance()
440 chunk = min(chunk, (viodev->cmo.desired - in vio_cmo_balance()
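
Hits 371-440 are from vio_cmo_balance(), which recalculates the bus-wide desired value from the device list and then hands out spare entitlement in chunks, never pushing a device past its own desired level. A simplified distribution loop in that spirit; the flat device array, the chunk size parameter, and the termination handling are assumptions of this sketch, not the kernel's exact algorithm:

/* Hand out 'spare' entitlement across ndev devices, chunk by chunk,
 * capping each device at its desired level (simplified model of the
 * vio_cmo_balance() distribution pass). */
static void cmo_distribute(struct vio_cmo_dev *devs, int ndev,
			   size_t spare, size_t chunk_size)
{
	int progress = 1;

	if (chunk_size == 0)
		return;

	while (spare && progress) {
		progress = 0;

		for (int i = 0; i < ndev && spare; i++) {
			size_t level = devs[i].entitled;
			size_t chunk;

			/* Device already has what it asked for. */
			if (devs[i].desired <= level)
				continue;

			/* Give out at most one chunk, and never more than
			 * the device's remaining desired entitlement. */
			chunk = chunk_size < spare ? chunk_size : spare;
			if (chunk > devs[i].desired - level)
				chunk = devs[i].desired - level;

			devs[i].entitled += chunk;
			spare -= chunk;
			progress = 1;
		}
	}
}
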
618 * vio_cmo_set_dev_desired - Set desired entitlement for a device
621 * @desired: new desired entitlement level in bytes
624 * through sysfs. The desired entitlement level is changed and a balancing
627 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) in vio_cmo_set_dev_desired() argument
637 if (desired < VIO_CMO_MIN_ENT) in vio_cmo_set_dev_desired()
638 desired = VIO_CMO_MIN_ENT; in vio_cmo_set_dev_desired()
655 /* Increase/decrease in desired device entitlement */ in vio_cmo_set_dev_desired()
656 if (desired >= viodev->cmo.desired) { in vio_cmo_set_dev_desired()
658 vio_cmo.desired += desired - viodev->cmo.desired; in vio_cmo_set_dev_desired()
659 viodev->cmo.desired = desired; in vio_cmo_set_dev_desired()
661 /* Decrease bus and device values for desired entitlement */ in vio_cmo_set_dev_desired()
662 vio_cmo.desired -= viodev->cmo.desired - desired; in vio_cmo_set_dev_desired()
663 viodev->cmo.desired = desired; in vio_cmo_set_dev_desired()
665 * If less entitlement is desired than current entitlement, move in vio_cmo_set_dev_desired()
668 if (viodev->cmo.entitled > desired) { in vio_cmo_set_dev_desired()
669 vio_cmo.reserve.size -= viodev->cmo.entitled - desired; in vio_cmo_set_dev_desired()
670 vio_cmo.excess.size += viodev->cmo.entitled - desired; in vio_cmo_set_dev_desired()
678 max(viodev->cmo.allocated, desired); in vio_cmo_set_dev_desired()
679 viodev->cmo.entitled = desired; in vio_cmo_set_dev_desired()
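
Hits 618-679 cover vio_cmo_set_dev_desired(), called when a device's desired entitlement is changed through sysfs: the request is clamped to VIO_CMO_MIN_ENT, the bus-wide total is adjusted up or down, and any entitlement the device holds beyond the new target is shifted from the reserve pool into the excess pool. A simplified sketch of that bookkeeping (the kernel version also takes the CMO lock and schedules a rebalance, both omitted here):

/* Simplified model of changing a device's desired entitlement. */
static void cmo_set_dev_desired(struct vio_cmo *cmo, struct vio_cmo_dev *dev,
				size_t desired)
{
	/* Never let a device ask for less than the guaranteed minimum. */
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/* Track the increase or decrease at the bus level. */
	if (desired >= dev->desired)
		cmo->desired += desired - dev->desired;
	else
		cmo->desired -= dev->desired - desired;
	dev->desired = desired;

	/* If the device currently holds more entitlement than it now wants,
	 * move the surplus from the reserve pool to the excess pool; the part
	 * of it that is not actually allocated becomes free excess. */
	if (dev->entitled > desired) {
		cmo->reserve.size -= dev->entitled - desired;
		cmo->excess.size += dev->entitled - desired;

		if (dev->allocated < dev->entitled) {
			size_t in_use = dev->allocated > desired ?
					dev->allocated : desired;

			cmo->excess.free += dev->entitled - in_use;
		}
		dev->entitled = desired;
	}
}
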
730 /* Check that the driver is CMO enabled and get desired DMA */ in vio_cmo_bus_probe()
737 viodev->cmo.desired = in vio_cmo_bus_probe()
739 if (viodev->cmo.desired < VIO_CMO_MIN_ENT) in vio_cmo_bus_probe()
740 viodev->cmo.desired = VIO_CMO_MIN_ENT; in vio_cmo_bus_probe()
752 viodev->cmo.desired = 0; in vio_cmo_bus_probe()
765 /* Updated desired entitlement if device requires it */ in vio_cmo_bus_probe()
767 vio_cmo.desired += (viodev->cmo.desired - in vio_cmo_bus_probe()
793 vio_cmo.desired += viodev->cmo.desired; in vio_cmo_bus_probe()
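
Hits 730-793 are from vio_cmo_bus_probe(): when a CMO-capable driver binds, it reports how much DMA entitlement it would like, the value is clamped to VIO_CMO_MIN_ENT, and the device's contribution is folded into the bus-wide minimum and desired totals. The sketch below models only the first-probe path and uses a plain callback in place of the driver's query hook, which is an assumption of this example:

/* Simplified model of the probe-time step: ask the driver what it wants,
 * clamp to the minimum, and fold the result into the bus-wide totals.
 * get_desired() is a stand-in for the driver's query callback. */
static void cmo_bus_probe(struct vio_cmo *cmo, struct vio_cmo_dev *dev,
			  size_t (*get_desired)(void))
{
	if (!get_desired) {
		/* Drivers without CMO support get no entitlement of their own. */
		dev->desired = 0;
		return;
	}

	dev->desired = get_desired();
	if (dev->desired < VIO_CMO_MIN_ENT)
		dev->desired = VIO_CMO_MIN_ENT;

	/* Each device contributes VIO_CMO_MIN_ENT to the reserve floor and
	 * its full request to the bus-wide desired total. */
	cmo->min += VIO_CMO_MIN_ENT;
	cmo->desired += dev->desired;
}
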
843 * vio_cmo.desired in vio_cmo_bus_remove()
845 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT); in vio_cmo_bus_remove()
873 viodev->cmo.desired = VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
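
Hits 843-873 are the matching teardown in vio_cmo_bus_remove(): the device's contribution above the per-device minimum is subtracted from the bus-wide desired total and its own request drops back to VIO_CMO_MIN_ENT. In the simplified model:

/* Simplified model of removing a device's desired contribution. */
static void cmo_bus_remove(struct vio_cmo *cmo, struct vio_cmo_dev *dev)
{
	/* dev->desired never drops below VIO_CMO_MIN_ENT, so no underflow. */
	cmo->desired -= dev->desired - VIO_CMO_MIN_ENT;
	dev->desired = VIO_CMO_MIN_ENT;
}
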
932 vio_cmo.desired = vio_cmo.reserve.size; in vio_cmo_bus_init()
975 viodev_cmo_rd_attr(desired);
1026 viobus_cmo_rd_attr(desired);
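
Hits 975 and 1026 instantiate read-only sysfs attributes for the per-device and bus-wide desired values. The kernel does this with small generator macros; the sketch below only approximates that pattern, using the simplified per-device struct from earlier and snprintf in place of the kernel's device_attribute machinery.

#include <stdio.h>

/* Generate a read-only "show" helper that prints one per-device CMO field
 * (an approximation of the viodev_cmo_rd_attr() pattern, not its real body).
 * The same shape can be instantiated for any field of struct vio_cmo_dev. */
#define viodev_cmo_rd_attr(name)					\
static int cmo_##name##_show(const struct vio_cmo_dev *dev,		\
			     char *buf, size_t len)			\
{									\
	return snprintf(buf, len, "%zu\n", dev->name);			\
}

viodev_cmo_rd_attr(desired);
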
1071 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} in vio_cmo_set_dev_desired() argument