Lines Matching +full:sc7180-dpu

2  * SPDX-License-Identifier: GPL-2.0
49 struct icc_path *path0 = of_icc_get(dev, "mdp0-mem"); in msm_mdss_parse_data_bus_icc_path()
50 struct icc_path *path1 = of_icc_get(dev, "mdp1-mem"); in msm_mdss_parse_data_bus_icc_path()
55 msm_mdss->path[0] = path0; in msm_mdss_parse_data_bus_icc_path()
56 msm_mdss->num_paths = 1; in msm_mdss_parse_data_bus_icc_path()
59 msm_mdss->path[1] = path1; in msm_mdss_parse_data_bus_icc_path()
60 msm_mdss->num_paths++; in msm_mdss_parse_data_bus_icc_path()
71 for (i = 0; i < msm_mdss->num_paths; i++) in msm_mdss_put_icc_path()
72 icc_put(msm_mdss->path[i]); in msm_mdss_put_icc_path()
79 for (i = 0; i < msm_mdss->num_paths; i++) in msm_mdss_icc_request_bw()
80 icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw)); in msm_mdss_icc_request_bw()
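
The interconnect lines above follow the usual pattern: acquire the "mdp0-mem"/"mdp1-mem" paths once, vote bandwidth while the hardware is in use, and release the paths on teardown. Below is a minimal, hedged sketch of that pattern; the struct and function names (my_mdss_icc, my_mdss_get_icc_paths, ...) are illustrative placeholders, not symbols from the driver.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

struct my_mdss_icc {			/* hypothetical; mirrors the path bookkeeping in msm_mdss */
	struct icc_path *path[2];
	u32 num_paths;
};

static int my_mdss_get_icc_paths(struct device *dev, struct my_mdss_icc *mdss)
{
	/* "mdp0-mem" is the primary path; "mdp1-mem" only exists on some SoCs. */
	struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
	struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");

	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	mdss->path[0] = path0;
	mdss->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		mdss->path[1] = path1;
		mdss->num_paths++;
	}

	return 0;
}

static void my_mdss_request_bw(struct my_mdss_icc *mdss, unsigned long bw_bps)
{
	int i;

	/* 0 average, bw_bps peak on every path; a 0 peak drops the vote. */
	for (i = 0; i < mdss->num_paths; i++)
		icc_set_bw(mdss->path[i], 0, Bps_to_icc(bw_bps));
}

static void my_mdss_put_icc_paths(struct my_mdss_icc *mdss)
{
	int i;

	for (i = 0; i < mdss->num_paths; i++)
		icc_put(mdss->path[i]);
}
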
91 interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS); in msm_mdss_irq()
94 irq_hw_number_t hwirq = fls(interrupts) - 1; in msm_mdss_irq()
97 rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain, in msm_mdss_irq()
100 dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n", in msm_mdss_irq()
117 clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask); in msm_mdss_irq_mask()
128 set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask); in msm_mdss_irq_unmask()
144 struct msm_mdss *msm_mdss = domain->host_data; in msm_mdss_irqdomain_map()
162 dev = msm_mdss->dev; in _msm_mdss_irq_domain_add()
164 domain = irq_domain_add_linear(dev->of_node, 32, in _msm_mdss_irq_domain_add()
168 return -EINVAL; in _msm_mdss_irq_domain_add()
171 msm_mdss->irq_controller.enabled_mask = 0; in _msm_mdss_irq_domain_add()
172 msm_mdss->irq_controller.domain = domain; in _msm_mdss_irq_domain_add()
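
The interrupt lines above implement a chained-handler setup: MDSS owns a single upstream interrupt, reads HW_INTR_STATUS, and dispatches each pending bit through a 32-entry linear irq_domain to the child blocks (DPU/MDP5, DSI, ...). A rough sketch of that dispatch loop follows; my_mdss_irq_host and MY_HW_INTR_STATUS are placeholders, and the register offset is not the real one.

#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

#define MY_HW_INTR_STATUS 0x0010	/* placeholder register offset */

struct my_mdss_irq_host {		/* hypothetical; mirrors the relevant msm_mdss fields */
	void __iomem *mmio;
	struct irq_domain *domain;
};

/* Chained handler for the single upstream MDSS interrupt line. */
static void my_mdss_irq(struct irq_desc *desc)
{
	struct my_mdss_irq_host *mdss = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 interrupts;

	chained_irq_enter(chip, desc);

	interrupts = readl_relaxed(mdss->mmio + MY_HW_INTR_STATUS);

	/* Walk the pending bits, highest first, and hand each one to the
	 * virtual interrupt mapped for that hw bit in the linear domain. */
	while (interrupts) {
		irq_hw_number_t hwirq = fls(interrupts) - 1;

		generic_handle_domain_irq(mdss->domain, hwirq);
		interrupts &= ~BIT(hwirq);
	}

	chained_irq_exit(chip, desc);
}

At probe time the upstream line would be attached with something like irq_set_chained_handler_and_data(platform_get_irq(pdev, 0), my_mdss_irq, mdss), which is what makes the handler data above available inside the chained handler.
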
183 * the interconnect is enabled (non-zero bandwidth). Let's make sure in msm_mdss_enable()
188 ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks); in msm_mdss_enable()
190 dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret); in msm_mdss_enable()
198 if (msm_mdss->is_mdp5) in msm_mdss_enable()
205 switch (readl_relaxed(msm_mdss->mmio + HW_REV)) { in msm_mdss_enable()
208 writel_relaxed(0x420, msm_mdss->mmio + UBWC_STATIC); in msm_mdss_enable()
212 writel_relaxed(0x103e, msm_mdss->mmio + UBWC_STATIC); in msm_mdss_enable()
213 writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2); in msm_mdss_enable()
214 writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE); in msm_mdss_enable()
217 writel_relaxed(0x1e, msm_mdss->mmio + UBWC_STATIC); in msm_mdss_enable()
220 writel_relaxed(0x101e, msm_mdss->mmio + UBWC_STATIC); in msm_mdss_enable()
229 clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks); in msm_mdss_disable()
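
msm_mdss_enable()/msm_mdss_disable() above bracket the clock handling with bandwidth votes: as the comment notes, some AXI clocks can only turn on while the interconnect carries non-zero bandwidth, so the vote comes first on enable and is dropped last on disable. A hedged sketch of that ordering is below; my_mdss_hw and my_mdss_hw_request_bw() are placeholders (the latter standing in for an icc_set_bw() loop like the one sketched earlier).

#include <linux/clk.h>
#include <linux/device.h>

struct my_mdss_hw {			/* hypothetical; only the fields this sketch needs */
	struct device *dev;
	struct clk_bulk_data *clocks;
	int num_clocks;
};

/* Bandwidth helper: assumed to vote bw_bps on every interconnect path. */
void my_mdss_hw_request_bw(struct my_mdss_hw *mdss, unsigned long bw_bps);

static int my_mdss_hw_enable(struct my_mdss_hw *mdss, unsigned long min_bw_bps)
{
	int ret;

	/*
	 * Several blocks have AXI clocks that can only turn on while the
	 * interconnect carries non-zero bandwidth, so vote before the clocks.
	 */
	my_mdss_hw_request_bw(mdss, min_bw_bps);

	ret = clk_bulk_prepare_enable(mdss->num_clocks, mdss->clocks);
	if (ret) {
		dev_err(mdss->dev, "clock enable failed, ret:%d\n", ret);
		my_mdss_hw_request_bw(mdss, 0);
		return ret;
	}

	return 0;
}

static void my_mdss_hw_disable(struct my_mdss_hw *mdss)
{
	clk_bulk_disable_unprepare(mdss->num_clocks, mdss->clocks);
	my_mdss_hw_request_bw(mdss, 0);	/* drop the vote once the clocks are off */
}
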
237 struct platform_device *pdev = to_platform_device(msm_mdss->dev); in msm_mdss_destroy()
240 pm_runtime_suspend(msm_mdss->dev); in msm_mdss_destroy()
241 pm_runtime_disable(msm_mdss->dev); in msm_mdss_destroy()
242 irq_domain_remove(msm_mdss->irq_controller.domain); in msm_mdss_destroy()
243 msm_mdss->irq_controller.domain = NULL; in msm_mdss_destroy()
285 return -EINVAL; in mdp5_mdss_parse_clock()
287 bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL); in mdp5_mdss_parse_clock()
289 return -ENOMEM; in mdp5_mdss_parse_clock()
295 ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk); in mdp5_mdss_parse_clock()
310 ret = msm_mdss_reset(&pdev->dev); in msm_mdss_init()
314 msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL); in msm_mdss_init()
316 return ERR_PTR(-ENOMEM); in msm_mdss_init()
318 msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss"); in msm_mdss_init()
319 if (IS_ERR(msm_mdss->mmio)) in msm_mdss_init()
320 return ERR_CAST(msm_mdss->mmio); in msm_mdss_init()
322 dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio); in msm_mdss_init()
324 ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss); in msm_mdss_init()
327 ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss); in msm_mdss_init()
332 ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks); in msm_mdss_init()
334 ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks); in msm_mdss_init()
336 dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret); in msm_mdss_init()
339 msm_mdss->num_clocks = ret; in msm_mdss_init()
340 msm_mdss->is_mdp5 = is_mdp5; in msm_mdss_init()
342 msm_mdss->dev = &pdev->dev; in msm_mdss_init()
355 pm_runtime_enable(&pdev->dev); in msm_mdss_init()
403 bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss"); in mdss_probe()
404 struct device *dev = &pdev->dev; in mdss_probe()
414 * MDP5/DPU based devices don't have a flat hierarchy. There is a top in mdss_probe()
415 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc. in mdss_probe()
416 * Populate the children devices, find the MDP5/DPU node, and then add in mdss_probe()
419 ret = of_platform_populate(dev->of_node, NULL, NULL, dev); in mdss_probe()
433 of_platform_depopulate(&pdev->dev); in mdss_remove()
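
mdss_probe()/mdss_remove() above use the standard pattern for a non-flat MDSS hierarchy: the parent MDSS node binds first, then its DT children (MDP5/DPU, DSI, HDMI, eDP) are instantiated as platform devices so their own drivers can probe. A minimal, hedged probe/remove pair showing that pattern; my_mdss_probe/my_mdss_remove are illustrative names only.

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int my_mdss_probe(struct platform_device *pdev)
{
	int ret;

	/* ... map registers, set up clocks, interconnects, irq domain ... */

	/*
	 * MDSS is only the top-level parent node; create platform devices
	 * for its DT children (MDP5/DPU, DSI, HDMI, eDP, ...) so their own
	 * drivers can bind to them.
	 */
	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (ret)
		return ret;

	return 0;
}

static int my_mdss_remove(struct platform_device *pdev)
{
	/* Remove the child devices before the parent goes away. */
	of_platform_depopulate(&pdev->dev);

	return 0;
}
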
442 { .compatible = "qcom,msm8998-mdss" },
443 { .compatible = "qcom,qcm2290-mdss" },
444 { .compatible = "qcom,sdm845-mdss" },
445 { .compatible = "qcom,sc7180-mdss" },
446 { .compatible = "qcom,sc7280-mdss" },
447 { .compatible = "qcom,sc8180x-mdss" },
448 { .compatible = "qcom,sm8150-mdss" },
449 { .compatible = "qcom,sm8250-mdss" },
458 .name = "msm-mdss",
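
The compatible entries and the "msm-mdss" driver name above tie together in the usual platform_driver shape. A hedged sketch follows, reusing the hypothetical my_mdss_probe()/my_mdss_remove() pair from the previous sketch; only two of the SoC compatibles are shown, plus the legacy "qcom,mdss" string checked in mdss_probe(). It registers with module_platform_driver() purely for the sake of a self-contained example; the real driver may wire its registration into the larger msm DRM module instead.

static const struct of_device_id my_mdss_dt_match[] = {
	{ .compatible = "qcom,mdss" },		/* legacy MDP5 parent node */
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	/* ... remaining SoC-specific compatibles ... */
	{ }
};
MODULE_DEVICE_TABLE(of, my_mdss_dt_match);

static struct platform_driver my_mdss_driver = {
	.probe  = my_mdss_probe,
	.remove = my_mdss_remove,
	.driver = {
		.name           = "msm-mdss",
		.of_match_table = my_mdss_dt_match,
	},
};
module_platform_driver(my_mdss_driver);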