// SPDX-License-Identifier: GPL-2.0
/*
 * Cedrus VPU driver
 *
 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 * Copyright (C) 2018 Bootlin
 *
 * Based on the vim2m driver, that is:
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#include <linux/platform_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/soc/sunxi/sunxi_sram.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>

#include "cedrus.h"
#include "cedrus_hw.h"
#include "cedrus_regs.h"

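/*
 * Select the decoding engine for the requested codec by programming VE_MODE,
 * along with the reconstruction write and DDR bandwidth mode bits.
 * Returns -EINVAL for codecs this driver does not support.
 */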
int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
{
	u32 reg = 0;

	/*
	 * FIXME: This is only valid on 32-bit DDR; we should test
	 * it on the A13/A33.
	 */
	reg |= VE_MODE_REC_WR_MODE_2MB;
	reg |= VE_MODE_DDR_MODE_BW_128;

	switch (codec) {
	case CEDRUS_CODEC_MPEG2:
		reg |= VE_MODE_DEC_MPEG;
		break;

	case CEDRUS_CODEC_H264:
		reg |= VE_MODE_DEC_H264;
		break;

	default:
		return -EINVAL;
	}

	cedrus_write(dev, VE_MODE, reg);

	return 0;
}

void cedrus_engine_disable(struct cedrus_dev *dev)
{
	cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
}

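/*
 * Program the destination (capture) buffer layout registers according to the
 * selected pixel format: either linear NV12 or the hardware's 32x32-tiled
 * NV12 variant.
 */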
void cedrus_dst_format_set(struct cedrus_dev *dev,
			   struct v4l2_pix_format *fmt)
{
	unsigned int width = fmt->width;
	unsigned int height = fmt->height;
	u32 chroma_size;
	u32 reg;

	switch (fmt->pixelformat) {
	case V4L2_PIX_FMT_NV12:
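		/*
		 * 4:2:0 subsampling: the interleaved CbCr data amounts to
		 * half of the 16-aligned luma plane.
		 */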
		chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;

		reg = VE_PRIMARY_OUT_FMT_NV12;
		cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);

		reg = chroma_size / 2;
		cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);

		reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
		      VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
		cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);

		break;
	case V4L2_PIX_FMT_SUNXI_TILED_NV12:
	default:
		reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
		cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);

		reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
		cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);

		break;
	}
}

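/*
 * Decoding interrupt handler: query the codec-specific interrupt status,
 * acknowledge the interrupt, then complete the current mem2mem job by
 * returning the source and destination buffers as done or in error.
 */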
static irqreturn_t cedrus_irq(int irq, void *data)
{
	struct cedrus_dev *dev = data;
	struct cedrus_ctx *ctx;
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	enum vb2_buffer_state state;
	enum cedrus_irq_status status;

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx) {
		v4l2_err(&dev->v4l2_dev,
			 "Instance released before the end of transaction\n");
		return IRQ_NONE;
	}

	status = dev->dec_ops[ctx->current_codec]->irq_status(ctx);
	if (status == CEDRUS_IRQ_NONE)
		return IRQ_NONE;

	dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
	dev->dec_ops[ctx->current_codec]->irq_clear(ctx);

	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	if (!src_buf || !dst_buf) {
		v4l2_err(&dev->v4l2_dev,
			 "Missing source and/or destination buffers\n");
		return IRQ_HANDLED;
	}

	if (status == CEDRUS_IRQ_ERROR)
		state = VB2_BUF_STATE_ERROR;
	else
		state = VB2_BUF_STATE_DONE;

	v4l2_m2m_buf_done(src_buf, state);
	v4l2_m2m_buf_done(dst_buf, state);

	v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);

	return IRQ_HANDLED;
}

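/*
 * One-time hardware setup: match the SoC variant, request the decoding IRQ,
 * claim reserved memory and SRAM, get the clocks and reset line, map the
 * registers, then bring the clocks up and take the VPU out of reset.
 */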
int cedrus_hw_probe(struct cedrus_dev *dev)
{
	const struct cedrus_variant *variant;
	struct resource *res;
	int irq_dec;
	int ret;

	variant = of_device_get_match_data(dev->dev);
	if (!variant)
		return -EINVAL;

	dev->capabilities = variant->capabilities;

	irq_dec = platform_get_irq(dev->pdev, 0);
	if (irq_dec <= 0)
		return irq_dec;
	ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
			       0, dev_name(dev->dev), dev);
	if (ret) {
		dev_err(dev->dev, "Failed to request IRQ\n");

		return ret;
	}

	/*
	 * The VPU is only able to handle bus addresses, so we have to
	 * subtract the RAM offset from the physical addresses.
	 *
	 * This information will eventually be obtained from device-tree.
	 */

#ifdef PHYS_PFN_OFFSET
	if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET))
		dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
#endif

	ret = of_reserved_mem_device_init(dev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(dev->dev, "Failed to reserve memory\n");

		return ret;
	}

	ret = sunxi_sram_claim(dev->dev);
	if (ret) {
		dev_err(dev->dev, "Failed to claim SRAM\n");

		goto err_mem;
	}

	dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
	if (IS_ERR(dev->ahb_clk)) {
		dev_err(dev->dev, "Failed to get AHB clock\n");

		ret = PTR_ERR(dev->ahb_clk);
		goto err_sram;
	}

	dev->mod_clk = devm_clk_get(dev->dev, "mod");
	if (IS_ERR(dev->mod_clk)) {
		dev_err(dev->dev, "Failed to get MOD clock\n");

		ret = PTR_ERR(dev->mod_clk);
		goto err_sram;
	}

	dev->ram_clk = devm_clk_get(dev->dev, "ram");
	if (IS_ERR(dev->ram_clk)) {
		dev_err(dev->dev, "Failed to get RAM clock\n");

		ret = PTR_ERR(dev->ram_clk);
		goto err_sram;
	}

	dev->rstc = devm_reset_control_get(dev->dev, NULL);
	if (IS_ERR(dev->rstc)) {
		dev_err(dev->dev, "Failed to get reset control\n");

		ret = PTR_ERR(dev->rstc);
		goto err_sram;
	}

	res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
	dev->base = devm_ioremap_resource(dev->dev, res);
	if (IS_ERR(dev->base)) {
		dev_err(dev->dev, "Failed to map registers\n");

		ret = PTR_ERR(dev->base);
		goto err_sram;
	}

	ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
	if (ret) {
		dev_err(dev->dev, "Failed to set clock rate\n");

		goto err_sram;
	}

	ret = clk_prepare_enable(dev->ahb_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable AHB clock\n");

		goto err_sram;
	}

	ret = clk_prepare_enable(dev->mod_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable MOD clock\n");

		goto err_ahb_clk;
	}

	ret = clk_prepare_enable(dev->ram_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable RAM clock\n");

		goto err_mod_clk;
	}

	ret = reset_control_reset(dev->rstc);
	if (ret) {
		dev_err(dev->dev, "Failed to apply reset\n");

		goto err_ram_clk;
	}

	return 0;

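	/* Unwind in the reverse order of the setup above. */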
err_ram_clk:
	clk_disable_unprepare(dev->ram_clk);
err_mod_clk:
	clk_disable_unprepare(dev->mod_clk);
err_ahb_clk:
	clk_disable_unprepare(dev->ahb_clk);
err_sram:
	sunxi_sram_release(dev->dev);
err_mem:
	of_reserved_mem_device_release(dev->dev);

	return ret;
}

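/*
 * Tear down in reverse probe order: assert the reset line, stop the clocks,
 * then release the SRAM section and the reserved memory region.
 */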
void cedrus_hw_remove(struct cedrus_dev *dev)
{
	reset_control_assert(dev->rstc);

	clk_disable_unprepare(dev->ram_clk);
	clk_disable_unprepare(dev->mod_clk);
	clk_disable_unprepare(dev->ahb_clk);

	sunxi_sram_release(dev->dev);

	of_reserved_mem_device_release(dev->dev);
}