// SPDX-License-Identifier: GPL-2.0
/*
 * Hantro VPU codec driver
 *
 * Copyright (C) 2018 Collabora, Ltd.
 * Copyright 2018 Google LLC.
 *	Tomasz Figa <tfiga@chromium.org>
 *
 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

#include "hantro_v4l2.h"
#include "hantro.h"
#include "hantro_hw.h"

#define DRIVER_NAME "hantro-vpu"

int hantro_debug;
module_param_named(debug, hantro_debug, int, 0644);
MODULE_PARM_DESC(debug,
		 "Debug level - higher value produces more verbose messages");

void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
{
	struct v4l2_ctrl *ctrl;

	ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);
	return ctrl ? ctrl->p_cur.p : NULL;
}

dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
{
	struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
	struct vb2_buffer *buf;

	buf = vb2_find_buffer(q, ts);
	if (!buf)
		return 0;
	return hantro_get_dec_buf_addr(ctx, buf);
}

static const struct v4l2_event hantro_eos_event = {
	.type = V4L2_EVENT_EOS
};

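/*
 * Complete the src/dst buffers at the head of the m2m queues and finish the
 * current job without touching runtime PM or clocks. If the source buffer is
 * the last one of a drain sequence, flag the capture buffer with
 * V4L2_BUF_FLAG_LAST, queue an EOS event and mark the m2m context stopped.
 */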
static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
				    struct hantro_ctx *ctx,
				    enum vb2_buffer_state result)
{
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	if (WARN_ON(!src))
		return;
	if (WARN_ON(!dst))
		return;

	src->sequence = ctx->sequence_out++;
	dst->sequence = ctx->sequence_cap++;

	if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src)) {
		dst->flags |= V4L2_BUF_FLAG_LAST;
		v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
		v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
	}

	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 result);
}

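/*
 * Like hantro_job_finish_no_pm(), but additionally drops the runtime PM
 * reference (with autosuspend) and disables the bulk clocks that were
 * enabled in device_run().
 */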
static void hantro_job_finish(struct hantro_dev *vpu,
			      struct hantro_ctx *ctx,
			      enum vb2_buffer_state result)
{
	pm_runtime_mark_last_busy(vpu->dev);
	pm_runtime_put_autosuspend(vpu->dev);

	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	hantro_job_finish_no_pm(vpu, ctx, result);
}

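/*
 * Called by the codec-specific interrupt handlers once the hardware signals
 * completion. The job is finished here only if the watchdog work could still
 * be cancelled; otherwise the watchdog has already fired and owns the
 * cleanup.
 */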
void hantro_irq_done(struct hantro_dev *vpu,
		     enum vb2_buffer_state result)
{
	struct hantro_ctx *ctx =
		v4l2_m2m_get_curr_priv(vpu->m2m_dev);

	/*
	 * If cancel_delayed_work() returns false, the timeout expired. The
	 * watchdog is running and will take care of finishing the job.
	 */
	if (cancel_delayed_work(&vpu->watchdog_work)) {
		if (result == VB2_BUF_STATE_DONE && ctx->codec_ops->done)
			ctx->codec_ops->done(ctx);
		hantro_job_finish(vpu, ctx, result);
	}
}

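/*
 * Watchdog invoked when the hardware did not raise an interrupt within the
 * timeout armed in hantro_end_prepare_run(). Resets the codec and completes
 * the job with VB2_BUF_STATE_ERROR.
 */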
void hantro_watchdog(struct work_struct *work)
{
	struct hantro_dev *vpu;
	struct hantro_ctx *ctx;

	vpu = container_of(to_delayed_work(work),
			   struct hantro_dev, watchdog_work);
	ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
	if (ctx) {
		vpu_err("frame processing timed out!\n");
		ctx->codec_ops->reset(ctx);
		hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
	}
}

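/*
 * hantro_start_prepare_run() and hantro_end_prepare_run() bracket the
 * per-job register setup done by the codec ops. The former applies the
 * controls carried by the source buffer's media request and, unless the
 * variant uses late post-processing, configures the post-processor; the
 * latter completes the request and arms a 2 second watchdog for the
 * hardware run.
 */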
void hantro_start_prepare_run(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;

	src_buf = hantro_get_src_buf(ctx);
	v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
				&ctx->ctrl_handler);

	if (!ctx->is_encoder && !ctx->dev->variant->late_postproc) {
		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
			hantro_postproc_enable(ctx);
		else
			hantro_postproc_disable(ctx);
	}
}

void hantro_end_prepare_run(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;

	if (!ctx->is_encoder && ctx->dev->variant->late_postproc) {
		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
			hantro_postproc_enable(ctx);
		else
			hantro_postproc_disable(ctx);
	}

	src_buf = hantro_get_src_buf(ctx);
	v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
				   &ctx->ctrl_handler);

	/* Kick the watchdog. */
	schedule_delayed_work(&ctx->dev->watchdog_work,
			      msecs_to_jiffies(2000));
}

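/*
 * m2m device_run callback: resumes the device, enables the clocks, copies the
 * source buffer metadata to the destination and hands the job to the
 * codec-specific ->run() op. Any failure cancels the job with an error state.
 */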
static void device_run(void *priv)
{
	struct hantro_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src, *dst;
	int ret;

	src = hantro_get_src_buf(ctx);
	dst = hantro_get_dst_buf(ctx);

	ret = pm_runtime_resume_and_get(ctx->dev->dev);
	if (ret < 0)
		goto err_cancel_job;

	ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
	if (ret)
		goto err_cancel_job;

	v4l2_m2m_buf_copy_metadata(src, dst, true);

	if (ctx->codec_ops->run(ctx))
		goto err_cancel_job;

	return;

err_cancel_job:
	hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = device_run,
};

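/*
 * Initialize the OUTPUT (bitstream/raw source) and CAPTURE (destination)
 * vb2 queues for a new context. Both queues use the dma-contig allocator;
 * only the source queue supports media requests.
 */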
static int
queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct hantro_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &hantro_queue_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;

	/*
	 * Driver does mostly sequential access, so sacrifice TLB efficiency
	 * for faster allocation. Also, no CPU access on the source queue,
	 * so no kernel mapping needed.
	 */
	src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->dev->vpu_mutex;
	src_vq->dev = ctx->dev->v4l2_dev.dev;
	src_vq->supports_requests = true;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->bidirectional = true;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
	/*
	 * The kernel needs access to the JPEG destination buffer for the
	 * JPEG encoder to fill in the JPEG headers.
	 */
	if (!ctx->is_encoder)
		dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &hantro_queue_ops;
	dst_vq->buf_struct_size = sizeof(struct hantro_decoded_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->dev->vpu_mutex;
	dst_vq->dev = ctx->dev->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}

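/*
 * Validate codec-specific headers passed through stateless controls and
 * reject configurations the hardware cannot decode (unsupported chroma
 * formats, bit depths or profiles).
 */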
static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
{
	if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
		const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;

		if (sps->chroma_format_idc > 1)
			/* Only 4:0:0 and 4:2:0 are supported */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
			/* Luma and chroma bit depth mismatch */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != 0)
			/* Only 8-bit is supported */
			return -EINVAL;
	} else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
		const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;

		if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2)
			/* Only 8-bit and 10-bit are supported */
			return -EINVAL;
	} else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) {
		const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame;

		/* We only support profile 0 */
		if (dec_params->profile != 0)
			return -EINVAL;
	} else if (ctrl->id == V4L2_CID_STATELESS_AV1_SEQUENCE) {
		const struct v4l2_ctrl_av1_sequence *sequence = ctrl->p_new.p_av1_sequence;

		if (sequence->bit_depth != 8 && sequence->bit_depth != 10)
			return -EINVAL;
	}

	return 0;
}

static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);

	switch (ctrl->id) {
	case V4L2_CID_JPEG_COMPRESSION_QUALITY:
		ctx->jpeg_quality = ctrl->val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

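/*
 * The VP9, HEVC and AV1 s_ctrl handlers below watch the bit depth signalled
 * in the codec headers; when it changes, the raw (decoded) format is reset
 * via hantro_reset_raw_fmt() so that a matching pixel format is selected.
 */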
static int hantro_vp9_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_STATELESS_VP9_FRAME: {
		int bit_depth = ctrl->p_new.p_vp9_frame->bit_depth;

		if (ctx->bit_depth == bit_depth)
			return 0;

		return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
	}
	default:
		return -EINVAL;
	}

	return 0;
}

static int hantro_hevc_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_STATELESS_HEVC_SPS: {
		const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
		int bit_depth = sps->bit_depth_luma_minus8 + 8;

		if (ctx->bit_depth == bit_depth)
			return 0;

		return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
	}
	default:
		return -EINVAL;
	}

	return 0;
}

static int hantro_av1_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_STATELESS_AV1_SEQUENCE:
	{
		int bit_depth = ctrl->p_new.p_av1_sequence->bit_depth;
		bool need_postproc = HANTRO_AUTO_POSTPROC;

		if (ctrl->p_new.p_av1_sequence->flags
		    & V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT)
			need_postproc = HANTRO_FORCE_POSTPROC;

		if (ctx->bit_depth == bit_depth &&
		    ctx->need_postproc == need_postproc)
			return 0;

		return hantro_reset_raw_fmt(ctx, bit_depth, need_postproc);
	}
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
};

static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
	.s_ctrl = hantro_jpeg_s_ctrl,
};

static const struct v4l2_ctrl_ops hantro_vp9_ctrl_ops = {
	.s_ctrl = hantro_vp9_s_ctrl,
};

static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
	.s_ctrl = hantro_hevc_s_ctrl,
};

static const struct v4l2_ctrl_ops hantro_av1_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
	.s_ctrl = hantro_av1_s_ctrl,
};

#define HANTRO_JPEG_ACTIVE_MARKERS	(V4L2_JPEG_ACTIVE_MARKER_APP0 | \
					 V4L2_JPEG_ACTIVE_MARKER_COM | \
					 V4L2_JPEG_ACTIVE_MARKER_DQT | \
					 V4L2_JPEG_ACTIVE_MARKER_DHT)

static const struct hantro_ctrl controls[] = {
	{
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
			.min = 5,
			.max = 100,
			.step = 1,
			.def = 50,
			.ops = &hantro_jpeg_ctrl_ops,
		},
	}, {
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_ACTIVE_MARKER,
			.max = HANTRO_JPEG_ACTIVE_MARKERS,
			.def = HANTRO_JPEG_ACTIVE_MARKERS,
			/*
			 * Changing the set of active markers/segments also
			 * messes up the alignment of the JPEG header, which
			 * is needed to allow the hardware to write directly
			 * to the output buffer. Implementing this introduces
			 * a lot of complexity for little gain, as the markers
			 * enabled is already the minimum required set.
			 */
			.flags = V4L2_CTRL_FLAG_READ_ONLY,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_PICTURE,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
		},
	}, {
		.codec = HANTRO_VP8_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP8_FRAME,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_SPS,
			.ops = &hantro_ctrl_ops,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_PPS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
			.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
			.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
			.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_START_CODE,
			.min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
			.def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
			.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
			.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
			.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
			.menu_skip_mask =
			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
			.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
		}
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
			.min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
			.max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
			.def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_START_CODE,
			.min = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
			.max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
			.def = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
			.min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
			.max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
			.def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
			.min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
			.max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_SPS,
			.ops = &hantro_hevc_ctrl_ops,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_PPS,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_VP9_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP9_FRAME,
			.ops = &hantro_vp9_ctrl_ops,
		},
	}, {
		.codec = HANTRO_VP9_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_FRAME,
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY,
			.dims = { V4L2_AV1_MAX_TILE_COUNT },
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_SEQUENCE,
			.ops = &hantro_av1_ctrl_ops,
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_FILM_GRAIN,
		},
	},
};

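/*
 * Register every entry of controls[] whose codec mask intersects
 * allowed_codecs with the context's control handler, then apply the default
 * control values.
 */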
static int hantro_ctrls_setup(struct hantro_dev *vpu,
			      struct hantro_ctx *ctx,
			      int allowed_codecs)
{
	int i, num_ctrls = ARRAY_SIZE(controls);

	v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);

	for (i = 0; i < num_ctrls; i++) {
		if (!(allowed_codecs & controls[i].codec))
			continue;

		v4l2_ctrl_new_custom(&ctx->ctrl_handler,
				     &controls[i].cfg, NULL);
		if (ctx->ctrl_handler.error) {
			vpu_err("Adding control (%d) failed %d\n",
				controls[i].cfg.id,
				ctx->ctrl_handler.error);
			v4l2_ctrl_handler_free(&ctx->ctrl_handler);
			return ctx->ctrl_handler.error;
		}
	}
	return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
}

/*
 * V4L2 file operations.
 */

static int hantro_open(struct file *filp)
{
	struct hantro_dev *vpu = video_drvdata(filp);
	struct video_device *vdev = video_devdata(filp);
	struct hantro_func *func = hantro_vdev_to_func(vdev);
	struct hantro_ctx *ctx;
	int allowed_codecs, ret;

	/*
	 * We do not need any extra locking here, because we operate only
	 * on local data, except for reading a few fields from dev, which
	 * do not change over the device's lifetime (guaranteed by the
	 * reference on the module taken in open()), and V4L2 internal
	 * objects (such as vdev and ctx->fh), whose locking is handled by
	 * the respective helper functions used here.
	 */

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = vpu;
	if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
		allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
		ctx->is_encoder = true;
	} else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
		allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
		ctx->is_encoder = false;
	} else {
		ret = -ENODEV;
		goto err_ctx_free;
	}

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto err_ctx_free;
	}

	v4l2_fh_init(&ctx->fh, vdev);
	filp->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	hantro_reset_fmts(ctx);

	ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
	if (ret) {
		vpu_err("Failed to set up controls\n");
		goto err_fh_free;
	}
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;

	return 0;

err_fh_free:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
err_ctx_free:
	kfree(ctx);
	return ret;
}

static int hantro_release(struct file *filp)
{
	struct hantro_ctx *ctx =
		container_of(filp->private_data, struct hantro_ctx, fh);

	/*
	 * No need for extra locking because this was the last reference
	 * to this file.
	 */
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	kfree(ctx);

	return 0;
}

static const struct v4l2_file_operations hantro_fops = {
	.owner = THIS_MODULE,
	.open = hantro_open,
	.release = hantro_release,
	.poll = v4l2_m2m_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = v4l2_m2m_fop_mmap,
};

static const struct of_device_id of_hantro_match[] = {
#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
	{ .compatible = "rockchip,px30-vpu", .data = &px30_vpu_variant, },
	{ .compatible = "rockchip,rk3036-vpu", .data = &rk3036_vpu_variant, },
	{ .compatible = "rockchip,rk3066-vpu", .data = &rk3066_vpu_variant, },
	{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
	{ .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
	{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
	{ .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
	{ .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
	{ .compatible = "rockchip,rk3588-av1-vpu", .data = &rk3588_vpu981_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
	{ .compatible = "nxp,imx8mm-vpu-g1", .data = &imx8mm_vpu_g1_variant, },
	{ .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
	{ .compatible = "nxp,imx8mq-vpu-g1", .data = &imx8mq_vpu_g1_variant },
	{ .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
	{ .compatible = "microchip,sama5d4-vdec", .data = &sama5d4_vdec_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SUNXI
	{ .compatible = "allwinner,sun50i-h6-vpu-g2", .data = &sunxi_vpu_variant, },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_hantro_match);

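/*
 * Register a single media entity (either an I/O entity backed by the video
 * device node or a processing entity) with the media device, naming it
 * "<vdev name>-<entity_name>".
 */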
static int hantro_register_entity(struct media_device *mdev,
				  struct media_entity *entity,
				  const char *entity_name,
				  struct media_pad *pads, int num_pads,
				  int function, struct video_device *vdev)
{
	char *name;
	int ret;

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (function == MEDIA_ENT_F_IO_V4L) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}

	name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
			      entity_name);
	if (!name)
		return -ENOMEM;

	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;

	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}

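/*
 * Build the media controller topology for one video device function:
 * a "source" I/O entity, a "proc" processing entity and a "sink" I/O entity,
 * linked source -> proc -> sink, plus the interface node for the video
 * device.
 */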
static int hantro_attach_func(struct hantro_dev *vpu,
			      struct hantro_func *func)
{
	struct media_device *mdev = &vpu->mdev;
	struct media_link *link;
	int ret;

	/* Create the three entities with their pads */
	func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
				     &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		return ret;

	func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
	func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->proc, "proc",
				     func->proc_pads, 2, func->id,
				     &func->vdev);
	if (ret)
		goto err_rel_entity0;

	func->sink_pad.flags = MEDIA_PAD_FL_SINK;
	ret = hantro_register_entity(mdev, &func->sink, "sink",
				     &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
						  0, VIDEO_MAJOR,
						  func->vdev.minor);
	if (!func->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(&func->vdev.entity,
				      &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}
	return 0;

err_rm_devnode:
	media_devnode_remove(func->intf_devnode);

err_rm_links1:
	media_entity_remove_links(&func->sink);

err_rm_links0:
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);

err_rel_entity2:
	media_device_unregister_entity(&func->sink);

err_rel_entity1:
	media_device_unregister_entity(&func->proc);

err_rel_entity0:
	media_device_unregister_entity(&func->vdev.entity);
	return ret;
}

static void hantro_detach_func(struct hantro_func *func)
{
	media_devnode_remove(func->intf_devnode);
	media_entity_remove_links(&func->sink);
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);
	media_device_unregister_entity(&func->sink);
	media_device_unregister_entity(&func->proc);
	media_device_unregister_entity(&func->vdev.entity);
}

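/*
 * Allocate and register the encoder or decoder video device for this VPU,
 * then attach it to the media controller topology. Encoder-only ioctls are
 * disabled on the decoder node.
 */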
static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
{
	const struct of_device_id *match;
	struct hantro_func *func;
	struct video_device *vfd;
	int ret;

	match = of_match_node(of_hantro_match, vpu->dev->of_node);
	func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
	if (!func) {
		v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
		return -ENOMEM;
	}

	func->id = funcid;

	vfd = &func->vdev;
	vfd->fops = &hantro_fops;
	vfd->release = video_device_release_empty;
	vfd->lock = &vpu->vpu_mutex;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
	vfd->ioctl_ops = &hantro_ioctl_ops;
	snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
		 funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");

	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
		vpu->encoder = func;
	} else {
		vpu->decoder = func;
		v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
		v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
	}

	video_set_drvdata(vfd, vpu);

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
		return ret;
	}

	ret = hantro_attach_func(vpu, func);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev,
			 "Failed to attach functionality to the media device\n");
		goto err_unreg_dev;
	}

	v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
		  vfd->num);

	return 0;

err_unreg_dev:
	video_unregister_device(vfd);
	return ret;
}

static int hantro_add_enc_func(struct hantro_dev *vpu)
{
	if (!vpu->variant->enc_fmts)
		return 0;

	return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
}

static int hantro_add_dec_func(struct hantro_dev *vpu)
{
	if (!vpu->variant->dec_fmts)
		return 0;

	return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
}

static void hantro_remove_func(struct hantro_dev *vpu,
			       unsigned int funcid)
{
	struct hantro_func *func;

	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
		func = vpu->encoder;
	else
		func = vpu->decoder;

	if (!func)
		return;

	hantro_detach_func(func);
	video_unregister_device(&func->vdev);
}

static void hantro_remove_enc_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
}

static void hantro_remove_dec_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
}

static const struct media_device_ops hantro_m2m_media_ops = {
	.req_validate = vb2_request_validate,
	.req_queue = v4l2_m2m_request_queue,
};

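/*
 * Probe: look up the SoC variant, acquire clocks, optional resets, register
 * ranges and IRQs, set the 32-bit DMA mask, enable runtime PM with
 * autosuspend, and finally register the V4L2, mem2mem and media devices.
 */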
static int hantro_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct hantro_dev *vpu;
	int num_bases;
	int i, ret;

	vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
	if (!vpu)
		return -ENOMEM;

	vpu->dev = &pdev->dev;
	vpu->pdev = pdev;
	mutex_init(&vpu->vpu_mutex);
	spin_lock_init(&vpu->irqlock);

	match = of_match_node(of_hantro_match, pdev->dev.of_node);
	vpu->variant = match->data;

	/*
	 * Support for nxp,imx8mq-vpu is kept for backwards compatibility
	 * but it's deprecated. Please update your DTS file to use
	 * nxp,imx8mq-vpu-g1 or nxp,imx8mq-vpu-g2 instead.
	 */
	if (of_device_is_compatible(pdev->dev.of_node, "nxp,imx8mq-vpu"))
		dev_warn(&pdev->dev, "%s compatible is deprecated\n",
			 match->compatible);

	INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);

	vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
				   sizeof(*vpu->clocks), GFP_KERNEL);
	if (!vpu->clocks)
		return -ENOMEM;

	if (vpu->variant->num_clocks > 1) {
		for (i = 0; i < vpu->variant->num_clocks; i++)
			vpu->clocks[i].id = vpu->variant->clk_names[i];

		ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
					vpu->clocks);
		if (ret)
			return ret;
	} else {
		/*
		 * If the driver has a single clk, chances are there will be no
		 * actual name in the DT bindings.
		 */
		vpu->clocks[0].clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(vpu->clocks[0].clk))
			return PTR_ERR(vpu->clocks[0].clk);
	}

	vpu->resets = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
	if (IS_ERR(vpu->resets))
		return PTR_ERR(vpu->resets);

	num_bases = vpu->variant->num_regs ?: 1;
	vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
				      sizeof(*vpu->reg_bases), GFP_KERNEL);
	if (!vpu->reg_bases)
		return -ENOMEM;

	for (i = 0; i < num_bases; i++) {
		vpu->reg_bases[i] = vpu->variant->reg_names ?
			devm_platform_ioremap_resource_byname(pdev, vpu->variant->reg_names[i]) :
			devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(vpu->reg_bases[i]))
			return PTR_ERR(vpu->reg_bases[i]);
	}
	vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
	vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;

	/*
	 * TODO: Eventually allow taking advantage of full 64-bit address space.
	 * Until then we assume the MSB portion of buffers' base addresses is
	 * always 0 due to this masking operation.
	 */
	ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
		return ret;
	}
	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));

	for (i = 0; i < vpu->variant->num_irqs; i++) {
		const char *irq_name;
		int irq;

		if (!vpu->variant->irqs[i].handler)
			continue;

		if (vpu->variant->num_irqs > 1) {
			irq_name = vpu->variant->irqs[i].name;
			irq = platform_get_irq_byname(vpu->pdev, irq_name);
		} else {
			/*
			 * If the driver has a single IRQ, chances are there
			 * will be no actual name in the DT bindings.
			 */
			irq_name = "default";
			irq = platform_get_irq(vpu->pdev, 0);
		}
		if (irq < 0)
			return irq;

		ret = devm_request_irq(vpu->dev, irq,
				       vpu->variant->irqs[i].handler, 0,
				       dev_name(vpu->dev), vpu);
		if (ret) {
			dev_err(vpu->dev, "Could not request %s IRQ.\n",
				irq_name);
			return ret;
		}
	}

	if (vpu->variant->init) {
		ret = vpu->variant->init(vpu);
		if (ret) {
			dev_err(&pdev->dev, "Failed to init VPU hardware\n");
			return ret;
		}
	}

	pm_runtime_set_autosuspend_delay(vpu->dev, 100);
	pm_runtime_use_autosuspend(vpu->dev);
	pm_runtime_enable(vpu->dev);

	ret = reset_control_deassert(vpu->resets);
	if (ret) {
		dev_err(&pdev->dev, "Failed to deassert resets\n");
		goto err_pm_disable;
	}

	ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clocks\n");
		goto err_rst_assert;
	}

	ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
		goto err_clk_unprepare;
	}
	platform_set_drvdata(pdev, vpu);

	vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(vpu->m2m_dev)) {
		v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(vpu->m2m_dev);
		goto err_v4l2_unreg;
	}

	vpu->mdev.dev = vpu->dev;
	strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
	media_device_init(&vpu->mdev);
	vpu->mdev.ops = &hantro_m2m_media_ops;
	vpu->v4l2_dev.mdev = &vpu->mdev;

	ret = hantro_add_enc_func(vpu);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register encoder\n");
		goto err_m2m_rel;
	}

	ret = hantro_add_dec_func(vpu);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register decoder\n");
		goto err_rm_enc_func;
	}

	ret = media_device_register(&vpu->mdev);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
		goto err_rm_dec_func;
	}

	return 0;

err_rm_dec_func:
	hantro_remove_dec_func(vpu);
err_rm_enc_func:
	hantro_remove_enc_func(vpu);
err_m2m_rel:
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
err_v4l2_unreg:
	v4l2_device_unregister(&vpu->v4l2_dev);
err_clk_unprepare:
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
err_rst_assert:
	reset_control_assert(vpu->resets);
err_pm_disable:
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
	return ret;
}

static void hantro_remove(struct platform_device *pdev)
{
	struct hantro_dev *vpu = platform_get_drvdata(pdev);

	v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);

	media_device_unregister(&vpu->mdev);
	hantro_remove_dec_func(vpu);
	hantro_remove_enc_func(vpu);
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
	v4l2_device_unregister(&vpu->v4l2_dev);
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
	reset_control_assert(vpu->resets);
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
}

#ifdef CONFIG_PM
static int hantro_runtime_resume(struct device *dev)
{
	struct hantro_dev *vpu = dev_get_drvdata(dev);

	if (vpu->variant->runtime_resume)
		return vpu->variant->runtime_resume(vpu);

	return 0;
}
#endif

static const struct dev_pm_ops hantro_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
};

static struct platform_driver hantro_driver = {
	.probe = hantro_probe,
	.remove_new = hantro_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_hantro_match,
		.pm = &hantro_pm_ops,
	},
};
module_platform_driver(hantro_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_DESCRIPTION("Hantro VPU codec driver");