// SPDX-License-Identifier: GPL-2.0+

#include <linux/dma-buf-map.h>

#include <drm/drm_atomic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_writeback.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>

#include "vkms_drv.h"

static const u32 vkms_wb_formats[] = {
	DRM_FORMAT_XRGB8888,
};

static const struct drm_connector_funcs vkms_wb_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

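/*
 * Only accept writeback jobs whose framebuffer matches the CRTC mode
 * exactly and whose pixel format is one of vkms_wb_formats (currently
 * XRGB8888 only).
 */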
static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	struct drm_framebuffer *fb;
	const struct drm_display_mode *mode = &crtc_state->mode;

	if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
		return 0;

	fb = conn_state->writeback_job->fb;
	if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
		DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
			      fb->width, fb->height);
		return -EINVAL;
	}

	if (fb->format->format != vkms_wb_formats[0]) {
		DRM_DEBUG_KMS("Invalid pixel format %p4cc\n",
			      &fb->format->format);
		return -EINVAL;
	}

	return 0;
}

static const struct drm_encoder_helper_funcs vkms_wb_encoder_helper_funcs = {
	.atomic_check = vkms_wb_encoder_atomic_check,
};

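/*
 * A writeback connector has no EDID to probe, so report the standard
 * modes up to the device's configured maximum resolution.
 */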
static int vkms_wb_connector_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	return drm_add_modes_noedid(connector, dev->mode_config.max_width,
				    dev->mode_config.max_height);
}

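/*
 * Map the job's target framebuffer into kernel address space ahead of the
 * commit; the mapping is kept in the per-job vkms_writeback_job, which is
 * stored in job->priv so the composer can write into it later.
 */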
static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector,
			       struct drm_writeback_job *job)
{
	struct vkms_writeback_job *vkmsjob;
	int ret;

	if (!job->fb)
		return 0;

	vkmsjob = kzalloc(sizeof(*vkmsjob), GFP_KERNEL);
	if (!vkmsjob)
		return -ENOMEM;

	ret = drm_gem_fb_vmap(job->fb, vkmsjob->map, vkmsjob->data);
	if (ret) {
		DRM_ERROR("vmap failed: %d\n", ret);
		goto err_kfree;
	}

	job->priv = vkmsjob;

	return 0;

err_kfree:
	kfree(vkmsjob);
	return ret;
}

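/*
 * Counterpart of vkms_wb_prepare_job(): unmap the framebuffer, disable the
 * composer that vkms_wb_atomic_commit() enabled, and free the per-job state.
 */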
static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
				struct drm_writeback_job *job)
{
	struct vkms_writeback_job *vkmsjob = job->priv;
	struct vkms_device *vkmsdev;

	if (!job->fb)
		return;

	drm_gem_fb_vunmap(job->fb, vkmsjob->map);

	vkmsdev = drm_device_to_vkms_device(job->fb->dev);
	vkms_set_composer(&vkmsdev->output, false);
	kfree(vkmsjob);
}

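/*
 * Point the CRTC's composer state at the prepared writeback job so the next
 * composition lands in the job's framebuffer, then queue the job on the
 * writeback connector.
 */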
static void vkms_wb_atomic_commit(struct drm_connector *conn,
				  struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											 conn);
	struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev);
	struct vkms_output *output = &vkmsdev->output;
	struct drm_writeback_connector *wb_conn = &output->wb_connector;
	struct drm_connector_state *conn_state = wb_conn->base.state;
	struct vkms_crtc_state *crtc_state = output->composer_state;

	if (!conn_state)
		return;

	vkms_set_composer(&vkmsdev->output, true);

	spin_lock_irq(&output->composer_lock);
	crtc_state->active_writeback = conn_state->writeback_job->priv;
	crtc_state->wb_pending = true;
	spin_unlock_irq(&output->composer_lock);
	drm_writeback_queue_job(wb_conn, connector_state);
}

static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
	.get_modes = vkms_wb_connector_get_modes,
	.prepare_writeback_job = vkms_wb_prepare_job,
	.cleanup_writeback_job = vkms_wb_cleanup_job,
	.atomic_commit = vkms_wb_atomic_commit,
};

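/*
 * Create the virtual writeback connector and its encoder, restricted to the
 * single VKMS CRTC, and register them with the DRM device.
 */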
int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
{
	struct drm_writeback_connector *wb = &vkmsdev->output.wb_connector;

	vkmsdev->output.wb_connector.encoder.possible_crtcs = 1;
	drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs);

	return drm_writeback_connector_init(&vkmsdev->drm, wb,
					    &vkms_wb_connector_funcs,
					    &vkms_wb_encoder_helper_funcs,
					    vkms_wb_formats,
					    ARRAY_SIZE(vkms_wb_formats));
}