// SPDX-License-Identifier: GPL-2.0
/*
 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
 * Author: James.Qian.Wang <james.qian.wang@arm.com>
 *
 */
#include <linux/component.h>
#include <linux/interrupt.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "komeda_dev.h"
#include "komeda_framebuffer.h"
#include "komeda_kms.h"

DEFINE_DRM_GEM_DMA_FOPS(komeda_cma_fops);

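/*
 * Dumb-buffer allocation. The only komeda-specific step is aligning the byte
 * pitch to the chip's bus width before handing off to the generic DMA helper.
 * Illustrative example (assuming a 16-byte bus_width, which is not fixed by
 * this file): a 1366-pixel-wide XRGB8888 buffer has a raw pitch of
 * 1366 * 4 = 5464 bytes, which is rounded up to 5472.
 */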
static int komeda_gem_dma_dumb_create(struct drm_file *file,
				      struct drm_device *dev,
				      struct drm_mode_create_dumb *args)
{
	struct komeda_dev *mdev = dev->dev_private;
	u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->pitch = ALIGN(pitch, mdev->chip.bus_width);

	return drm_gem_dma_dumb_create_internal(file, dev, args);
}

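/*
 * Shared interrupt handler. The chip-specific irq_handler() decodes the raw
 * hardware status into a komeda_events bitmask, which is then passed to every
 * CRTC so it can handle its vblank/flip-completion events.
 */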
static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
{
	struct drm_device *drm = data;
	struct komeda_dev *mdev = drm->dev_private;
	struct komeda_kms_dev *kms = to_kdev(drm);
	struct komeda_events evts;
	irqreturn_t status;
	u32 i;

	/* Call into the CHIP to recognize events */
	memset(&evts, 0, sizeof(evts));
	status = mdev->funcs->irq_handler(mdev, &evts);

	komeda_print_events(&evts, drm);

	/* Notify the CRTCs to handle the events */
	for (i = 0; i < kms->n_crtcs; i++)
		komeda_crtc_handle_event(&kms->crtcs[i], &evts);

	return status;
}

static const struct drm_driver komeda_kms_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.lastclose = drm_fb_helper_lastclose,
	DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create),
	.fops = &komeda_cma_fops,
	.name = "komeda",
	.desc = "Arm Komeda Display Processor driver",
	.date = "20181101",
	.major = 0,
	.minor = 1,
};

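/*
 * Komeda's wrapper around drm_atomic_helper_commit_hw_done(): before
 * signalling hw_done, flush every active CRTC and wait for its flip to
 * complete, reusing the pending flip event's completion when one is queued.
 */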
static void komeda_kms_atomic_commit_hw_done(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct komeda_kms_dev *kms = to_kdev(dev);
	int i;

	for (i = 0; i < kms->n_crtcs; i++) {
		struct komeda_crtc *kcrtc = &kms->crtcs[i];

		if (kcrtc->base.state->active) {
			struct completion *flip_done = NULL;

			if (kcrtc->base.state->event)
				flip_done = kcrtc->base.state->event->base.completion;
			komeda_crtc_flush_and_wait_for_flip_done(kcrtc, flip_done);
		}
	}
	drm_atomic_helper_commit_hw_done(state);
}

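/*
 * Custom commit tail. The ordering matches the default helper (modeset
 * disables, plane updates, modeset enables), but hw_done is signalled via the
 * komeda-specific wrapper above, and the fence-signalling critical section is
 * annotated with dma_fence_begin/end_signalling().
 */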
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	bool fence_cookie = dma_fence_begin_signalling();

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	komeda_kms_atomic_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	dma_fence_end_signalling(fence_cookie);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}

static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
	.atomic_commit_tail = komeda_kms_commit_tail,
};

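/*
 * Insert a plane state into zorder_list, keeping the list sorted by
 * increasing zpos. For example, adding planes with zpos 2, 0, 1 (in that
 * order) yields the list 0 -> 1 -> 2; two planes sharing the same zpos are
 * rejected with -EINVAL.
 */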
static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
				       struct list_head *zorder_list)
{
	struct komeda_plane_state *new = to_kplane_st(plane_st);
	struct komeda_plane_state *node, *last;

	last = list_empty(zorder_list) ?
	       NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);

	/* The list is kept sorted by increasing zpos, so if the list is empty
	 * or the zpos of the new node is bigger than the last node's, there is
	 * no need to loop; just insert the new node at the tail of the list.
	 */
	if (!last || (new->base.zpos > last->base.zpos)) {
		list_add_tail(&new->zlist_node, zorder_list);
		return 0;
	}

	/* Keep the list sorted by increasing zpos */
	list_for_each_entry(node, zorder_list, zlist_node) {
		if (new->base.zpos < node->base.zpos) {
			list_add_tail(&new->zlist_node, &node->zlist_node);
			break;
		} else if (node->base.zpos == new->base.zpos) {
			struct drm_plane *a = node->base.plane;
			struct drm_plane *b = new->base.plane;

			/* Komeda doesn't support assigning the same zpos to
			 * different planes.
			 */
			DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured with the same zpos: %d.\n",
					 a->name, b->name, node->base.zpos);
			return -EINVAL;
		}
	}

	return 0;
}

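/*
 * Re-compute normalized_zpos for every plane on this CRTC. User-supplied zpos
 * values only need to be unique, not contiguous; the normalized values are
 * assigned 0, 1, 2, ... in zpos order, and a layer_split plane consumes two
 * consecutive slots. Worked example: planes with zpos 1, 4, 7 where the
 * middle one is split normalize to 0, 1 (with 2 reserved for its right layer)
 * and 3.
 */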
static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
				      struct drm_crtc_state *crtc_st)
{
	struct drm_atomic_state *state = crtc_st->state;
	struct komeda_crtc *kcrtc = to_kcrtc(crtc);
	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
	struct komeda_plane_state *kplane_st;
	struct drm_plane_state *plane_st;
	struct drm_plane *plane;
	struct list_head zorder_list;
	int order = 0, err;

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
			 crtc->base.id, crtc->name);

	INIT_LIST_HEAD(&zorder_list);

	/* This loop also adds all affected planes to the new state */
	drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
		plane_st = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_st))
			return PTR_ERR(plane_st);

		/* Build a list sorted by increasing zpos */
		err = komeda_plane_state_list_add(plane_st, &zorder_list);
		if (err)
			return err;
	}

	kcrtc_st->max_slave_zorder = 0;

	list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
		plane_st = &kplane_st->base;
		plane = plane_st->plane;

		plane_st->normalized_zpos = order++;
		/* When layer_split is enabled, one plane is handled by two
		 * separate komeda layers (left/right), which may need two
		 * zorders:
		 * - zorder: for the left layer, covering the left part of the
		 *   display.
		 * - zorder + 1: reserved for the right layer.
		 */
		if (to_kplane_st(plane_st)->layer_split)
			order++;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n",
				 plane->base.id, plane->name,
				 plane_st->zpos, plane_st->normalized_zpos);

		/* calculate the maximum slave zorder */
		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
			kcrtc_st->max_slave_zorder =
				max(plane_st->normalized_zpos,
				    kcrtc_st->max_slave_zorder);
	}

	crtc_st->zpos_changed = true;

	return 0;
}

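/*
 * Atomic check. After the standard modeset check, every plane on an updated
 * CRTC is pulled into the state (komeda re-assigns pipeline resources on each
 * commit) and the zpos values are normalized before the plane checks run.
 */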
static int komeda_kms_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_st;
	int i, err;

	err = drm_atomic_helper_check_modeset(dev, state);
	if (err)
		return err;

	/* Komeda needs to re-calculate the resource assignment on every
	 * commit, so all affected planes (even unchanged ones) have to be
	 * added to the drm_atomic_state.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
		err = drm_atomic_add_affected_planes(state, crtc);
		if (err)
			return err;

		err = komeda_crtc_normalize_zpos(crtc, new_crtc_st);
		if (err)
			return err;
	}

	err = drm_atomic_helper_check_planes(dev, state);
	if (err)
		return err;

	return 0;
}

static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
	.fb_create = komeda_fb_create,
	.atomic_check = komeda_kms_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
					struct komeda_dev *mdev)
{
	struct drm_mode_config *config = &kms->base.mode_config;

	drm_mode_config_init(&kms->base);

	komeda_kms_setup_crtcs(kms, mdev);

	/* Get value from dev */
	config->min_width = 0;
	config->min_height = 0;
	config->max_width = 4096;
	config->max_height = 4096;

	config->funcs = &komeda_mode_config_funcs;
	config->helper_private = &komeda_mode_config_helpers;
}

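/*
 * Bring up the KMS device: allocate the drm_device, build the mode config
 * (CRTCs, planes, private objects, writeback connectors), bind the component
 * sub-devices, install the shared IRQ handler and register the device. On
 * failure, the labels at the end unwind in reverse order of setup.
 */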
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
	struct komeda_kms_dev *kms;
	struct drm_device *drm;
	int err;

	kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver,
				 struct komeda_kms_dev, base);
	if (IS_ERR(kms))
		return kms;

	drm = &kms->base;

	drm->dev_private = mdev;

	komeda_kms_mode_config_init(kms, mdev);

	err = komeda_kms_add_private_objs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_planes(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = drm_vblank_init(drm, kms->n_crtcs);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_crtcs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_wb_connectors(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = component_bind_all(mdev->dev, kms);
	if (err)
		goto cleanup_mode_config;

	drm_mode_config_reset(drm);

	err = devm_request_irq(drm->dev, mdev->irq,
			       komeda_kms_irq_handler, IRQF_SHARED,
			       drm->driver->name, drm);
	if (err)
		goto free_component_binding;

	drm_kms_helper_poll_init(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto free_interrupts;

	return kms;

free_interrupts:
	drm_kms_helper_poll_fini(drm);
free_component_binding:
	component_unbind_all(mdev->dev, drm);
cleanup_mode_config:
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
	return ERR_PTR(err);
}

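/*
 * Tear down the KMS device, roughly mirroring komeda_kms_attach() in reverse:
 * unregister, stop connector polling, shut down the atomic state, unbind the
 * components and release the mode config resources.
 */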
void komeda_kms_detach(struct komeda_kms_dev *kms)
{
	struct drm_device *drm = &kms->base;
	struct komeda_dev *mdev = drm->dev_private;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	drm_atomic_helper_shutdown(drm);
	component_unbind_all(mdev->dev, drm);
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
}