
/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/dma-mapping.h>
	.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)),	\
	.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)),	\

	u32 x_shft;	/* unused X-bits (as part of bpp) */
	u32 y_shft;	/* unused Y-bits (as part of bpp) */
/* lookup table for registers w/ per-engine instances */
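/*
 * Note on the sDMA helpers below: the probe code enables an optional
 * work-around for TI erratum i878 that avoids direct CPU access to the
 * DMM registers. When dmm_workaround is set, single-word register reads
 * and writes are instead proxied through a DMA_MEMCPY channel and a
 * coherent bounce word (wa_dma_data/wa_dma_handle).
 */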
	tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
	if (!tx) {
		dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
		return -EIO;
	}

	status = dma_sync_wait(dmm->wa_dma_chan, cookie);
	if (status != DMA_COMPLETE)
		dev_err(dmm->dev, "i878 wa DMA copy failure\n");

	dmaengine_terminate_all(dmm->wa_dma_chan);
	src = dmm->phys_base + reg;
	dst = dmm->wa_dma_handle;

	r = dmm_dma_copy(dmm, src, dst);
	if (r) {
		dev_err(dmm->dev, "sDMA read transfer timeout\n");
		return readl(dmm->base + reg);
	}

	return readl(dmm->wa_dma_data);
	writel(val, dmm->wa_dma_data);

	src = dmm->wa_dma_handle;
	dst = dmm->phys_base + reg;

	r = dmm_dma_copy(dmm, src, dst);
	if (r) {
		dev_err(dmm->dev, "sDMA write transfer timeout\n");
		writel(val, dmm->base + reg);
	}
	if (dmm->dmm_workaround) {
		spin_lock_irqsave(&dmm->wa_lock, flags);
		v = dmm_read_wa(dmm, reg);
		spin_unlock_irqrestore(&dmm->wa_lock, flags);
		return v;
	}

	return readl(dmm->base + reg);
	if (dmm->dmm_workaround) {
		spin_lock_irqsave(&dmm->wa_lock, flags);
		dmm_write_wa(dmm, val, reg);
		spin_unlock_irqrestore(&dmm->wa_lock, flags);
	} else {
		writel(val, dmm->base + reg);
	}
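/*
 * All DMM register traffic in this driver goes through dmm_read() and
 * dmm_write() above: with dmm_workaround set they take the spinlocked
 * sDMA path, otherwise they are plain readl()/writel() on the mapped
 * register window.
 */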
	spin_lock_init(&dmm->wa_lock);

	dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
					      &dmm->wa_dma_handle, GFP_KERNEL);
	if (!dmm->wa_dma_data)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!dmm->wa_dma_chan) {
		dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data,
				  dmm->wa_dma_handle);
		return -ENODEV;
	}
	dma_release_channel(dmm->wa_dma_chan);

	dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
	struct refill_engine *engine = txn->engine_handle;

	/* dmm programming requires 16 byte aligned addresses */
	txn->current_pa = round_up(txn->current_pa, 16);
	txn->current_va = (void *)round_up((long)txn->current_va, 16);

	ptr = txn->current_va;
	*pa = txn->current_pa;

	txn->current_pa += sz;
	txn->current_va += sz;

	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
	struct dmm *dmm = engine->dmm;

	/*
	 * Poll PAT_STATUS until the wait mask is satisfied, an error is
	 * flagged, or the retry budget runs out.
	 */
	r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
	if (r & DMM_PATSTATUS_ERR) {
		dev_err(dmm->dev,
			"%s: error (engine%d). PAT_STATUS: 0x%08x\n",
			__func__, engine->id, r);
		return -EFAULT;
	}

	if (--i == 0) {
		dev_err(dmm->dev,
			"%s: timeout (engine%d). PAT_STATUS: 0x%08x\n",
			__func__, engine->id, r);
		return -ETIMEDOUT;
	}
	list_add(&engine->idle_node, &omap_dmm->idle_head);

	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);
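/*
 * engine_counter plus engine_queue act as a counting semaphore over the
 * refill engines: release_engine() puts an engine back on the idle list,
 * bumps the counter and wakes any caller sleeping in dmm_txn_init().
 */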
	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_ERR_MASK)
			dev_err(dmm->dev,
				"irq error(engine%d): IRQSTAT 0x%02x\n",
				i, status & 0xff);

		if (dmm->engines[i].async)
			release_engine(&dmm->engines[i]);

		complete(&dmm->engines[i].compl);

		status >>= 8;
	}
	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
				    idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	txn = &engine->txn;
	engine->tcm = tcm;
	txn->engine_handle = engine;
	txn->last_pat = NULL;
	txn->current_va = engine->refill_va;
	txn->current_pa = engine->refill_pa;
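/*
 * dmm_txn_init() claims an idle refill engine (sleeping until one is
 * released) and resets the transaction's bump-allocator cursors to the
 * start of that engine's refill buffer; alloc_dma() above then carves
 * descriptors out of it.
 */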
	struct refill_engine *engine = txn->engine_handle;
	int columns = (1 + area->x1 - area->x0);
	int rows = (1 + area->y1 - area->y0);
	int i = columns * rows;

	pat = alloc_dma(txn, sizeof(*pat), &pat_pa);

	if (txn->last_pat)
		txn->last_pat->next_pa = (u32)pat_pa;

	pat->area = *area;

	/* adjust Y coordinates based off of container parameters */
	pat->area.y0 += engine->tcm->y_offset;
	pat->area.y1 += engine->tcm->y_offset;

	pat->ctrl = (struct pat_ctrl){
		.start = 1,
		.lut_id = engine->tcm->lut_id,
	};

	data = alloc_dma(txn, 4 * i, &data_pa);
	/* FIXME: what if data_pa is more than 32-bit ? */
	pat->data_pa = data_pa;

	while (i--) {
		int n = i + roll;
		if (n >= npages)
			n -= npages;
		data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
	}

	txn->last_pat = pat;
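/*
 * Each append therefore adds one PAT descriptor plus its page-address
 * array to the chain via next_pa, with dummy_pa standing in for slots
 * that have no backing page; the DMM walks this chain when the
 * transaction is committed below.
 */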
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;

	if (!txn->last_pat) {
		dev_err(engine->dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}

	/* terminate the descriptor chain */
	txn->last_pat->next_pa = 0;
	/*
	 * NOTE: a plain wmb() should suffice, but there seems to be a bug
	 * in OMAP's memory barrier implementation, which in some rare cases may
	 * cause writes not to be observable after wmb(); reading the value
	 * back forces the descriptor writes to complete.
	 */
	readl(&txn->last_pat->next_pa);

	/* write to PAT_DESCR to clear out any pending transaction */
	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);

	ret = wait_status(engine, DMM_PATSTATUS_READY);
	if (ret) {
		ret = -EFAULT;
		goto cleanup;
	}

	/* mark whether it is async to denote list management in IRQ handler */
	engine->async = wait ? false : true;
	reinit_completion(&engine->compl);

	/* kick reload */
	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

	if (wait &&
	    !wait_for_completion_timeout(&engine->compl,
					 msecs_to_jiffies(100))) {
		dev_err(dmm->dev, "timed out waiting for done\n");
		ret = -ETIMEDOUT;
	}
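/*
 * A synchronous commit (wait == true) blocks on the engine's completion,
 * which the IRQ handler signals; an asynchronous commit instead lets the
 * IRQ handler release the engine once the refill finishes.
 */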
	txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;
	ret = fill(&block->area, pages, npages, roll, wait);
	/* refill with no pages, i.e. point every slot back at the dummy page */
	return fill(&block->area, NULL, 0, 0, false);
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = fmt;

	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
			     &block->area);
	if (ret) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = TILFMT_PAGE;

	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
			   &block->area)) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	list_add(&block->alloc_node, &omap_dmm->alloc_head);
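/*
 * 1D reservations are sized in whole pages and always come from the
 * page-mode container (containers[TILFMT_PAGE]), whereas the 2D path
 * above allocates from the container matching the requested format.
 */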
	int ret = tcm_free(&block->area);

	/* if the area is still marked in use, the free failed */
	if (block->area.tcm)
		dev_err(omap_dmm->dev, "failed to release block\n");

	list_del(&block->alloc_node);
 * [28:27] = 0x0 for 8-bit tiled
 *	     0x1 for 16-bit tiled
 *	     0x2 for 32-bit tiled
 *	     0x3 for page mode
 * [31:29] = 0x0 for 0-degree view
 *	     0x1 for 180-degree view + mirroring
 *	     0x2 for 0-degree view + mirroring
 *	     0x3 for 180-degree view
 *	     0x4 for 270-degree view + mirroring
 *	     0x5 for 270-degree view
 *	     0x6 for 90-degree view
 *	     0x7 for 90-degree view + mirroring
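/*
 * Worked example for the layout above (illustrative): a 16-bit tiled,
 * 90-degree + mirrored view has [28:27] = 0x1 and [31:29] = 0x7, so its
 * view offset within the TILER address space is
 * (0x7 << 29) | (0x1 << 27) = 0xe8000000, to which the x/y-derived
 * offset computed by tiler_get_address() below is added.
 */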
	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
	BUG_ON(!validfmt(block->fmt));

	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
			block->area.p0.x * geom[block->fmt].slot_w,
			block->area.p0.y * geom[block->fmt].slot_h);
	struct tcm_pt *p = &block->area.p0;

	BUG_ON(!validfmt(block->fmt));

	return tiler_get_address(block->fmt, orient,
			(p->x * geom[block->fmt].slot_w) + x,
			(p->y * geom[block->fmt].slot_h) + y);
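/*
 * Putting the pieces together: a minimal usage sketch, assuming the
 * parameter orders visible in this file (fmt/w/h/align for
 * tiler_reserve_2d(), pages/npages/roll/wait for tiler_pin()).
 * tiler_ssptr() yields the 0-degree system-space address of a block,
 * while tiler_tsptr() gives a rotated/mirrored view of the same slots:
 *
 *	block = tiler_reserve_2d(TILFMT_16BIT, w, h, 0);
 *	if (IS_ERR(block))
 *		return PTR_ERR(block);
 *
 *	ret = tiler_pin(block, pages, npages, 0, true);
 *	if (ret) {
 *		tiler_release(block);
 *		return ret;
 *	}
 *
 *	paddr = tiler_ssptr(block);
 */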
	return omap_dmm->plat_data->cpu_cache_flags;
	free_irq(omap_dmm->irq, omap_dmm);

	list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
				 alloc_node) {
		list_del(&block->alloc_node);
		kfree(block);
	}

	for (i = 0; i < omap_dmm->num_lut; i++)
		if (omap_dmm->tcm && omap_dmm->tcm[i])
			omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
	kfree(omap_dmm->tcm);

	kfree(omap_dmm->engines);
	if (omap_dmm->refill_va)
		dma_free_wc(omap_dmm->dev,
			    REFILL_BUFFER_SIZE * omap_dmm->num_engines,
			    omap_dmm->refill_va, omap_dmm->refill_pa);
	if (omap_dmm->dummy_page)
		__free_page(omap_dmm->dummy_page);

	if (omap_dmm->dmm_workaround)
		dmm_workaround_uninit(omap_dmm);

	iounmap(omap_dmm->base);
	int ret = -EFAULT, i;

	INIT_LIST_HEAD(&omap_dmm->alloc_head);
	INIT_LIST_HEAD(&omap_dmm->idle_head);

	init_waitqueue_head(&omap_dmm->engine_queue);
	if (dev->dev.of_node) {
		match = of_match_node(dmm_of_match, dev->dev.of_node);
		if (!match) {
			dev_err(&dev->dev, "failed to find matching device node\n");
			ret = -ENODEV;
			goto fail;
		}

		omap_dmm->plat_data = match->data;
	}
	/* lookup hwmod data - base address and irq */
	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&dev->dev, "failed to get base address resource\n");
		goto fail;
	}

	omap_dmm->phys_base = mem->start;
	omap_dmm->base = ioremap(mem->start, SZ_2K);
	if (!omap_dmm->base) {
		dev_err(&dev->dev, "failed to get dmm base address\n");
		goto fail;
	}

	omap_dmm->irq = platform_get_irq(dev, 0);
	if (omap_dmm->irq < 0) {
		dev_err(&dev->dev, "failed to get IRQ resource\n");
		goto fail;
	}

	omap_dmm->dev = &dev->dev;
	if (!dmm_workaround_init(omap_dmm)) {
		omap_dmm->dmm_workaround = true;
		dev_info(&dev->dev,
			 "workaround for errata i878 in use\n");
	} else {
		dev_warn(&dev->dev,
			 "failed to initialize work-around for i878\n");
	}
	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
	omap_dmm->container_width = 256;
	omap_dmm->container_height = 128;

	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

	/* read out actual LUT width and height */
	pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;

	if (omap_dmm->lut_height != omap_dmm->container_height)
		omap_dmm->num_lut++;
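/*
 * A LUT height that differs from the fixed 128-slot container height
 * (as on OMAP5, where one physical LUT stacks two containers) means an
 * extra tcm instance is needed; it is wired up as the page-mode
 * container further below.
 */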
	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {
		dev_err(&dev->dev, "could not allocate dummy page\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* set dma mask for device */
	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;

	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
	/* alloc refill memory */
	omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				&omap_dmm->refill_pa, GFP_KERNEL);
	if (!omap_dmm->refill_va) {
		dev_err(&dev->dev, "could not allocate refill memory\n");
		ret = -ENOMEM;
		goto fail;
	}
	/* alloc engines */
	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
				    sizeof(*omap_dmm->engines), GFP_KERNEL);
	if (!omap_dmm->engines) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < omap_dmm->num_engines; i++) {
		omap_dmm->engines[i].id = i;
		omap_dmm->engines[i].dmm = omap_dmm;
		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
						(REFILL_BUFFER_SIZE * i);
		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
						(REFILL_BUFFER_SIZE * i);
		init_completion(&omap_dmm->engines[i].compl);

		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
	}
	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
				GFP_KERNEL);
	if (!omap_dmm->tcm) {
		ret = -ENOMEM;
		goto fail;
	}

	/* init containers: one TCM (container manager) per LUT, used to
	 * allocate 1D and 2D blocks */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
					     omap_dmm->container_height);
		if (!omap_dmm->tcm[i]) {
			dev_err(&dev->dev, "failed to allocate container\n");
			ret = -ENOMEM;
			goto fail;
		}

		omap_dmm->tcm[i]->lut_id = i;
	}
	/* assign access-mode containers:
	 * OMAP 4 has 1 container for all 4 views,
	 * OMAP 5 has 2 containers, 1 for 2D and 1 for 1D
	 */
	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
	containers[TILFMT_32BIT] = omap_dmm->tcm[0];

	if (omap_dmm->container_height != omap_dmm->lut_height) {
		/* the second container serves page mode; its y coordinates
		 * are offset into the second half of the physical LUT */
		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
		omap_dmm->tcm[1]->lut_id = 0;
	} else {
		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
	}
	area = (struct tcm_area) {
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};
	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
			  "omap_dmm_irq_handler", omap_dmm);
	if (ret) {
		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
			omap_dmm->irq, ret);
		omap_dmm->irq = -1;
		goto fail;
	}
	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed");
	}
	dev_info(omap_dmm->dev, "initialized all PAT entries\n");
	return 0;

fail:
	if (omap_dmm_remove(dev))
		dev_err(&dev->dev, "cleanup failed\n");
static const char *special = ".,:;'\"`~!^-+";
	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
			map[y][x] = c;

	map[p->y / ydiv][p->x / xdiv] = c;

	return map[p->y / ydiv][p->x / xdiv];

	return (x1 / xdiv) - (x0 / xdiv) + 1;

	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
	if (a->p0.y + 1 < a->p1.y) {
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
			 256 - 1);
	} else if (a->p0.y < a->p1.y) {
		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
			text_map(map, xdiv, nice, a->p0.y / ydiv,
				 a->p0.x + xdiv, 256 - 1);
		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
			text_map(map, xdiv, nice, a->p1.y / ydiv,
				 0, a->p1.y - xdiv);
	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
	}
	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
			 a->p0.x, a->p1.x);
	h_adj = omap_dmm->container_height / ydiv;
	w_adj = omap_dmm->container_width / xdiv;

	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
		/* set up one row pointer per line of the ASCII map */
		for (i = 0; i < omap_dmm->container_height; i++) {
			map[i] = global_map + i * (w_adj + 1);
			map[i][w_adj] = 0;
		}

		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
			if (block->area.tcm != omap_dmm->tcm[lut_idx])
				continue;

			if (block->fmt != TILFMT_PAGE) {
				fill_map(map, xdiv, ydiv, &block->area, *m2dp, true);
				map_2d_info(map, xdiv, ydiv, nice, &block->area);
			} else {
				bool start = read_map_pt(map, xdiv, ydiv, &block->area.p0) == ' ';
				bool end = read_map_pt(map, xdiv, ydiv, &block->area.p1) == ' ';

				tcm_for_each_slice(a, block->area, p)
					fill_map(map, xdiv, ydiv, &a, '=', true);
				fill_map_pt(map, xdiv, ydiv, &block->area.p0,
					    start ? '<' : 'X');
				fill_map_pt(map, xdiv, ydiv, &block->area.p1,
					    end ? '>' : 'X');
				map_1d_info(map, xdiv, ydiv, nice, &block->area);
			}
		}

		dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n", lut_idx);
		for (i = 0; i < h_adj; i++)
			dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
		dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n", lut_idx);
	}
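/*
 * The debugfs dump renders each container as an ASCII grid: 2D blocks
 * are filled with a marker character and labeled via map_2d_info(),
 * 1D blocks are drawn as '=' runs between '<' and '>' endpoints and
 * labeled via map_1d_info(), and the finished map is emitted row by
 * row with dev_dbg().
 */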
	if (!omap_dmm)
		return -ENODEV;

	area = (struct tcm_area) {
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed");
	}
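/*
 * The PAT LUT contents are not retained across suspend, so resume simply
 * reprograms every container with dummy-page entries, mirroring the
 * initialization done at probe time.
 */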
		.compatible = "ti,omap4-dmm",

		.compatible = "ti,omap5-dmm",
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");