// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob@ti.com>
 *         Andy Gross <andy.gross@ti.com>
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"

#define DMM_DRIVER_NAME "dmm"

/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;

#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[];
#endif

/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);

/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
		.x_shft = (xshift), \
		.y_shft = (yshift), \
		.cpp = (bytes_per_pixel), \
		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
	}

static const struct {
	u32 x_shft;	/* unused X-bits (as part of bpp) */
	u32 y_shft;	/* unused Y-bits (as part of bpp) */
	u32 cpp;	/* bytes/chars per pixel */
	u32 slot_w;	/* width of each slot (in pixels) */
	u32 slot_h;	/* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
	[TILFMT_8BIT]  = GEOM(0, 0, 1),
	[TILFMT_16BIT] = GEOM(0, 1, 2),
	[TILFMT_32BIT] = GEOM(1, 1, 4),
	[TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};


/* lookup table for registers w/ per-engine instances */
static const u32 reg[][4] = {
	[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
			DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
	[PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};

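/*
 * Copy a single 32-bit value between RAM and a DMM register using the
 * system DMA engine, waiting synchronously for completion (part of the
 * i878 errata workaround, see below).
 */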
static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
{
	struct dma_async_tx_descriptor *tx;
	enum dma_status status;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
	if (!tx) {
		dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
		return -EIO;
	}

	status = dma_sync_wait(dmm->wa_dma_chan, cookie);
	if (status != DMA_COMPLETE)
		dev_err(dmm->dev, "i878 wa DMA copy failure\n");

	dmaengine_terminate_all(dmm->wa_dma_chan);
	return 0;
}

static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
{
	dma_addr_t src, dst;
	int r;

	src = dmm->phys_base + reg;
	dst = dmm->wa_dma_handle;

	r = dmm_dma_copy(dmm, src, dst);
	if (r) {
		dev_err(dmm->dev, "sDMA read transfer timeout\n");
		return readl(dmm->base + reg);
	}

	/*
	 * As per i878 workaround, the DMA is used to access the DMM registers.
	 * Make sure that the readl is not moved by the compiler or the CPU
	 * earlier than the DMA finished writing the value to memory.
	 */
	rmb();
	return readl(dmm->wa_dma_data);
}

static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
{
	dma_addr_t src, dst;
	int r;

	writel(val, dmm->wa_dma_data);
	/*
	 * As per i878 workaround, the DMA is used to access the DMM registers.
	 * Make sure that the writel is not moved by the compiler or the CPU, so
	 * the data will be in place before we start the DMA to do the actual
	 * register write.
	 */
	wmb();

	src = dmm->wa_dma_handle;
	dst = dmm->phys_base + reg;

	r = dmm_dma_copy(dmm, src, dst);
	if (r) {
		dev_err(dmm->dev, "sDMA write transfer timeout\n");
		writel(val, dmm->base + reg);
	}
}

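/*
 * Register accessors: when the i878 workaround is active, all DMM register
 * accesses are proxied through the DMA engine (serialized by wa_lock),
 * otherwise plain MMIO is used.
 */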
static u32 dmm_read(struct dmm *dmm, u32 reg)
{
	if (dmm->dmm_workaround) {
		u32 v;
		unsigned long flags;

		spin_lock_irqsave(&dmm->wa_lock, flags);
		v = dmm_read_wa(dmm, reg);
		spin_unlock_irqrestore(&dmm->wa_lock, flags);

		return v;
	} else {
		return readl(dmm->base + reg);
	}
}

static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
	if (dmm->dmm_workaround) {
		unsigned long flags;

		spin_lock_irqsave(&dmm->wa_lock, flags);
		dmm_write_wa(dmm, val, reg);
		spin_unlock_irqrestore(&dmm->wa_lock, flags);
	} else {
		writel(val, dmm->base + reg);
	}
}

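/*
 * Set up the i878 workaround: allocate a coherent 32-bit bounce buffer
 * and grab a DMA memcpy channel for proxying register accesses.
 */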
static int dmm_workaround_init(struct dmm *dmm)
{
	dma_cap_mask_t mask;

	spin_lock_init(&dmm->wa_lock);

	dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
					      &dmm->wa_dma_handle, GFP_KERNEL);
	if (!dmm->wa_dma_data)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!dmm->wa_dma_chan) {
		dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
		return -ENODEV;
	}

	return 0;
}

static void dmm_workaround_uninit(struct dmm *dmm)
{
	dma_release_channel(dmm->wa_dma_chan);

	dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
}

/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
	void *ptr;
	struct refill_engine *engine = txn->engine_handle;

	/* dmm programming requires 16 byte aligned addresses */
	txn->current_pa = round_up(txn->current_pa, 16);
	txn->current_va = (void *)round_up((long)txn->current_va, 16);

	ptr = txn->current_va;
	*pa = txn->current_pa;

	txn->current_pa += sz;
	txn->current_va += sz;

	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

	return ptr;
}

/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, u32 wait_mask)
{
	struct dmm *dmm = engine->dmm;
	u32 r = 0, err, i;

	i = DMM_FIXED_RETRY_COUNT;
	while (true) {
		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
		err = r & DMM_PATSTATUS_ERR;
		if (err) {
			dev_err(dmm->dev,
				"%s: error (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -EFAULT;
		}

		if ((r & wait_mask) == wait_mask)
			break;

		if (--i == 0) {
			dev_err(dmm->dev,
				"%s: timeout (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -ETIMEDOUT;
		}

		udelay(1);
	}

	return 0;
}

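/*
 * Put a refill engine back on the idle list and wake up anyone waiting
 * in dmm_txn_init() for an engine to become available.
 */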
static void release_engine(struct refill_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&engine->idle_node, &omap_dmm->idle_head);
	spin_unlock_irqrestore(&list_lock, flags);

	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);
}

static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
	struct dmm *dmm = arg;
	u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
	int i;

	/* ack IRQ */
	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);

	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_ERR_MASK)
			dev_err(dmm->dev,
				"irq error(engine%d): IRQSTAT 0x%02x\n",
				i, status & 0xff);

		if (status & DMM_IRQSTAT_LST) {
			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);

			complete(&dmm->engines[i].compl);
		}

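		/* each engine's status flags occupy one byte of the register */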
		status >>= 8;
	}

	return IRQ_HANDLED;
}

/*
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
	struct dmm_txn *txn = NULL;
	struct refill_engine *engine = NULL;
	int ret;
	unsigned long flags;


	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
				idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	BUG_ON(!engine);

	txn = &engine->txn;
	engine->tcm = tcm;
	txn->engine_handle = engine;
	txn->last_pat = NULL;
	txn->current_va = engine->refill_va;
	txn->current_pa = engine->refill_pa;

	return txn;
}

/*
 * Add region to DMM transaction. If pages or pages[i] is NULL, then the
 * corresponding slot is cleared (ie. dummy_pa is programmed)
 */
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
		struct page **pages, u32 npages, u32 roll)
{
	dma_addr_t pat_pa = 0, data_pa = 0;
	u32 *data;
	struct pat *pat;
	struct refill_engine *engine = txn->engine_handle;
	int columns = (1 + area->x1 - area->x0);
	int rows = (1 + area->y1 - area->y0);
	int i = columns*rows;

	pat = alloc_dma(txn, sizeof(*pat), &pat_pa);

	if (txn->last_pat)
		txn->last_pat->next_pa = (u32)pat_pa;

	pat->area = *area;

	/* adjust Y coordinates based off of container parameters */
	pat->area.y0 += engine->tcm->y_offset;
	pat->area.y1 += engine->tcm->y_offset;

	pat->ctrl = (struct pat_ctrl){
		.start = 1,
		.lut_id = engine->tcm->lut_id,
	};

	data = alloc_dma(txn, 4*i, &data_pa);
	/* FIXME: what if data_pa is more than 32-bit ? */
	pat->data_pa = data_pa;

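	/*
	 * Fill the PAT data array with the physical address of each page.
	 * 'roll' rotates the page list, and NULL entries are programmed
	 * with the dummy page so the slot stays mapped.
	 */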
	while (i--) {
		int n = i + roll;
		if (n >= npages)
			n -= npages;
		data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
	}

	txn->last_pat = pat;

	return;
}

/*
 * Commit the DMM transaction.
 */
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
	int ret = 0;
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;

	if (!txn->last_pat) {
		dev_err(engine->dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}

	txn->last_pat->next_pa = 0;
	/* ensure that the written descriptors are visible to DMM */
	wmb();

	/*
	 * NOTE: the wmb() above should be enough, but there seems to be a bug
	 * in OMAP's memory barrier implementation, which in some rare cases may
	 * cause the writes not to be observable after wmb().
	 */

	/* read back to ensure the data is in RAM */
	readl(&txn->last_pat->next_pa);

	/* write to PAT_DESCR to clear out any pending transaction */
	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);

	/* wait for engine ready: */
	ret = wait_status(engine, DMM_PATSTATUS_READY);
	if (ret) {
		ret = -EFAULT;
		goto cleanup;
	}

	/* mark whether it is async to denote list management in IRQ handler */
	engine->async = wait ? false : true;
	reinit_completion(&engine->compl);
	/* verify that the irq handler sees the 'async' and completion value */
	smp_mb();

	/* kick reload */
	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

	if (wait) {
		if (!wait_for_completion_timeout(&engine->compl,
				msecs_to_jiffies(100))) {
			dev_err(dmm->dev, "timed out waiting for done\n");
			ret = -ETIMEDOUT;
			goto cleanup;
		}

		/* Check the engine status before continuing */
		ret = wait_status(engine, DMM_PATSTATUS_READY |
				  DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
	}

cleanup:
	/* only place engine back on list if we are done with it */
	if (ret || wait)
		release_engine(engine);

	return ret;
}

/*
 * DMM programming
 */
static int fill(struct tcm_area *area, struct page **pages,
		u32 npages, u32 roll, bool wait)
{
	int ret = 0;
	struct tcm_area slice, area_s;
	struct dmm_txn *txn;

	/*
	 * FIXME
	 *
	 * Asynchronous fill does not work reliably, as the driver does not
	 * handle errors in the async code paths. The fill operation may
	 * silently fail, leading to leaking DMM engines, which may eventually
	 * lead to deadlock if we run out of DMM engines.
	 *
	 * For now, always set 'wait' so that we only use sync fills. Async
	 * fills should be fixed, or alternatively we could decide to only
	 * support sync fills and so the whole async code path could be removed.
	 */

	wait = true;

	txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;

	tcm_for_each_slice(slice, *area, area_s) {
		struct pat_area p_area = {
			.x0 = slice.p0.x, .y0 = slice.p0.y,
			.x1 = slice.p1.x, .y1 = slice.p1.y,
		};

		dmm_txn_append(txn, &p_area, pages, npages, roll);

		roll += tcm_sizeof(slice);
	}

	ret = dmm_txn_commit(txn, wait);

	return ret;
}

/*
 * Pin/unpin
 */

/* note: slots for which pages[i] == NULL are filled w/ dummy page
 */
int tiler_pin(struct tiler_block *block, struct page **pages,
		u32 npages, u32 roll, bool wait)
{
	int ret;

	ret = fill(&block->area, pages, npages, roll, wait);

	if (ret)
		tiler_unpin(block);

	return ret;
}

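/* unpin: reprogram all of the block's slots back to the dummy page */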
int tiler_unpin(struct tiler_block *block)
{
	return fill(&block->area, NULL, 0, 0, false);
}

/*
 * Reserve/release
 */
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w,
		u16 h, u16 align)
{
	struct tiler_block *block;
	u32 min_align = 128;
	int ret;
	unsigned long flags;
	u32 slot_bytes;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return ERR_PTR(-ENOMEM);

	BUG_ON(!validfmt(fmt));

	/* convert width/height to slots */
	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
	h = DIV_ROUND_UP(h, geom[fmt].slot_h);

	/* convert alignment to slots */
	slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
	min_align = max(min_align, slot_bytes);
	align = (align > min_align) ? ALIGN(align, min_align) : min_align;
	align /= slot_bytes;

	block->fmt = fmt;

	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
			     &block->area);
	if (ret) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	/* add to allocation list */
	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}

struct tiler_block *tiler_reserve_1d(size_t size)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long flags;

	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = TILFMT_PAGE;

	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
			   &block->area)) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}

/* note: if you have pin'd pages, you should have already unpin'd first! */
int tiler_release(struct tiler_block *block)
{
	int ret = tcm_free(&block->area);
	unsigned long flags;

	if (block->area.tcm)
		dev_err(omap_dmm->dev, "failed to release block\n");

	spin_lock_irqsave(&list_lock, flags);
	list_del(&block->alloc_node);
	spin_unlock_irqrestore(&list_lock, flags);

	kfree(block);
	return ret;
}

/*
 * Utils
 */

/* calculate the tiler space address of a pixel in a view orientation...
 * below description copied from the display subsystem section of TRM:
 *
 * When the TILER is addressed, the bits:
 *   [28:27] = 0x0 for 8-bit tiled
 *             0x1 for 16-bit tiled
 *             0x2 for 32-bit tiled
 *             0x3 for page mode
 *   [31:29] = 0x0 for 0-degree view
 *             0x1 for 180-degree view + mirroring
 *             0x2 for 0-degree view + mirroring
 *             0x3 for 180-degree view
 *             0x4 for 270-degree view + mirroring
 *             0x5 for 270-degree view
 *             0x6 for 90-degree view
 *             0x7 for 90-degree view + mirroring
 * Otherwise the bits indicated the corresponding bit address to access
 * the SDRAM.
 */
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;

	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
	alignment = geom[fmt].x_shft + geom[fmt].y_shft;

	/* validate coordinate */
	x_mask = MASK(x_bits);
	y_mask = MASK(y_bits);

	if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
		DBG("invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
				x, x, x_mask, y, y, y_mask);
		return 0;
	}

	/* account for mirroring */
	if (orient & MASK_X_INVERT)
		x ^= x_mask;
	if (orient & MASK_Y_INVERT)
		y ^= y_mask;

	/* get coordinate address */
	if (orient & MASK_XY_FLIP)
		tmp = ((x << y_bits) + y);
	else
		tmp = ((y << x_bits) + x);

	return TIL_ADDR((tmp << alignment), orient, fmt);
}

dma_addr_t tiler_ssptr(struct tiler_block *block)
{
	BUG_ON(!validfmt(block->fmt));

	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
			block->area.p0.x * geom[block->fmt].slot_w,
			block->area.p0.y * geom[block->fmt].slot_h);
}

dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
		u32 x, u32 y)
{
	struct tcm_pt *p = &block->area.p0;
	BUG_ON(!validfmt(block->fmt));

	return tiler_get_address(block->fmt, orient,
			(p->x * geom[block->fmt].slot_w) + x,
			(p->y * geom[block->fmt].slot_h) + y);
}

void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h)
{
	BUG_ON(!validfmt(fmt));
	*w = round_up(*w, geom[fmt].slot_w);
	*h = round_up(*h, geom[fmt].slot_h);
}

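/*
 * Stride between consecutive lines in the TILER address space, derived
 * from the container dimensions and the format's shift bits; the axis
 * used depends on whether X/Y are flipped in the requested orientation.
 */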
u32 tiler_stride(enum tiler_fmt fmt, u32 orient)
{
	BUG_ON(!validfmt(fmt));

	if (orient & MASK_XY_FLIP)
		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
	else
		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}

size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h)
{
	tiler_align(fmt, &w, &h);
	return geom[fmt].cpp * w * h;
}

size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h)
{
	BUG_ON(!validfmt(fmt));
	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}

u32 tiler_get_cpu_cache_flags(void)
{
	return omap_dmm->plat_data->cpu_cache_flags;
}

bool dmm_is_available(void)
{
	return omap_dmm ? true : false;
}

static void omap_dmm_remove(struct platform_device *dev)
{
	struct tiler_block *block, *_block;
	int i;
	unsigned long flags;

	if (omap_dmm) {
		/* Disable all enabled interrupts */
		dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_CLR);
		free_irq(omap_dmm->irq, omap_dmm);

		/* free all area regions */
		spin_lock_irqsave(&list_lock, flags);
		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
					 alloc_node) {
			list_del(&block->alloc_node);
			kfree(block);
		}
		spin_unlock_irqrestore(&list_lock, flags);

		for (i = 0; i < omap_dmm->num_lut; i++)
			if (omap_dmm->tcm && omap_dmm->tcm[i])
				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
		kfree(omap_dmm->tcm);

		kfree(omap_dmm->engines);
		if (omap_dmm->refill_va)
			dma_free_wc(omap_dmm->dev,
				    REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				    omap_dmm->refill_va, omap_dmm->refill_pa);
		if (omap_dmm->dummy_page)
			__free_page(omap_dmm->dummy_page);

		if (omap_dmm->dmm_workaround)
			dmm_workaround_uninit(omap_dmm);

		iounmap(omap_dmm->base);
		kfree(omap_dmm);
		omap_dmm = NULL;
	}
}

static int omap_dmm_probe(struct platform_device *dev)
{
	int ret = -EFAULT, i;
	struct tcm_area area = {0};
	u32 hwinfo, pat_geom;
	struct resource *mem;

	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
	if (!omap_dmm)
		goto fail;

	/* initialize lists */
	INIT_LIST_HEAD(&omap_dmm->alloc_head);
	INIT_LIST_HEAD(&omap_dmm->idle_head);

	init_waitqueue_head(&omap_dmm->engine_queue);

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(dmm_of_match, dev->dev.of_node);
		if (!match) {
			dev_err(&dev->dev, "failed to find matching device node\n");
			ret = -ENODEV;
			goto fail;
		}

		omap_dmm->plat_data = match->data;
	}

	/* lookup hwmod data - base address and irq */
	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&dev->dev, "failed to get base address resource\n");
		goto fail;
	}

	omap_dmm->phys_base = mem->start;
	omap_dmm->base = ioremap(mem->start, SZ_2K);

	if (!omap_dmm->base) {
		dev_err(&dev->dev, "failed to get dmm base address\n");
		goto fail;
	}

	omap_dmm->irq = platform_get_irq(dev, 0);
	if (omap_dmm->irq < 0)
		goto fail;

	omap_dmm->dev = &dev->dev;

	if (of_machine_is_compatible("ti,dra7")) {
		/*
		 * DRA7 Errata i878 says that MPU should not be used to access
		 * RAM and DMM at the same time. As it's not possible to prevent
		 * MPU accessing RAM, we need to access DMM via a proxy.
		 */
		if (!dmm_workaround_init(omap_dmm)) {
			omap_dmm->dmm_workaround = true;
			dev_info(&dev->dev,
				 "workaround for errata i878 in use\n");
		} else {
			dev_warn(&dev->dev,
				 "failed to initialize work-around for i878\n");
		}
	}

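	/* HWINFO reports the number of refill engines and LUTs in the IP */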
	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
	omap_dmm->container_width = 256;
	omap_dmm->container_height = 128;

	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

	/* read out actual LUT width and height */
	pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;

	/* increment LUT by one if on OMAP5 */
	/* LUT has twice the height, and is split into a separate container */
	if (omap_dmm->lut_height != omap_dmm->container_height)
		omap_dmm->num_lut++;

	/* initialize DMM registers */
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
	dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
	dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);

	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {
		dev_err(&dev->dev, "could not allocate dummy page\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* set dma mask for device */
	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;

	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

	/* alloc refill memory */
	omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
			REFILL_BUFFER_SIZE * omap_dmm->num_engines,
			&omap_dmm->refill_pa, GFP_KERNEL);
	if (!omap_dmm->refill_va) {
		dev_err(&dev->dev, "could not allocate refill memory\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* alloc engines */
	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
				    sizeof(*omap_dmm->engines), GFP_KERNEL);
	if (!omap_dmm->engines) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < omap_dmm->num_engines; i++) {
		omap_dmm->engines[i].id = i;
		omap_dmm->engines[i].dmm = omap_dmm;
		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
						(REFILL_BUFFER_SIZE * i);
		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
						(REFILL_BUFFER_SIZE * i);
		init_completion(&omap_dmm->engines[i].compl);

		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
	}

	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
				GFP_KERNEL);
	if (!omap_dmm->tcm) {
		ret = -ENOMEM;
		goto fail;
	}

	/* init containers */
	/* Each LUT is associated with a TCM (container manager). We use the
	   lut_id to identify the correct LUT for programming during refill
	   operations */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
					     omap_dmm->container_height);

		if (!omap_dmm->tcm[i]) {
			dev_err(&dev->dev, "failed to allocate container\n");
			ret = -ENOMEM;
			goto fail;
		}

		omap_dmm->tcm[i]->lut_id = i;
	}

	/* assign access mode containers to applicable tcm container */
	/* OMAP 4 has 1 container for all 4 views */
	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
	containers[TILFMT_32BIT] = omap_dmm->tcm[0];

	if (omap_dmm->container_height != omap_dmm->lut_height) {
		/* second LUT is used for PAGE mode. Programming must use
		   y offset that is added to all y coordinates. LUT id is still
		   0, because it is the same LUT, just the upper 128 lines */
		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
		omap_dmm->tcm[1]->lut_id = 0;
	} else {
		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
	}

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
			  "omap_dmm_irq_handler", omap_dmm);

	if (ret) {
		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
			omap_dmm->irq, ret);
		omap_dmm->irq = -1;
		goto fail;
	}

	/* Enable all interrupts for each refill engine except
	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
	 * about because we want to be able to refill live scanout
	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
	 * we just generally don't care about.
	 */
	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed");
	}

	dev_info(omap_dmm->dev, "initialized all PAT entries\n");

	return 0;

fail:
	omap_dmm_remove(dev);
	return ret;
}

/*
 * debugfs support
 */

#ifdef CONFIG_DEBUG_FS

static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
				"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";

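/*
 * Paint a tcm area onto the ASCII map, downscaled by xdiv/ydiv; only
 * blank cells are overwritten unless 'ovw' is set.
 */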
static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
		char c, bool ovw)
{
	int x, y;
	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
			if (map[y][x] == ' ' || ovw)
				map[y][x] = c;
}

static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
		char c)
{
	map[p->y / ydiv][p->x / xdiv] = c;
}

static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
	return map[p->y / ydiv][p->x / xdiv];
}

static int map_width(int xdiv, int x0, int x1)
{
	return (x1 / xdiv) - (x0 / xdiv) + 1;
}

static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
	char *p = map[yd] + (x0 / xdiv);
	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
	if (w >= 0) {
		p += w;
		while (*nice)
			*p++ = *nice++;
	}
}

static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
		struct tcm_area *a)
{
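	/* each 1D slot covers one 4 KiB page, so size in KiB is slots * 4 */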
	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
	if (a->p0.y + 1 < a->p1.y) {
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
			 256 - 1);
	} else if (a->p0.y < a->p1.y) {
		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
			text_map(map, xdiv, nice, a->p0.y / ydiv,
				 a->p0.x + xdiv, 256 - 1);
		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
			text_map(map, xdiv, nice, a->p1.y / ydiv,
				 0, a->p1.y - xdiv);
	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
	}
}

static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
		struct tcm_area *a)
{
	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
			 a->p0.x, a->p1.x);
}

int tiler_map_show(struct seq_file *s, void *arg)
{
	int xdiv = 2, ydiv = 1;
	char **map = NULL, *global_map;
	struct tiler_block *block;
	struct tcm_area a, p;
	int i;
	const char *m2d = alphabet;
	const char *a2d = special;
	const char *m2dp = m2d, *a2dp = a2d;
	char nice[128];
	int h_adj;
	int w_adj;
	unsigned long flags;
	int lut_idx;


	if (!omap_dmm) {
		/* early return if dmm/tiler device is not initialized */
		return 0;
	}

	h_adj = omap_dmm->container_height / ydiv;
	w_adj = omap_dmm->container_width / xdiv;

	map = kmalloc_array(h_adj, sizeof(*map), GFP_KERNEL);
	global_map = kmalloc_array(w_adj + 1, h_adj, GFP_KERNEL);

	if (!map || !global_map)
		goto error;

	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
		memset(map, 0, h_adj * sizeof(*map));
		memset(global_map, ' ', (w_adj + 1) * h_adj);

		for (i = 0; i < omap_dmm->container_height; i++) {
			map[i] = global_map + i * (w_adj + 1);
			map[i][w_adj] = 0;
		}

		spin_lock_irqsave(&list_lock, flags);

		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
				if (block->fmt != TILFMT_PAGE) {
					fill_map(map, xdiv, ydiv, &block->area,
						 *m2dp, true);
					if (!*++a2dp)
						a2dp = a2d;
					if (!*++m2dp)
						m2dp = m2d;
					map_2d_info(map, xdiv, ydiv, nice,
						    &block->area);
				} else {
					bool start = read_map_pt(map, xdiv,
						ydiv, &block->area.p0) == ' ';
					bool end = read_map_pt(map, xdiv, ydiv,
						&block->area.p1) == ' ';

					tcm_for_each_slice(a, block->area, p)
						fill_map(map, xdiv, ydiv, &a,
							 '=', true);
					fill_map_pt(map, xdiv, ydiv,
						    &block->area.p0,
						    start ? '<' : 'X');
					fill_map_pt(map, xdiv, ydiv,
						    &block->area.p1,
						    end ? '>' : 'X');
					map_1d_info(map, xdiv, ydiv, nice,
						    &block->area);
				}
			}
		}

		spin_unlock_irqrestore(&list_lock, flags);

		if (s) {
			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
			for (i = 0; i < 128; i++)
				seq_printf(s, "%03d:%s\n", i, map[i]);
			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
		} else {
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
				lut_idx);
			for (i = 0; i < 128; i++)
				dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
				lut_idx);
		}
	}

error:
	kfree(map);
	kfree(global_map);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
	struct tcm_area area;
	int i;

	if (!omap_dmm)
		return -ENODEV;

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(dev, "refill failed");
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);

#if defined(CONFIG_OF)
static const struct dmm_platform_data dmm_omap4_platform_data = {
	.cpu_cache_flags = OMAP_BO_WC,
};

static const struct dmm_platform_data dmm_omap5_platform_data = {
	.cpu_cache_flags = OMAP_BO_UNCACHED,
};

static const struct of_device_id dmm_of_match[] = {
	{
		.compatible = "ti,omap4-dmm",
		.data = &dmm_omap4_platform_data,
	},
	{
		.compatible = "ti,omap5-dmm",
		.data = &dmm_omap5_platform_data,
	},
	{},
};
#endif

struct platform_driver omap_dmm_driver = {
	.probe = omap_dmm_probe,
	.remove_new = omap_dmm_remove,
	.driver = {
		.name = DMM_DRIVER_NAME,
		.of_match_table = of_match_ptr(dmm_of_match),
		.pm = &omap_dmm_pm_ops,
	},
};

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");