/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Author: Rob Clark <rob@ti.com>
 *         Andy Gross <andy.gross@ti.com>
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"

#define DMM_DRIVER_NAME "dmm"

/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;

#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[];
#endif

/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);

/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
		.x_shft = (xshift), \
		.y_shft = (yshift), \
		.cpp    = (bytes_per_pixel), \
		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
	}

static const struct {
	uint32_t x_shft;	/* unused X-bits (as part of bpp) */
	uint32_t y_shft;	/* unused Y-bits (as part of bpp) */
	uint32_t cpp;		/* bytes/chars per pixel */
	uint32_t slot_w;	/* width of each slot (in pixels) */
	uint32_t slot_h;	/* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
	[TILFMT_8BIT]  = GEOM(0, 0, 1),
	[TILFMT_16BIT] = GEOM(0, 1, 2),
	[TILFMT_32BIT] = GEOM(1, 1, 4),
	[TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
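
/*
 * Worked example (illustrative; the slot dimensions assume the OMAP4
 * values SLOT_WIDTH_BITS = 6 and SLOT_HEIGHT_BITS = 6 from
 * omap_dmm_priv.h): TILFMT_16BIT = GEOM(0, 1, 2) gives
 * slot_w = 1 << 6 = 64 pixels and slot_h = 1 << (6 - 1) = 32 pixels,
 * so one slot covers 64 * 32 * 2 = 4096 bytes -- exactly one 4 KiB
 * physical page per PAT entry, which holds for every format above.
 */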

/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
	[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
			DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
	[PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};

static u32 dmm_read(struct dmm *dmm, u32 reg)
{
	return readl(dmm->base + reg);
}

static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
	writel(val, dmm->base + reg);
}

/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
	void *ptr;
	struct refill_engine *engine = txn->engine_handle;

	/* dmm programming requires 16 byte aligned addresses */
	txn->current_pa = round_up(txn->current_pa, 16);
	txn->current_va = (void *)round_up((long)txn->current_va, 16);

	ptr = txn->current_va;
	*pa = txn->current_pa;

	txn->current_pa += sz;
	txn->current_va += sz;

	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

	return ptr;
}

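/*
 * Example of the bump allocation above (addresses made up for
 * illustration): with current_pa == 0x9c000004, round_up(..., 16)
 * advances it to 0x9c000010; a sizeof(struct pat) allocation then
 * returns 0x9c000010 and leaves current_pa pointing just past the
 * descriptor, ready for the next 16-byte-aligned carve-out.
 */
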
/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
{
	struct dmm *dmm = engine->dmm;
	uint32_t r = 0, err, i;

	i = DMM_FIXED_RETRY_COUNT;
	while (true) {
		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
		err = r & DMM_PATSTATUS_ERR;
		if (err)
			return -EFAULT;

		if ((r & wait_mask) == wait_mask)
			break;

		if (--i == 0)
			return -ETIMEDOUT;

		udelay(1);
	}

	return 0;
}

static void release_engine(struct refill_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&engine->idle_node, &omap_dmm->idle_head);
	spin_unlock_irqrestore(&list_lock, flags);

	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);
}

static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
	struct dmm *dmm = arg;
	uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
	int i;

	/* ack IRQ */
	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);

	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_LST) {
			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);

			complete(&dmm->engines[i].compl);
		}

		status >>= 8;
	}

	return IRQ_HANDLED;
}

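/*
 * Note on the loop above: as implied by the 'status >>= 8',
 * DMM_PAT_IRQSTATUS packs each refill engine's status flags into its
 * own byte, so the handler tests DMM_IRQSTAT_LST against the low byte
 * and shifts by 8 to move on to the next engine's flags.
 */
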
/**
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
	struct dmm_txn *txn = NULL;
	struct refill_engine *engine = NULL;
	int ret;
	unsigned long flags;

	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
				idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	BUG_ON(!engine);

	txn = &engine->txn;
	engine->tcm = tcm;
	txn->engine_handle = engine;
	txn->last_pat = NULL;
	txn->current_va = engine->refill_va;
	txn->current_pa = engine->refill_pa;

	return txn;
}

/**
 * Add region to DMM transaction. If pages or pages[i] is NULL, then the
 * corresponding slot is cleared (ie. dummy_pa is programmed)
 */
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
		struct page **pages, uint32_t npages, uint32_t roll)
{
	dma_addr_t pat_pa = 0, data_pa = 0;
	uint32_t *data;
	struct pat *pat;
	struct refill_engine *engine = txn->engine_handle;
	int columns = (1 + area->x1 - area->x0);
	int rows = (1 + area->y1 - area->y0);
	int i = columns*rows;

	pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);

	if (txn->last_pat)
		txn->last_pat->next_pa = (uint32_t)pat_pa;

	pat->area = *area;

	/* adjust Y coordinates based off of container parameters */
	pat->area.y0 += engine->tcm->y_offset;
	pat->area.y1 += engine->tcm->y_offset;

	pat->ctrl = (struct pat_ctrl){
		.start = 1,
		.lut_id = engine->tcm->lut_id,
	};

	data = alloc_dma(txn, 4*i, &data_pa);
	/* FIXME: what if data_pa is more than 32-bit ? */
	pat->data_pa = data_pa;

	while (i--) {
		int n = i + roll;
		if (n >= npages)
			n -= npages;
		data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
	}

	txn->last_pat = pat;
}

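/*
 * Example of the roll handling above: with npages = 4 and roll = 1 the
 * slots are programmed with pages[1], pages[2], pages[3], pages[0] --
 * the mapping rotates through the page array, which is what allows a
 * live scanout buffer to be scrolled without re-pinning every page.
 */
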
/**
 * Commit the DMM transaction.
 */
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
	int ret = 0;
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;

	if (!txn->last_pat) {
		dev_err(engine->dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}

	txn->last_pat->next_pa = 0;

	/* write to PAT_DESCR to clear out any pending transaction */
	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);

	/* wait for engine ready: */
	ret = wait_status(engine, DMM_PATSTATUS_READY);
	if (ret) {
		ret = -EFAULT;
		goto cleanup;
	}

	/* mark whether it is async to denote list management in IRQ handler */
	engine->async = wait ? false : true;
	reinit_completion(&engine->compl);
	/* verify that the irq handler sees the 'async' and completion value */
	smp_mb();

	/* kick reload */
	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

	if (wait) {
		if (!wait_for_completion_timeout(&engine->compl,
				msecs_to_jiffies(100))) {
			dev_err(dmm->dev, "timed out waiting for done\n");
			ret = -ETIMEDOUT;
		}
	}

cleanup:
	/* only place engine back on list if we are done with it */
	if (ret || wait)
		release_engine(engine);

	return ret;
}

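/*
 * Putting the three txn helpers together, a refill is driven as below
 * (a sketch of what fill() right after this does; not additional
 * driver code):
 *
 *	txn = dmm_txn_init(omap_dmm, area->tcm);           claim an engine
 *	dmm_txn_append(txn, &p_area, pages, npages, roll); once per slice
 *	ret = dmm_txn_commit(txn, true);                   kick and wait
 */
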
/*
 * DMM programming
 */
static int fill(struct tcm_area *area, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret = 0;
	struct tcm_area slice, area_s;
	struct dmm_txn *txn;

	/*
	 * FIXME
	 *
	 * Asynchronous fill does not work reliably, as the driver does not
	 * handle errors in the async code paths. The fill operation may
	 * silently fail, leading to leaking DMM engines, which may eventually
	 * lead to deadlock if we run out of DMM engines.
	 *
	 * For now, always set 'wait' so that we only use sync fills. Async
	 * fills should be fixed, or alternatively we could decide to only
	 * support sync fills and so the whole async code path could be removed.
	 */

	wait = true;

	txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;

	tcm_for_each_slice(slice, *area, area_s) {
		struct pat_area p_area = {
			.x0 = slice.p0.x, .y0 = slice.p0.y,
			.x1 = slice.p1.x, .y1 = slice.p1.y,
		};

		dmm_txn_append(txn, &p_area, pages, npages, roll);

		roll += tcm_sizeof(slice);
	}

	ret = dmm_txn_commit(txn, wait);

	return ret;
}

/*
 * Pin/unpin
 */

/* note: slots for which pages[i] == NULL are filled w/ dummy page
 */
int tiler_pin(struct tiler_block *block, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret;

	ret = fill(&block->area, pages, npages, roll, wait);

	if (ret)
		tiler_unpin(block);

	return ret;
}

int tiler_unpin(struct tiler_block *block)
{
	return fill(&block->area, NULL, 0, 0, false);
}

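/*
 * Illustrative reserve/pin lifecycle (a sketch only; 'pages' and
 * 'npages' stand in for a caller-provided page list, e.g. from a GEM
 * object):
 *
 *	block = tiler_reserve_2d(TILFMT_16BIT, 1920, 1080, 0);
 *	if (IS_ERR(block))
 *		return PTR_ERR(block);
 *	ret = tiler_pin(block, pages, npages, 0, true);
 *	...
 *	tiler_unpin(block);
 *	tiler_release(block);
 */
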
/*
 * Reserve/release
 */
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
		uint16_t h, uint16_t align)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	u32 min_align = 128;
	int ret;
	unsigned long flags;
	size_t slot_bytes;

	if (!block)
		return ERR_PTR(-ENOMEM);

	BUG_ON(!validfmt(fmt));

	/* convert width/height to slots */
	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
	h = DIV_ROUND_UP(h, geom[fmt].slot_h);

	/* convert alignment to slots */
	slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
	min_align = max(min_align, slot_bytes);
	align = (align > min_align) ? ALIGN(align, min_align) : min_align;
	align /= slot_bytes;

	block->fmt = fmt;

	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
			&block->area);
	if (ret) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	/* add to allocation list */
	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}

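/*
 * Worked example of the slot conversion above, for a 1920x1080
 * TILFMT_16BIT reservation (64x32-pixel slots, assuming the OMAP4 slot
 * geometry): w = DIV_ROUND_UP(1920, 64) = 30 slots,
 * h = DIV_ROUND_UP(1080, 32) = 34 slots, slot_bytes = 64 * 2 = 128,
 * and a caller-requested align of 0 collapses to min_align = 128
 * bytes, i.e. an alignment of exactly one slot.
 */
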
struct tiler_block *tiler_reserve_1d(size_t size)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long flags;

	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = TILFMT_PAGE;

	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
				&block->area)) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}

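/*
 * Example: with 4 KiB pages, tiler_reserve_1d(100 * 1024) yields
 * num_pages = 25, and the returned block covers 25 slots in the
 * page-mode container.
 */
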
/* note: if you have pin'd pages, you should have already unpin'd first! */
int tiler_release(struct tiler_block *block)
{
	int ret = tcm_free(&block->area);
	unsigned long flags;

	if (block->area.tcm)
		dev_err(omap_dmm->dev, "failed to release block\n");

	spin_lock_irqsave(&list_lock, flags);
	list_del(&block->alloc_node);
	spin_unlock_irqrestore(&list_lock, flags);

	kfree(block);
	return ret;
}

/*
 * Utils
 */

/* calculate the tiler space address of a pixel in a view orientation...
 * below description copied from the display subsystem section of TRM:
 *
 * When the TILER is addressed, the bits:
 *   [28:27] = 0x0 for 8-bit tiled
 *             0x1 for 16-bit tiled
 *             0x2 for 32-bit tiled
 *             0x3 for page mode
 *   [31:29] = 0x0 for 0-degree view
 *             0x1 for 180-degree view + mirroring
 *             0x2 for 0-degree view + mirroring
 *             0x3 for 180-degree view
 *             0x4 for 270-degree view + mirroring
 *             0x5 for 270-degree view
 *             0x6 for 90-degree view
 *             0x7 for 90-degree view + mirroring
 * Otherwise the bits indicate the corresponding bit address to access
 * the SDRAM.
 */
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;

	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
	alignment = geom[fmt].x_shft + geom[fmt].y_shft;

	/* validate coordinate */
	x_mask = MASK(x_bits);
	y_mask = MASK(y_bits);

	if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
		DBG("invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
				x, x, x_mask, y, y, y_mask);
		return 0;
	}

	/* account for mirroring */
	if (orient & MASK_X_INVERT)
		x ^= x_mask;
	if (orient & MASK_Y_INVERT)
		y ^= y_mask;

	/* get coordinate address */
	if (orient & MASK_XY_FLIP)
		tmp = ((x << y_bits) + y);
	else
		tmp = ((y << x_bits) + x);

	return TIL_ADDR((tmp << alignment), orient, fmt);
}

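/*
 * Worked example (assuming CONT_WIDTH_BITS = 14 and
 * CONT_HEIGHT_BITS = 13 from omap_dmm_priv.h): for TILFMT_8BIT,
 * orient = 0, pixel (x = 100, y = 2): x_bits = 14, alignment = 0,
 * tmp = (2 << 14) + 100 = 0x8064, and TIL_ADDR() then folds the
 * format bits [28:27] and view bits [31:29] on top of that offset.
 */
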
dma_addr_t tiler_ssptr(struct tiler_block *block)
{
	BUG_ON(!validfmt(block->fmt));

	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
			block->area.p0.x * geom[block->fmt].slot_w,
			block->area.p0.y * geom[block->fmt].slot_h);
}

dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
		uint32_t x, uint32_t y)
{
	struct tcm_pt *p = &block->area.p0;

	BUG_ON(!validfmt(block->fmt));

	return tiler_get_address(block->fmt, orient,
			(p->x * geom[block->fmt].slot_w) + x,
			(p->y * geom[block->fmt].slot_h) + y);
}

void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
{
	BUG_ON(!validfmt(fmt));
	*w = round_up(*w, geom[fmt].slot_w);
	*h = round_up(*h, geom[fmt].slot_h);
}

uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
{
	BUG_ON(!validfmt(fmt));

	if (orient & MASK_XY_FLIP)
		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
	else
		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}

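/*
 * Example: for TILFMT_8BIT with no XY flip (and CONT_WIDTH_BITS = 14
 * as assumed above), the stride is 1 << 14 = 16384 bytes -- one full
 * row of the 0-degree container view, independent of buffer width.
 */
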
size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	tiler_align(fmt, &w, &h);
	return geom[fmt].cpp * w * h;
}

size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	BUG_ON(!validfmt(fmt));
	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}

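/*
 * Example (again assuming 64x32-pixel 16-bit slots and 4 KiB pages):
 * tiler_size(TILFMT_16BIT, 1920, 1080) aligns to whole slots (1920
 * stays, 1080 rounds up to 1088) and returns 2 * 1920 * 1088 bytes,
 * whereas tiler_vsize() rounds each 2 * 1920 = 3840-byte row up to a
 * 4096-byte page, giving the size of the CPU-visible virtual view.
 */
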
uint32_t tiler_get_cpu_cache_flags(void)
{
	return omap_dmm->plat_data->cpu_cache_flags;
}

bool dmm_is_available(void)
{
	return omap_dmm ? true : false;
}

static int omap_dmm_remove(struct platform_device *dev)
{
	struct tiler_block *block, *_block;
	int i;
	unsigned long flags;

	if (omap_dmm) {
		/* free all area regions */
		spin_lock_irqsave(&list_lock, flags);
		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
					alloc_node) {
			list_del(&block->alloc_node);
			kfree(block);
		}
		spin_unlock_irqrestore(&list_lock, flags);

		for (i = 0; i < omap_dmm->num_lut; i++)
			if (omap_dmm->tcm && omap_dmm->tcm[i])
				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
		kfree(omap_dmm->tcm);

		kfree(omap_dmm->engines);
		if (omap_dmm->refill_va)
			dma_free_wc(omap_dmm->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				omap_dmm->refill_va, omap_dmm->refill_pa);
		if (omap_dmm->dummy_page)
			__free_page(omap_dmm->dummy_page);

		if (omap_dmm->irq > 0)
			free_irq(omap_dmm->irq, omap_dmm);

		iounmap(omap_dmm->base);
		kfree(omap_dmm);
		omap_dmm = NULL;
	}

	return 0;
}

static int omap_dmm_probe(struct platform_device *dev)
{
	int ret = -EFAULT, i;
	struct tcm_area area = {0};
	u32 hwinfo, pat_geom;
	struct resource *mem;

	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
	if (!omap_dmm)
		goto fail;

	/* initialize lists */
	INIT_LIST_HEAD(&omap_dmm->alloc_head);
	INIT_LIST_HEAD(&omap_dmm->idle_head);

	init_waitqueue_head(&omap_dmm->engine_queue);

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(dmm_of_match, dev->dev.of_node);
		if (!match) {
			dev_err(&dev->dev, "failed to find matching device node\n");
			return -ENODEV;
		}

		omap_dmm->plat_data = match->data;
	}

71e8831f | 646 | /* lookup hwmod data - base address and irq */ |
5c137797 AG |
647 | mem = platform_get_resource(dev, IORESOURCE_MEM, 0); |
648 | if (!mem) { | |
649 | dev_err(&dev->dev, "failed to get base address resource\n"); | |
650 | goto fail; | |
651 | } | |
652 | ||
653 | omap_dmm->base = ioremap(mem->start, SZ_2K); | |
71e8831f AG |
654 | |
655 | if (!omap_dmm->base) { | |
5c137797 | 656 | dev_err(&dev->dev, "failed to get dmm base address\n"); |
71e8831f AG |
657 | goto fail; |
658 | } | |
659 | ||
5c137797 AG |
660 | omap_dmm->irq = platform_get_irq(dev, 0); |
661 | if (omap_dmm->irq < 0) { | |
662 | dev_err(&dev->dev, "failed to get IRQ resource\n"); | |
663 | goto fail; | |
664 | } | |
665 | ||
666 | omap_dmm->dev = &dev->dev; | |
667 | ||
8e54adfd | 668 | hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO); |
71e8831f AG |
669 | omap_dmm->num_engines = (hwinfo >> 24) & 0x1F; |
670 | omap_dmm->num_lut = (hwinfo >> 16) & 0x1F; | |
671 | omap_dmm->container_width = 256; | |
672 | omap_dmm->container_height = 128; | |
673 | ||
faaa0540 AG |
674 | atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines); |
675 | ||
71e8831f | 676 | /* read out actual LUT width and height */ |
8e54adfd | 677 | pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY); |
71e8831f AG |
678 | omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5; |
679 | omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5; | |
680 | ||
c6b7ae55 AG |
681 | /* increment LUT by one if on OMAP5 */ |
682 | /* LUT has twice the height, and is split into a separate container */ | |
683 | if (omap_dmm->lut_height != omap_dmm->container_height) | |
684 | omap_dmm->num_lut++; | |
685 | ||
	/* initialize DMM registers */
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
	dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
	dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);

	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
				"omap_dmm_irq_handler", omap_dmm);

	if (ret) {
		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
			omap_dmm->irq, ret);
		omap_dmm->irq = -1;
		goto fail;
	}

	/* Enable all interrupts for each refill engine except
	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
	 * about because we want to be able to refill live scanout
	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
	 * we just generally don't care about.
	 */
	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);

	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {
		dev_err(&dev->dev, "could not allocate dummy page\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* set dma mask for device */
	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;

	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

	/* alloc refill memory */
	omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				&omap_dmm->refill_pa, GFP_KERNEL);
	if (!omap_dmm->refill_va) {
		dev_err(&dev->dev, "could not allocate refill memory\n");
		goto fail;
	}

	/* alloc engines */
	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
				sizeof(struct refill_engine), GFP_KERNEL);
	if (!omap_dmm->engines) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < omap_dmm->num_engines; i++) {
		omap_dmm->engines[i].id = i;
		omap_dmm->engines[i].dmm = omap_dmm;
		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
						(REFILL_BUFFER_SIZE * i);
		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
						(REFILL_BUFFER_SIZE * i);
		init_completion(&omap_dmm->engines[i].compl);

		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
	}

	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
				GFP_KERNEL);
	if (!omap_dmm->tcm) {
		ret = -ENOMEM;
		goto fail;
	}

	/* init containers */
	/* Each LUT is associated with a TCM (container manager). We use
	 * lut_id to identify the correct LUT for programming during
	 * refill operations.
	 */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
						omap_dmm->container_height);

		if (!omap_dmm->tcm[i]) {
			dev_err(&dev->dev, "failed to allocate container\n");
			ret = -ENOMEM;
			goto fail;
		}

		omap_dmm->tcm[i]->lut_id = i;
	}

	/* assign access mode containers to applicable tcm container */
	/* OMAP 4 has 1 container for all 4 views */
	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
	containers[TILFMT_32BIT] = omap_dmm->tcm[0];

	if (omap_dmm->container_height != omap_dmm->lut_height) {
		/* second LUT is used for PAGE mode. Programming must use
		 * y offset that is added to all y coordinates. LUT id is still
		 * 0, because it is the same LUT, just the upper 128 lines
		 */
		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
		omap_dmm->tcm[1]->lut_id = 0;
	} else {
		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
	}

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed");
	}

	dev_info(omap_dmm->dev, "initialized all PAT entries\n");

	return 0;

fail:
	if (omap_dmm_remove(dev))
		dev_err(&dev->dev, "cleanup failed\n");
	return ret;
}

/*
 * debugfs support
 */

#ifdef CONFIG_DEBUG_FS

static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
	"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";

static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
		char c, bool ovw)
{
	int x, y;

	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
			if (map[y][x] == ' ' || ovw)
				map[y][x] = c;
}

static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
		char c)
{
	map[p->y / ydiv][p->x / xdiv] = c;
}

static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
	return map[p->y / ydiv][p->x / xdiv];
}

static int map_width(int xdiv, int x0, int x1)
{
	return (x1 / xdiv) - (x0 / xdiv) + 1;
}

static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
	char *p = map[yd] + (x0 / xdiv);
	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;

	if (w >= 0) {
		p += w;
		while (*nice)
			*p++ = *nice++;
	}
}

static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
		struct tcm_area *a)
{
	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
	if (a->p0.y + 1 < a->p1.y) {
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
			256 - 1);
	} else if (a->p0.y < a->p1.y) {
		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
			text_map(map, xdiv, nice, a->p0.y / ydiv,
				a->p0.x + xdiv, 256 - 1);
		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
			text_map(map, xdiv, nice, a->p1.y / ydiv,
				0, a->p1.y - xdiv);
	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
	}
}

static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
		struct tcm_area *a)
{
	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
			a->p0.x, a->p1.x);
}

int tiler_map_show(struct seq_file *s, void *arg)
{
	int xdiv = 2, ydiv = 1;
	char **map = NULL, *global_map;
	struct tiler_block *block;
	struct tcm_area a, p;
	int i;
	const char *m2d = alphabet;
	const char *a2d = special;
	const char *m2dp = m2d, *a2dp = a2d;
	char nice[128];
	int h_adj;
	int w_adj;
	unsigned long flags;
	int lut_idx;

	if (!omap_dmm) {
		/* early return if dmm/tiler device is not initialized */
		return 0;
	}

	h_adj = omap_dmm->container_height / ydiv;
	w_adj = omap_dmm->container_width / xdiv;

	map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
	global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);

	if (!map || !global_map)
		goto error;

	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
		memset(map, 0, h_adj * sizeof(*map));
		memset(global_map, ' ', (w_adj + 1) * h_adj);

		for (i = 0; i < omap_dmm->container_height; i++) {
			map[i] = global_map + i * (w_adj + 1);
			map[i][w_adj] = 0;
		}

		spin_lock_irqsave(&list_lock, flags);

		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
				if (block->fmt != TILFMT_PAGE) {
					fill_map(map, xdiv, ydiv, &block->area,
						*m2dp, true);
					if (!*++a2dp)
						a2dp = a2d;
					if (!*++m2dp)
						m2dp = m2d;
					map_2d_info(map, xdiv, ydiv, nice,
							&block->area);
				} else {
					bool start = read_map_pt(map, xdiv,
						ydiv, &block->area.p0) == ' ';
					bool end = read_map_pt(map, xdiv, ydiv,
							&block->area.p1) == ' ';

					tcm_for_each_slice(a, block->area, p)
						fill_map(map, xdiv, ydiv, &a,
							'=', true);
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p0,
							start ? '<' : 'X');
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p1,
							end ? '>' : 'X');
					map_1d_info(map, xdiv, ydiv, nice,
							&block->area);
				}
			}
		}

		spin_unlock_irqrestore(&list_lock, flags);

		if (s) {
			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
			for (i = 0; i < 128; i++)
				seq_printf(s, "%03d:%s\n", i, map[i]);
			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
		} else {
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
				lut_idx);
			for (i = 0; i < 128; i++)
				dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
				lut_idx);
		}
	}

error:
	kfree(map);
	kfree(global_map);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
	struct tcm_area area;
	int i;

	if (!omap_dmm)
		return -ENODEV;

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(dev, "refill failed");
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);

#if defined(CONFIG_OF)
static const struct dmm_platform_data dmm_omap4_platform_data = {
	.cpu_cache_flags = OMAP_BO_WC,
};

static const struct dmm_platform_data dmm_omap5_platform_data = {
	.cpu_cache_flags = OMAP_BO_UNCACHED,
};

static const struct of_device_id dmm_of_match[] = {
	{
		.compatible = "ti,omap4-dmm",
		.data = &dmm_omap4_platform_data,
	},
	{
		.compatible = "ti,omap5-dmm",
		.data = &dmm_omap5_platform_data,
	},
	{},
};
#endif

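/*
 * Illustrative device tree node matched by the table above (the unit
 * address, register window and interrupt are examples for OMAP4, not
 * taken from this file):
 *
 *	dmm@4e000000 {
 *		compatible = "ti,omap4-dmm";
 *		reg = <0x4e000000 0x800>;
 *		interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */
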
struct platform_driver omap_dmm_driver = {
	.probe = omap_dmm_probe,
	.remove = omap_dmm_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = DMM_DRIVER_NAME,
		.of_match_table = of_match_ptr(dmm_of_match),
		.pm = &omap_dmm_pm_ops,
	},
};

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");