staging: omapdrm: Fix error paths during dmm init
drivers/staging/omapdrm/omap_dmm_tiler.c
1/*
2 * DMM IOMMU driver support functions for TI OMAP processors.
3 *
4 * Author: Rob Clark <rob@ti.com>
5 * Andy Gross <andy.gross@ti.com>
6 *
7 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
14 * kind, whether express or implied; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/platform_device.h> /* platform_device() */
21#include <linux/errno.h>
22#include <linux/sched.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/dma-mapping.h>
26#include <linux/slab.h>
27#include <linux/vmalloc.h>
28#include <linux/delay.h>
29#include <linux/mm.h>
30#include <linux/time.h>
31#include <linux/list.h>
32#include <linux/semaphore.h>
33
34#include "omap_dmm_tiler.h"
35#include "omap_dmm_priv.h"
36
37#define DMM_DRIVER_NAME "dmm"
38
39/* mappings for associating views to luts */
40static struct tcm *containers[TILFMT_NFORMATS];
41static struct dmm *omap_dmm;
42
43/* global spinlock for protecting lists */
44static DEFINE_SPINLOCK(list_lock);
45
46/* Geometry table */
47#define GEOM(xshift, yshift, bytes_per_pixel) { \
48 .x_shft = (xshift), \
49 .y_shft = (yshift), \
50 .cpp = (bytes_per_pixel), \
51 .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
52 .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
53 }
54
55static const struct {
56 uint32_t x_shft; /* unused X-bits (as part of bpp) */
57 uint32_t y_shft; /* unused Y-bits (as part of bpp) */
58 uint32_t cpp; /* bytes/chars per pixel */
59 uint32_t slot_w; /* width of each slot (in pixels) */
60 uint32_t slot_h; /* height of each slot (in pixels) */
61} geom[TILFMT_NFORMATS] = {
62 [TILFMT_8BIT] = GEOM(0, 0, 1),
63 [TILFMT_16BIT] = GEOM(0, 1, 2),
64 [TILFMT_32BIT] = GEOM(1, 1, 4),
65 [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
66};
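/*
 * Note (added for clarity): the x/y shifts drop address bits that are unused
 * for a given bpp, so slot_w * slot_h * cpp works out to the same number of
 * bytes for the 8-, 16- and 32-bit formats; TILFMT_PAGE degenerates to a 1x1
 * "slot" and is handled as linear page mode.
 */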
67
68
69/* lookup table for registers w/ per-engine instances */
70static const uint32_t reg[][4] = {
71 [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
72 DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
73 [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
74 DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
75};
76
77/* simple allocator to grab next 16 byte aligned memory from txn */
78static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
79{
80 void *ptr;
81 struct refill_engine *engine = txn->engine_handle;
82
83 /* dmm programming requires 16 byte aligned addresses */
84 txn->current_pa = round_up(txn->current_pa, 16);
85 txn->current_va = (void *)round_up((long)txn->current_va, 16);
86
87 ptr = txn->current_va;
88 *pa = txn->current_pa;
89
90 txn->current_pa += sz;
91 txn->current_va += sz;
92
93 BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
94
95 return ptr;
96}
97
98/* check status and spin until wait_mask comes true */
99static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
100{
101 struct dmm *dmm = engine->dmm;
102 uint32_t r = 0, err, i;
103
104 i = DMM_FIXED_RETRY_COUNT;
105 while (true) {
106 r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
107 err = r & DMM_PATSTATUS_ERR;
108 if (err)
109 return -EFAULT;
110
111 if ((r & wait_mask) == wait_mask)
112 break;
113
114 if (--i == 0)
115 return -ETIMEDOUT;
116
117 udelay(1);
118 }
119
120 return 0;
121}
122
123irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
124{
125 struct dmm *dmm = arg;
126 uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
127 int i;
128
129 /* ack IRQ */
130 writel(status, dmm->base + DMM_PAT_IRQSTATUS);
131
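 /* the status register carries one byte per refill engine; check them lowest byte first */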
132 for (i = 0; i < dmm->num_engines; i++) {
133 if (status & DMM_IRQSTAT_LST)
134 wake_up_interruptible(&dmm->engines[i].wait_for_refill);
135
136 status >>= 8;
137 }
138
139 return IRQ_HANDLED;
140}
141
142/**
143 * Get a handle for a DMM transaction
144 */
145static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
146{
147 struct dmm_txn *txn = NULL;
148 struct refill_engine *engine = NULL;
149
150 down(&dmm->engine_sem);
151
152 /* grab an idle engine */
153 spin_lock(&list_lock);
154 if (!list_empty(&dmm->idle_head)) {
155 engine = list_entry(dmm->idle_head.next, struct refill_engine,
156 idle_node);
157 list_del(&engine->idle_node);
158 }
159 spin_unlock(&list_lock);
160
161 BUG_ON(!engine);
162
163 txn = &engine->txn;
164 engine->tcm = tcm;
165 txn->engine_handle = engine;
166 txn->last_pat = NULL;
167 txn->current_va = engine->refill_va;
168 txn->current_pa = engine->refill_pa;
169
170 return txn;
171}
172
173/**
174 * Add region to DMM transaction. If pages or pages[i] is NULL, then the
175 * corresponding slot is cleared (i.e. dummy_pa is programmed)
176 */
177static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
178 struct page **pages, uint32_t npages, uint32_t roll)
179{
180 dma_addr_t pat_pa = 0;
181 uint32_t *data;
182 struct pat *pat;
183 struct refill_engine *engine = txn->engine_handle;
184 int columns = (1 + area->x1 - area->x0);
185 int rows = (1 + area->y1 - area->y0);
186 int i = columns*rows;
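 /* locate the target container's slice of the shadow LUT, offset to (x0, y0) */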
187 u32 *lut = omap_dmm->lut + (engine->tcm->lut_id * omap_dmm->lut_width *
188 omap_dmm->lut_height) +
189 (area->y0 * omap_dmm->lut_width) + area->x0;
190
191 pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
192
193 if (txn->last_pat)
194 txn->last_pat->next_pa = (uint32_t)pat_pa;
195
196 pat->area = *area;
197 pat->ctrl = (struct pat_ctrl){
198 .start = 1,
199 .lut_id = engine->tcm->lut_id,
200 };
201
202 data = alloc_dma(txn, 4*i, &pat->data_pa);
203
204 while (i--) {
205 int n = i + roll;
206 if (n >= npages)
207 n -= npages;
208 data[i] = (pages && pages[n]) ?
209 page_to_phys(pages[n]) : engine->dmm->dummy_pa;
210 }
211
212 /* fill in lut with new addresses */
213 for (i = 0; i < rows; i++, lut += omap_dmm->lut_width)
214 memcpy(lut, &data[i*columns], columns * sizeof(u32));
215
216 txn->last_pat = pat;
217
218 return 0;
219}
220
221/**
222 * Commit the DMM transaction.
223 */
224static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
225{
226 int ret = 0;
227 struct refill_engine *engine = txn->engine_handle;
228 struct dmm *dmm = engine->dmm;
229
230 if (!txn->last_pat) {
231 dev_err(engine->dmm->dev, "need at least one txn\n");
232 ret = -EINVAL;
233 goto cleanup;
234 }
235
236 txn->last_pat->next_pa = 0;
237
238 /* write to PAT_DESCR to clear out any pending transaction */
239 writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
240
241 /* wait for engine ready: */
242 ret = wait_status(engine, DMM_PATSTATUS_READY);
243 if (ret) {
244 ret = -EFAULT;
245 goto cleanup;
246 }
247
248 /* kick reload */
249 writel(engine->refill_pa,
250 dmm->base + reg[PAT_DESCR][engine->id]);
251
252 if (wait) {
253 if (wait_event_interruptible_timeout(engine->wait_for_refill,
254 wait_status(engine, DMM_PATSTATUS_READY) == 0,
255 msecs_to_jiffies(1)) <= 0) {
256 dev_err(dmm->dev, "timed out waiting for done\n");
257 ret = -ETIMEDOUT;
258 }
259 }
260
261cleanup:
262 spin_lock(&list_lock);
263 list_add(&engine->idle_node, &dmm->idle_head);
264 spin_unlock(&list_lock);
265
266 up(&omap_dmm->engine_sem);
267 return ret;
268}
269
270/*
271 * DMM programming
272 */
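/*
 * fill() programs one area: each contiguous slice of the area is appended to
 * the transaction as its own PAT descriptor, and the whole descriptor chain
 * is then committed in a single refill operation.
 */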
273static int fill(struct tcm_area *area, struct page **pages,
274 uint32_t npages, uint32_t roll, bool wait)
275{
276 int ret = 0;
277 struct tcm_area slice, area_s;
278 struct dmm_txn *txn;
279
280 txn = dmm_txn_init(omap_dmm, area->tcm);
281 if (IS_ERR_OR_NULL(txn))
282 return PTR_ERR(txn);
283
284 tcm_for_each_slice(slice, *area, area_s) {
285 struct pat_area p_area = {
286 .x0 = slice.p0.x, .y0 = slice.p0.y,
287 .x1 = slice.p1.x, .y1 = slice.p1.y,
288 };
289
290 ret = dmm_txn_append(txn, &p_area, pages, npages, roll);
291 if (ret)
292 goto fail;
293
294 roll += tcm_sizeof(slice);
295 }
296
297 ret = dmm_txn_commit(txn, wait);
298
299fail:
300 return ret;
301}
302
303/*
304 * Pin/unpin
305 */
306
307/* note: slots for which pages[i] == NULL are filled w/ dummy page
308 */
309int tiler_pin(struct tiler_block *block, struct page **pages,
310 uint32_t npages, uint32_t roll, bool wait)
311{
312 int ret;
313
314 ret = fill(&block->area, pages, npages, roll, wait);
315
316 if (ret)
317 tiler_unpin(block);
318
319 return ret;
320}
321
322int tiler_unpin(struct tiler_block *block)
323{
324 return fill(&block->area, NULL, 0, 0, false);
325}
326
327/*
328 * Reserve/release
329 */
330struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
331 uint16_t h, uint16_t align)
332{
333 struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
334 u32 min_align = 128;
335 int ret;
336
if (!block)
	return ERR_PTR(-ENOMEM);

337 BUG_ON(!validfmt(fmt));
338
339 /* convert width/height to slots */
340 w = DIV_ROUND_UP(w, geom[fmt].slot_w);
341 h = DIV_ROUND_UP(h, geom[fmt].slot_h);
342
343 /* convert alignment to slots */
344 min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
345 align = ALIGN(align, min_align);
346 align /= geom[fmt].slot_w * geom[fmt].cpp;
347
348 block->fmt = fmt;
349
350 ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
351 if (ret) {
352 kfree(block);
353 return ERR_PTR(-ENOMEM);
354 }
355
356 /* add to allocation list */
357 spin_lock(&list_lock);
358 list_add(&block->alloc_node, &omap_dmm->alloc_head);
359 spin_unlock(&list_lock);
360
361 return block;
362}
363
364struct tiler_block *tiler_reserve_1d(size_t size)
365{
366 struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
367 int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
368
369 if (!block)
370 return ERR_PTR(-ENOMEM);
371
372 block->fmt = TILFMT_PAGE;
373
374 if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
375 &block->area)) {
376 kfree(block);
377 return ERR_PTR(-ENOMEM);
378 }
379
380 spin_lock(&list_lock);
381 list_add(&block->alloc_node, &omap_dmm->alloc_head);
382 spin_unlock(&list_lock);
383
384 return block;
385}
386
387/* note: if you have pinned pages, you should have already unpinned them first! */
388int tiler_release(struct tiler_block *block)
389{
390 int ret = tcm_free(&block->area);
391
392 if (block->area.tcm)
393 dev_err(omap_dmm->dev, "failed to release block\n");
394
395 spin_lock(&list_lock);
396 list_del(&block->alloc_node);
397 spin_unlock(&list_lock);
398
399 kfree(block);
400 return ret;
401}
402
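/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * caller might use the reserve/pin/release API above. The width, height,
 * pages and npages identifiers are placeholders supplied by the caller, and
 * the error handling is simplified.
 *
 *	struct tiler_block *block;
 *	int ret;
 *
 *	block = tiler_reserve_2d(TILFMT_16BIT, width, height, 0);
 *	if (IS_ERR_OR_NULL(block))
 *		return -ENOMEM;
 *
 *	ret = tiler_pin(block, pages, npages, 0, true);
 *	if (ret) {
 *		tiler_release(block);	// tiler_pin() already unpins on failure
 *		return ret;
 *	}
 *
 *	// ... use tiler_ssptr(block) as the physical address of the tiled view ...
 *
 *	tiler_unpin(block);
 *	tiler_release(block);
 */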
403/*
404 * Utils
405 */
406
407/* calculate the tiler space address of a pixel in a view orientation */
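/* mirroring is applied by XOR-ing against the coordinate mask and X/Y flip by
 * swapping which coordinate forms the high-order bits; TIL_ADDR() then folds
 * the orientation and format into the returned address.
 */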
408static u32 tiler_get_address(u32 orient, enum tiler_fmt fmt, u32 x, u32 y)
409{
410 u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
411
412 x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
413 y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
414 alignment = geom[fmt].x_shft + geom[fmt].y_shft;
415
416 /* validate coordinate */
417 x_mask = MASK(x_bits);
418 y_mask = MASK(y_bits);
419
420 if (x < 0 || x > x_mask || y < 0 || y > y_mask)
421 return 0;
422
423 /* account for mirroring */
424 if (orient & MASK_X_INVERT)
425 x ^= x_mask;
426 if (orient & MASK_Y_INVERT)
427 y ^= y_mask;
428
429 /* get coordinate address */
430 if (orient & MASK_XY_FLIP)
431 tmp = ((x << y_bits) + y);
432 else
433 tmp = ((y << x_bits) + x);
434
435 return TIL_ADDR((tmp << alignment), orient, fmt);
436}
437
438dma_addr_t tiler_ssptr(struct tiler_block *block)
439{
440 BUG_ON(!validfmt(block->fmt));
441
442 return TILVIEW_8BIT + tiler_get_address(0, block->fmt,
443 block->area.p0.x * geom[block->fmt].slot_w,
444 block->area.p0.y * geom[block->fmt].slot_h);
445}
446
447void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
448{
449 BUG_ON(!validfmt(fmt));
450 *w = round_up(*w, geom[fmt].slot_w);
451 *h = round_up(*h, geom[fmt].slot_h);
452}
453
454uint32_t tiler_stride(enum tiler_fmt fmt)
455{
456 BUG_ON(!validfmt(fmt));
457
458 return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
459}
460
461size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
462{
463 tiler_align(fmt, &w, &h);
464 return geom[fmt].cpp * w * h;
465}
466
467size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
468{
469 BUG_ON(!validfmt(fmt));
470 return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
471}
472
473bool dmm_is_initialized(void)
474{
475 return omap_dmm ? true : false;
476}
477
478static int omap_dmm_remove(struct platform_device *dev)
479{
480 struct tiler_block *block, *_block;
481 int i;
482
483 if (omap_dmm) {
484 /* free all area regions */
485 spin_lock(&list_lock);
486 list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
487 alloc_node) {
488 list_del(&block->alloc_node);
489 kfree(block);
490 }
491 spin_unlock(&list_lock);
492
493 for (i = 0; i < omap_dmm->num_lut; i++)
494 if (omap_dmm->tcm && omap_dmm->tcm[i])
495 omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
496 kfree(omap_dmm->tcm);
497
498 kfree(omap_dmm->engines);
499 if (omap_dmm->refill_va)
500 dma_free_coherent(omap_dmm->dev,
501 REFILL_BUFFER_SIZE * omap_dmm->num_engines,
502 omap_dmm->refill_va,
503 omap_dmm->refill_pa);
504 if (omap_dmm->dummy_page)
505 __free_page(omap_dmm->dummy_page);
506
507 vfree(omap_dmm->lut);
508
509 if (omap_dmm->irq > 0)
510 free_irq(omap_dmm->irq, omap_dmm);
511
512 iounmap(omap_dmm->base);
513 kfree(omap_dmm);
514 omap_dmm = NULL;
515 }
516
517 return 0;
518}
519
520static int omap_dmm_probe(struct platform_device *dev)
521{
522 int ret = -EFAULT, i;
523 struct tcm_area area = {0};
524 u32 hwinfo, pat_geom, lut_table_size;
525 struct resource *mem;
526
527 omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
528 if (!omap_dmm) {
529 dev_err(&dev->dev, "failed to allocate driver data section\n");
530 goto fail;
531 }
532
533 /* initialize lists */
534 INIT_LIST_HEAD(&omap_dmm->alloc_head);
535 INIT_LIST_HEAD(&omap_dmm->idle_head);
536
537 /* lookup hwmod data - base address and irq */
538 mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
539 if (!mem) {
540 dev_err(&dev->dev, "failed to get base address resource\n");
541 goto fail;
542 }
543
544 omap_dmm->base = ioremap(mem->start, SZ_2K);
545
546 if (!omap_dmm->base) {
547 dev_err(&dev->dev, "failed to get dmm base address\n");
548 goto fail;
549 }
550
551 omap_dmm->irq = platform_get_irq(dev, 0);
552 if (omap_dmm->irq < 0) {
553 dev_err(&dev->dev, "failed to get IRQ resource\n");
554 goto fail;
555 }
556
557 omap_dmm->dev = &dev->dev;
558
559 hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
560 omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
561 omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
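 /* container size is fixed at 256x128 slots here; the actual LUT
  * dimensions are read back from DMM_PAT_GEOMETRY below */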
562 omap_dmm->container_width = 256;
563 omap_dmm->container_height = 128;
564
565 /* read out actual LUT width and height */
566 pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
567 omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
568 omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
569
570 /* initialize DMM registers */
571 writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
572 writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
573 writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
574 writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
575 writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
576 writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
577
578 ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
579 "omap_dmm_irq_handler", omap_dmm);
580
581 if (ret) {
582 dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
583 omap_dmm->irq, ret);
584 omap_dmm->irq = -1;
585 goto fail;
586 }
587
588 /* Enable all interrupts for each refill engine except
589 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
590 * about because we want to be able to refill live scanout
591 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
592 * we just generally don't care about.
593 */
594 writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
595
596 lut_table_size = omap_dmm->lut_width * omap_dmm->lut_height *
597 omap_dmm->num_lut;
598
599 omap_dmm->lut = vmalloc(lut_table_size * sizeof(*omap_dmm->lut));
600 if (!omap_dmm->lut) {
601 dev_err(&dev->dev, "could not allocate lut table\n");
602 ret = -ENOMEM;
603 goto fail;
604 }
605
606 omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
607 if (!omap_dmm->dummy_page) {
608 dev_err(&dev->dev, "could not allocate dummy page\n");
609 ret = -ENOMEM;
610 goto fail;
611 }
612
613 /* set dma mask for device */
614 /* NOTE: this is a workaround for the hwmod not initializing properly */
615 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
616
617 omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
618
619 /* alloc refill memory */
620 omap_dmm->refill_va = dma_alloc_coherent(&dev->dev,
621 REFILL_BUFFER_SIZE * omap_dmm->num_engines,
622 &omap_dmm->refill_pa, GFP_KERNEL);
623 if (!omap_dmm->refill_va) {
624 dev_err(&dev->dev, "could not allocate refill memory\n");
625 goto fail;
626 }
627
628 /* alloc engines */
629 omap_dmm->engines = kzalloc(
630 omap_dmm->num_engines * sizeof(struct refill_engine),
631 GFP_KERNEL);
632 if (!omap_dmm->engines) {
633 dev_err(&dev->dev, "could not allocate engines\n");
634 ret = -ENOMEM;
635 goto fail;
636 }
637
638 sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
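 /* hand each engine its own REFILL_BUFFER_SIZE slice of the coherent refill allocation */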
639 for (i = 0; i < omap_dmm->num_engines; i++) {
640 omap_dmm->engines[i].id = i;
641 omap_dmm->engines[i].dmm = omap_dmm;
642 omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
643 (REFILL_BUFFER_SIZE * i);
644 omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
645 (REFILL_BUFFER_SIZE * i);
646 init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
647
648 list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
649 }
650
651 omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
652 GFP_KERNEL);
653 if (!omap_dmm->tcm) {
654 dev_err(&dev->dev, "failed to allocate lut ptrs\n");
655 ret = -ENOMEM;
656 goto fail;
657 }
658
659 /* init containers */
660 for (i = 0; i < omap_dmm->num_lut; i++) {
661 omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
662 omap_dmm->container_height,
663 NULL);
664
665 if (!omap_dmm->tcm[i]) {
666 dev_err(&dev->dev, "failed to allocate container\n");
667 ret = -ENOMEM;
668 goto fail;
669 }
670
671 omap_dmm->tcm[i]->lut_id = i;
672 }
673
674 /* assign access mode containers to applicable tcm container */
675 /* OMAP 4 has 1 container for all 4 views */
676 containers[TILFMT_8BIT] = omap_dmm->tcm[0];
677 containers[TILFMT_16BIT] = omap_dmm->tcm[0];
678 containers[TILFMT_32BIT] = omap_dmm->tcm[0];
679 containers[TILFMT_PAGE] = omap_dmm->tcm[0];
680
681 area = (struct tcm_area) {
682 .is2d = true,
683 .tcm = NULL,
684 .p1.x = omap_dmm->container_width - 1,
685 .p1.y = omap_dmm->container_height - 1,
686 };
687
688 for (i = 0; i < lut_table_size; i++)
689 omap_dmm->lut[i] = omap_dmm->dummy_pa;
690
691 /* initialize all LUTs to dummy page entries */
692 for (i = 0; i < omap_dmm->num_lut; i++) {
693 area.tcm = omap_dmm->tcm[i];
694 if (fill(&area, NULL, 0, 0, true))
695 dev_err(omap_dmm->dev, "refill failed\n");
696 }
697
698 dev_info(omap_dmm->dev, "initialized all PAT entries\n");
699
700 return 0;
701
702fail:
703 if (omap_dmm_remove(dev))
704 dev_err(&dev->dev, "cleanup failed\n");
705 return ret;
706}
707
708/*
709 * debugfs support
710 */
711
712#ifdef CONFIG_DEBUG_FS
713
714static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
715 "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
716static const char *special = ".,:;'\"`~!^-+";
717
718static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
719 char c, bool ovw)
720{
721 int x, y;
722 for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
723 for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
724 if (map[y][x] == ' ' || ovw)
725 map[y][x] = c;
726}
727
728static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
729 char c)
730{
731 map[p->y / ydiv][p->x / xdiv] = c;
732}
733
734static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
735{
736 return map[p->y / ydiv][p->x / xdiv];
737}
738
739static int map_width(int xdiv, int x0, int x1)
740{
741 return (x1 / xdiv) - (x0 / xdiv) + 1;
742}
743
744static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
745{
746 char *p = map[yd] + (x0 / xdiv);
747 int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
748 if (w >= 0) {
749 p += w;
750 while (*nice)
751 *p++ = *nice++;
752 }
753}
754
755static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
756 struct tcm_area *a)
757{
758 sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
759 if (a->p0.y + 1 < a->p1.y) {
760 text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
761 256 - 1);
762 } else if (a->p0.y < a->p1.y) {
763 if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
764 text_map(map, xdiv, nice, a->p0.y / ydiv,
765 a->p0.x + xdiv, 256 - 1);
766 else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
767 text_map(map, xdiv, nice, a->p1.y / ydiv,
768 0, a->p1.y - xdiv);
769 } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
770 text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
771 }
772}
773
774static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
775 struct tcm_area *a)
776{
777 sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
778 if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
779 text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
780 a->p0.x, a->p1.x);
781}
782
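/*
 * Render the current allocation map as ASCII art: 2D blocks are drawn with
 * letters and annotated with their width*height, while 1D (page mode) blocks
 * are drawn as '=' runs delimited by '<' and '>' and annotated with their
 * size in KB.
 */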
783int tiler_map_show(struct seq_file *s, void *arg)
784{
785 int xdiv = 2, ydiv = 1;
786 char **map = NULL, *global_map;
787 struct tiler_block *block;
788 struct tcm_area a, p;
789 int i;
790 const char *m2d = alphabet;
791 const char *a2d = special;
792 const char *m2dp = m2d, *a2dp = a2d;
793 char nice[128];
794 int h_adj;
795 int w_adj;
796 unsigned long flags;
797
798 if (!omap_dmm) {
799 /* early return if dmm/tiler device is not initialized */
800 return 0;
801 }
802
803 h_adj = omap_dmm->lut_height / ydiv;
804 w_adj = omap_dmm->lut_width / xdiv;
805
806 map = kzalloc(h_adj * sizeof(*map), GFP_KERNEL);
807 global_map = kzalloc((w_adj + 1) * h_adj, GFP_KERNEL);
808
809 if (!map || !global_map)
810 goto error;
811
812 memset(global_map, ' ', (w_adj + 1) * h_adj);
813 for (i = 0; i < omap_dmm->lut_height; i++) {
814 map[i] = global_map + i * (w_adj + 1);
815 map[i][w_adj] = 0;
816 }
817 spin_lock_irqsave(&list_lock, flags);
818
819 list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
820 if (block->fmt != TILFMT_PAGE) {
821 fill_map(map, xdiv, ydiv, &block->area, *m2dp, true);
822 if (!*++a2dp)
823 a2dp = a2d;
824 if (!*++m2dp)
825 m2dp = m2d;
826 map_2d_info(map, xdiv, ydiv, nice, &block->area);
827 } else {
828 bool start = read_map_pt(map, xdiv, ydiv,
829 &block->area.p0)
830 == ' ';
831 bool end = read_map_pt(map, xdiv, ydiv, &block->area.p1)
832 == ' ';
833 tcm_for_each_slice(a, block->area, p)
834 fill_map(map, xdiv, ydiv, &a, '=', true);
835 fill_map_pt(map, xdiv, ydiv, &block->area.p0,
836 start ? '<' : 'X');
837 fill_map_pt(map, xdiv, ydiv, &block->area.p1,
838 end ? '>' : 'X');
839 map_1d_info(map, xdiv, ydiv, nice, &block->area);
840 }
841 }
842
843 spin_unlock_irqrestore(&list_lock, flags);
844
845 if (s) {
846 seq_printf(s, "BEGIN DMM TILER MAP\n");
847 for (i = 0; i < h_adj; i++)
848 seq_printf(s, "%03d:%s\n", i, map[i]);
849 seq_printf(s, "END TILER MAP\n");
850 } else {
851 dev_dbg(omap_dmm->dev, "BEGIN DMM TILER MAP\n");
852 for (i = 0; i < h_adj; i++)
853 dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
854 dev_dbg(omap_dmm->dev, "END TILER MAP\n");
855 }
856
857error:
858 kfree(map);
859 kfree(global_map);
860
861 return 0;
862}
863#endif
864
865struct platform_driver omap_dmm_driver = {
866 .probe = omap_dmm_probe,
867 .remove = omap_dmm_remove,
868 .driver = {
869 .owner = THIS_MODULE,
870 .name = DMM_DRIVER_NAME,
871 },
872};
873
874MODULE_LICENSE("GPL v2");
875MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
876MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
877MODULE_ALIAS("platform:" DMM_DRIVER_NAME);