drivers/gpu/host1x/syncpt.c

/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

/*
 * SYNCPT_CHECK_PERIOD is how long (in jiffies) a waiter sleeps before the
 * syncpoint is re-checked and, if still not expired, reported as stuck;
 * after MAX_STUCK_CHECK_COUNT such checks a full debug dump is produced.
 */
#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15

static struct host1x_syncpt_base *
host1x_syncpt_base_request(struct host1x *host)
{
        struct host1x_syncpt_base *bases = host->bases;
        unsigned int i;

        for (i = 0; i < host->info->nb_bases; i++)
                if (!bases[i].requested)
                        break;

        if (i >= host->info->nb_bases)
                return NULL;

        bases[i].requested = true;
        return &bases[i];
}

static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
{
        if (base)
                base->requested = false;
}

static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
                                                 struct device *dev,
                                                 unsigned long flags)
{
        int i;
        struct host1x_syncpt *sp = host->syncpt;
        char *name;

        /* find the first unused syncpoint (one without a name) */
        for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
                ;

        if (i >= host->info->nb_pts)
                return NULL;

        if (flags & HOST1X_SYNCPT_HAS_BASE) {
                sp->base = host1x_syncpt_base_request(host);
                if (!sp->base)
                        return NULL;
        }

        name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
                         dev ? dev_name(dev) : NULL);
        if (!name) {
                /* don't leak a requested wait base on allocation failure */
                host1x_syncpt_base_free(sp->base);
                sp->base = NULL;
                return NULL;
        }

        sp->dev = dev;
        sp->name = name;

        if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
                sp->client_managed = true;
        else
                sp->client_managed = false;

        return sp;
}

u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
        return sp->id;
}
EXPORT_SYMBOL(host1x_syncpt_id);

/*
 * Advance the cached maximum value (the value the syncpoint will reach once
 * all outstanding increments have executed) by @incrs and return the new
 * maximum.
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
        return (u32)atomic_add_return(incrs, &sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_incr_max);
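
/*
 * Illustrative sketch: a typical submission path reserves its increments up
 * front and remembers the returned value as the fence to wait for later.
 * "num_incrs" and "timeout" below are placeholder names, not symbols defined
 * by this driver.
 *
 *      u32 fence = host1x_syncpt_incr_max(sp, num_incrs);
 *
 *      ...queue work that performs exactly num_incrs increments...
 *
 *      host1x_syncpt_wait(sp, fence, timeout, NULL);
 */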

/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
        struct host1x_syncpt *sp_base = host->syncpt;
        u32 i;

        for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
                host1x_hw_syncpt_restore(host, sp_base + i);
        for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
                host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
        wmb();
}

/*
 * Update the cached syncpoint and waitbase values by reading them
 * from the registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
        struct host1x_syncpt *sp_base = host->syncpt;
        u32 i;

        for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
                if (host1x_syncpt_client_managed(sp_base + i))
                        host1x_hw_syncpt_load(host, sp_base + i);
                else
                        WARN_ON(!host1x_syncpt_idle(sp_base + i));
        }

        for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
                host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}

/*
 * Updates the cached syncpoint value by reading a new value from the
 * hardware register.
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
        u32 val;
        val = host1x_hw_syncpt_load(sp->host, sp);
        trace_host1x_syncpt_load_min(sp->id, val);

        return val;
}

/*
 * Get the current syncpoint base value.
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
        u32 val;
        host1x_hw_syncpt_load_wait_base(sp->host, sp);
        val = sp->base_val;
        return val;
}

/*
 * Increment syncpoint value from CPU, updating cache.
 */
int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
        return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
EXPORT_SYMBOL(host1x_syncpt_incr);

/*
 * Updates the cached syncpoint value from hardware and returns true if the
 * syncpoint is expired, false if we may need to wait.
 */
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
        host1x_hw_syncpt_load(sp->host, sp);
        return host1x_syncpt_is_expired(sp, thresh);
}

/*
 * Main entrypoint for syncpoint value waits.
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
                       u32 *value)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
        struct host1x_waitlist *waiter;
        int err = 0, check_count = 0;
        u32 val;

        if (value)
                *value = 0;

        /* first check cache */
        if (host1x_syncpt_is_expired(sp, thresh)) {
                if (value)
                        *value = host1x_syncpt_load(sp);
                return 0;
        }

        /* try to read from register */
        val = host1x_hw_syncpt_load(sp->host, sp);
        if (host1x_syncpt_is_expired(sp, thresh)) {
                if (value)
                        *value = val;
                goto done;
        }

        if (!timeout) {
                err = -EAGAIN;
                goto done;
        }

        /* allocate a waiter */
        waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
        if (!waiter) {
                err = -ENOMEM;
                goto done;
        }

        /* schedule a wakeup when the syncpoint value is reached */
        err = host1x_intr_add_action(sp->host, sp->id, thresh,
                                     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
                                     &wq, waiter, &ref);
        if (err)
                goto done;

        err = -EAGAIN;
        /* a negative timeout means wait for as long as possible */
        if (timeout < 0)
                timeout = LONG_MAX;

        /* wait for the syncpoint, or timeout, or signal */
        while (timeout) {
                long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
                int remain = wait_event_interruptible_timeout(wq,
                                syncpt_load_min_is_expired(sp, thresh),
                                check);
                if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
                        if (value)
                                *value = host1x_syncpt_load(sp);
                        err = 0;
                        break;
                }
                if (remain < 0) {
                        err = remain;
                        break;
                }
                timeout -= check;
                if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
                        dev_warn(sp->host->dev,
                                 "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
                                 current->comm, sp->id, sp->name,
                                 thresh, timeout);

                        host1x_debug_dump_syncpts(sp->host);
                        if (check_count == MAX_STUCK_CHECK_COUNT)
                                host1x_debug_dump(sp->host);
                        check_count++;
                }
        }
        host1x_intr_put_ref(sp->host, sp->id, ref);

done:
        return err;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
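
/*
 * Illustrative sketch of how a caller might use host1x_syncpt_wait().
 * "fence" is a placeholder for a threshold previously returned by
 * host1x_syncpt_incr_max().
 *
 *      u32 value;
 *      int err;
 *
 *      err = host1x_syncpt_wait(sp, fence, msecs_to_jiffies(100), &value);
 *      if (err == -EAGAIN)
 *              ...timed out (or timeout was 0 and the fence is not done)...
 *      else if (err < 0)
 *              ...interrupted by a signal, or the waiter could not be set up...
 *      else
 *              ...value holds the syncpoint value read after expiry...
 */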

/*
 * Returns true if the syncpoint is expired, false if we may need to wait.
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
        u32 current_val;
        u32 future_val;
        smp_rmb();
        current_val = (u32)atomic_read(&sp->min_val);
        future_val = (u32)atomic_read(&sp->max_val);

        /* Note the use of unsigned arithmetic here (mod 1<<32).
         *
         * c = current_val = min_val = the current value of the syncpoint.
         * t = thresh = the value we are checking
         * f = future_val = max_val = the value c will reach when all
         *     outstanding increments have completed.
         *
         * Note that c always chases f until it reaches f.
         *
         * Dtf = (f - t)
         * Dtc = (c - t)
         *
         *  Consider all cases:
         *
         *      A) .....c..t..f.....    Dtf < Dtc       need to wait
         *      B) .....c.....f..t..    Dtf > Dtc       expired
         *      C) ..t..c.....f.....    Dtf > Dtc       expired (Dct very large)
         *
         *  Any case where f==c: always expired (for any t).   Dtf == Dtc
         *  Any case where t==c: always expired (for any f).   Dtf >= Dtc (because Dtc==0)
         *  Any case where t==f!=c: always wait.               Dtf < Dtc (because Dtf==0,
         *                                                      Dtc!=0)
         *
         *  Other cases:
         *
         *      A) .....t..f..c.....    Dtf < Dtc       need to wait
         *      A) .....f..c..t.....    Dtf < Dtc       need to wait
         *      A) .....f..t..c.....    Dtf > Dtc       expired
         *
         *  So:
         *      Dtf >= Dtc implies EXPIRED      (return true)
         *      Dtf <  Dtc implies WAIT         (return false)
         *
         * Note: If t is expired then we *cannot* wait on it. We would wait
         * forever (hang the system).
         *
         * Note: do NOT get clever and remove the -thresh from both sides. It
         * is NOT the same.
         *
         * Client-managed syncpoints are incremented outside of the kernel's
         * bookkeeping, so their cached future value cannot be trusted. In
         * that case we fall back to a direct (signed-distance) comparison of
         * the current value against the threshold.
         */
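        /*
         * Worked example with illustrative values: take c = 0xfffffffe,
         * t = 0x00000001 and f = 0x00000003, i.e. the counter is about to
         * wrap and the threshold lies just past the wrap. Then
         * Dtf = f - t = 2 and Dtc = c - t = 0xfffffffd, so Dtf < Dtc and we
         * correctly wait, whereas a naive unsigned check of c >= t would
         * wrongly report the threshold as expired. Once the hardware has
         * incremented past the wrap to c = 0x00000002, Dtc = 1, Dtf >= Dtc,
         * and the threshold is correctly reported as expired.
         */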
        if (!host1x_syncpt_client_managed(sp))
                return future_val - thresh >= current_val - thresh;
        else
                return (s32)(current_val - thresh) >= 0;
}

/* remove a wait pointed to by patch_addr */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
        return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
}

int host1x_syncpt_init(struct host1x *host)
{
        struct host1x_syncpt_base *bases;
        struct host1x_syncpt *syncpt;
        int i;

        syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
                              GFP_KERNEL);
        if (!syncpt)
                return -ENOMEM;

        bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
                             GFP_KERNEL);
        if (!bases)
                return -ENOMEM;

        for (i = 0; i < host->info->nb_pts; i++) {
                syncpt[i].id = i;
                syncpt[i].host = host;
        }

        for (i = 0; i < host->info->nb_bases; i++)
                bases[i].id = i;

        host->syncpt = syncpt;
        host->bases = bases;

        host1x_syncpt_restore(host);

        /* Allocate sync point to use for clearing waits for expired fences */
        host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
        if (!host->nop_sp)
                return -ENOMEM;

        return 0;
}

struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
                                            unsigned long flags)
{
        struct host1x *host = dev_get_drvdata(dev->parent);
        return host1x_syncpt_alloc(host, dev, flags);
}
EXPORT_SYMBOL(host1x_syncpt_request);

void host1x_syncpt_free(struct host1x_syncpt *sp)
{
        if (!sp)
                return;

        host1x_syncpt_base_free(sp->base);
        kfree(sp->name);
        sp->base = NULL;
        sp->dev = NULL;
        sp->name = NULL;
        sp->client_managed = false;
}
EXPORT_SYMBOL(host1x_syncpt_free);
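
/*
 * Illustrative lifecycle sketch: a host1x client driver typically pairs the
 * request and free calls as shown below. "client->dev" is a placeholder for
 * the client's struct device; error handling is abbreviated.
 *
 *      struct host1x_syncpt *sp;
 *
 *      sp = host1x_syncpt_request(client->dev, HOST1X_SYNCPT_HAS_BASE);
 *      if (!sp)
 *              return -ENOMEM;
 *
 *      ...use host1x_syncpt_incr_max()/host1x_syncpt_wait() as sketched above...
 *
 *      host1x_syncpt_free(sp);
 */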

void host1x_syncpt_deinit(struct host1x *host)
{
        int i;
        struct host1x_syncpt *sp = host->syncpt;
        for (i = 0; i < host->info->nb_pts; i++, sp++)
                kfree(sp->name);
}

/*
 * Read max, the value the syncpoint will reach once all queued operations
 * (in the channel or in a software thread) have completed.
 */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
        smp_rmb();
        return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);

/*
 * Read min, which is a shadow of the current sync point value in hardware.
 */
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
        smp_rmb();
        return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);
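
/*
 * For syncpoints whose increments are tracked by the kernel (i.e. not
 * client-managed), min shadows the hardware value and chases max, so the
 * number of increments still outstanding can be read as follows (all
 * arithmetic is mod 1 << 32); this is an illustrative relation, not an API
 * defined by this file:
 *
 *      u32 outstanding = host1x_syncpt_read_max(sp) -
 *                        host1x_syncpt_read_min(sp);
 */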

int host1x_syncpt_nb_pts(struct host1x *host)
{
        return host->info->nb_pts;
}

int host1x_syncpt_nb_bases(struct host1x *host)
{
        return host->info->nb_bases;
}

int host1x_syncpt_nb_mlocks(struct host1x *host)
{
        return host->info->nb_mlocks;
}

struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
{
        if (id >= host->info->nb_pts)
                return NULL;
        return host->syncpt + id;
}
EXPORT_SYMBOL(host1x_syncpt_get);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
        return sp ? sp->base : NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_base);

u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
        return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);