/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>

#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_iommu_v2.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"

static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}

#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)

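/*
 * Read the CHIP_SPECS words where the hardware provides them, then fill in
 * model/revision based defaults for any field the hardware left as zero.
 */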
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}

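/*
 * Identify the core from its ID registers, apply the known model quirks,
 * and read out the feature/minor-feature words before deriving the specs.
 */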
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphic cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model    = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					 VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {
		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same. Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {
		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}

static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

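/*
 * Soft-reset sequence: isolate the core, pulse the soft-reset bit, then
 * verify that the FE and the 2D/3D engines report idle.  The sequence is
 * retried for up to a second before the reset is declared failed.
 */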
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

		/* enable clock */
		etnaviv_gpu_load_clock(gpu, control);

		/* Wait for stable clock.  Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	/* enable clock */
	etnaviv_gpu_load_clock(gpu, control);

	return 0;
}

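/*
 * Program the initial hardware state: per-model workarounds, AXI cache
 * attributes, memory base addresses and the MMU page tables, then start
 * the FE fetching from the ring buffer.
 */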
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* setup the MMU page table pointers */
	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
		  gpu->buffer->paddr - gpu->memory_base);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}

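/*
 * One-time initialisation at bind time: identify the core, reset it, set
 * up the IOMMU and ring buffer, and initialise the event pool.
 */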
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;
	struct iommu_domain *iommu;
	enum etnaviv_iommu_version version;
	bool mmuv2;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
	 * to different views of the memory on the individual engines.
	 */
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
		if (dma_mask < PHYS_OFFSET + SZ_2G)
			gpu->memory_base = PHYS_OFFSET;
		else
			gpu->memory_base = dma_mask - SZ_2G + 1;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret)
		goto fail;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);

	if (!mmuv2) {
		iommu = etnaviv_iommu_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!iommu) {
		ret = -ENOMEM;
		goto fail;
	}

	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
	if (!gpu->mmu) {
		iommu_domain_free(iommu);
		ret = -ENOMEM;
		goto fail;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto destroy_iommu;
	}

	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_gpu_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
destroy_iommu:
	etnaviv_iommu_destroy(gpu->mmu);
	gpu->mmu = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count:  %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

static int enable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_prepare_enable(gpu->clk_core);
	if (gpu->clk_shader)
		clk_prepare_enable(gpu->clk_shader);

	return 0;
}

static int disable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);

	return 0;
}

static int enable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_prepare_enable(gpu->clk_bus);

	return 0;
}

static int disable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}

/*
 * Hangcheck detection for locked gpu:
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->switch_context = true;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}

static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}

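/*
 * The hangcheck heuristic: progress is either a newly completed fence or
 * forward movement of the FE DMA address.  Only when neither has changed
 * while work is still outstanding is the recover worker scheduled.
 */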
static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, "     completed fence: %u\n", fence);
		dev_err(gpu->dev, "     active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}

static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = fence_default_wait,
	.release = etnaviv_fence_release,
};

static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		   gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

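/*
 * Event management: gpu->event_free is used as a counting semaphore over
 * the fixed pool of hardware events.  etnaviv_gpu_init() completes it once
 * per slot; event_alloc() waits on it and event_free() signals it again.
 */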
static unsigned int event_alloc(struct etnaviv_gpu *gpu)
{
	unsigned long ret, flags;
	unsigned int i, event = ~0U;

	ret = wait_for_completion_timeout(&gpu->event_free,
					  msecs_to_jiffies(10 * 10000));
	if (!ret)
		dev_err(gpu->dev, "wait_for_completion_timeout failed");

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	/* find first free event */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (gpu->event[i].used == false) {
			gpu->event[i].used = true;
			event = i;
			break;
		}
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return event;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (gpu->event[event].used == false) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		gpu->event[event].used = false;
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}

/*
 * Cmdstream submission/retirement:
 */

struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
	size_t nr_bos)
{
	struct etnaviv_cmdbuf *cmdbuf;
	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
				 sizeof(*cmdbuf));

	cmdbuf = kzalloc(sz, GFP_KERNEL);
	if (!cmdbuf)
		return NULL;

	cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
				     GFP_KERNEL);
	if (!cmdbuf->vaddr) {
		kfree(cmdbuf);
		return NULL;
	}

	cmdbuf->gpu = gpu;
	cmdbuf->size = size;

	return cmdbuf;
}

void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
		    cmdbuf->paddr);
	kfree(cmdbuf);
}

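/*
 * Retire worker: runs from the driver workqueue once events have fired;
 * frees every command buffer whose fence has signalled and drops the
 * references and the runtime PM count taken at submit time.
 */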
static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
			struct etnaviv_gem_object *etnaviv_obj = mapping->object;

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_mapping_unreference(mapping);
		}

		etnaviv_gpu_cmdbuf_free(cmdbuf);
		/*
		 * We need to balance the runtime PM count caused by
		 * each submission.  Upon submission, we increment
		 * the runtime PM counter, and allocate one event.
		 * So here, we put the runtime PM count for each
		 * completed submission.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}

int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
			  fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						fence_completed(gpu, fence),
						remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
			    fence, gpu->retired_fence,
			    gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0) {
		struct etnaviv_drm_private *priv = gpu->drm->dev_private;

		/* Synchronise with the retire worker */
		flush_workqueue(priv->wq);
		return 0;
	} else if (ret == -ERESTARTSYS) {
		return -ERESTARTSYS;
	} else {
		return -ETIMEDOUT;
	}
}

int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}

void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}

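/*
 * Each submission is paired with one hardware event and one fence.  The
 * runtime PM reference taken below (pm_runtime_get_noresume) is dropped
 * again by retire_worker() once the fence has signalled.
 */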
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	mutex_lock(&gpu->lock);

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	gpu->event[event].fence = fence;
	submit->fence = fence->seqno;
	gpu->active_fence = submit->fence;

	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* Each cmdbuf takes a refcount on the mapping */
		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
		cmdbuf->bo_map[i] = submit->bos[i].mapping;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

	etnaviv_gpu_pm_put(gpu);

	return ret;
}

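/*
 * Interrupt handling: every set bit in the acknowledge register denotes a
 * completed hardware event; its fence is signalled and completed_fence is
 * advanced, coping with events that complete out of order.
 */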
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		while ((event = ffs(intr)) != 0) {
			struct fence *fence;

			/* ffs() is 1-based, event numbers are 0-based */
			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret) {
		disable_clk(gpu);
		return ret;
	}

	return 0;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	return 0;
}

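/*
 * Runtime suspend: replace the ring's final WAIT with an END so the FE
 * drains, wait for the core to report idle, then gate the clocks.
 */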
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer) {
		unsigned long timeout;

		/* Replace the last WAIT with END */
		etnaviv_buffer_end(gpu);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		timeout = jiffies + msecs_to_jiffies(100);
		do {
			u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

			if ((idle & gpu->idle_mask) == gpu->idle_mask)
				break;

			if (time_is_before_jiffies(timeout)) {
				dev_warn(gpu->dev,
					 "timed out waiting for idle: idle=0x%x\n",
					 idle);
				break;
			}

			udelay(5);
		} while (1);
	}

	return etnaviv_gpu_clk_disable(gpu);
}

static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	u32 clock;
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	etnaviv_gpu_load_clock(gpu, clock);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);

	return 0;
}

static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		return ret;

	gpu->drm = drm;
	gpu->fence_context = fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
		    (unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}

static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_gpu_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);

static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err = 0;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		err = gpu->irq;
		dev_err(dev, "failed to get irq: %d\n", err);
		goto fail;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		goto fail;
	}

	/* Get Clocks: */
	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		goto fail;
	}

	return 0;

fail:
	return err;
}

*pdev
)
1658 component_del(&pdev
->dev
, &gpu_ops
);
1659 pm_runtime_disable(&pdev
->dev
);
#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}

static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};