/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/component.h>
#include <linux/fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_iommu_v2.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}
#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)
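
/*
 * For illustration: the chip specs registers are plain packed bitfields,
 * so extracting, say, the stream count from a raw VIVS_HI_CHIP_SPECS value
 * reads as
 *
 *	specs0 = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
 *	count = etnaviv_field(specs0, VIVS_HI_CHIP_SPECS_STREAM_COUNT);
 *
 * which expands to masking with VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK and
 * shifting down by VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT, exactly as
 * etnaviv_hw_specs() below does for each field.
 */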
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from the older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphics cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model    = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					 VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {

		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same.  Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

		/* enable clock */
		etnaviv_gpu_load_clock(gpu, control);

		/* Wait for stable clock.  Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		msleep(1);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	/* enable clock */
	etnaviv_gpu_load_clock(gpu, control);

	return 0;
}
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* setup the MMU page table pointers */
	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
		  gpu->buffer->paddr - gpu->memory_base);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;
	struct iommu_domain *iommu;
	enum etnaviv_iommu_version version;
	bool mmuv2;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret)
		goto fail;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);

	if (!mmuv2) {
		iommu = etnaviv_iommu_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!iommu) {
		ret = -ENOMEM;
		goto fail;
	}

	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
	if (!gpu->mmu) {
		iommu_domain_free(iommu);
		ret = -ENOMEM;
		goto fail;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto destroy_iommu;
	}
	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_gpu_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
destroy_iommu:
	etnaviv_iommu_destroy(gpu->mmu);
	gpu->mmu = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif
static int enable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_prepare_enable(gpu->clk_core);
	if (gpu->clk_shader)
		clk_prepare_enable(gpu->clk_shader);

	return 0;
}

static int disable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);

	return 0;
}

static int enable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_prepare_enable(gpu->clk_bus);

	return 0;
}

static int disable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}
/*
 * Hangcheck detection for locked gpu:
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->switch_context = true;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}
static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}
static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, "     completed fence: %u\n", fence);
		dev_err(gpu->dev, "     active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}
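
/*
 * Note: fence_after() (a helper from the driver headers) is assumed here
 * to be a wrap-safe sequence number comparison along the lines of
 *
 *	fence_after(a, b) == ((s32)(a - b) > 0)
 *
 * so the progress and pending-work checks above stay correct when the
 * 32-bit fence counter eventually wraps around.
 */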
static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}
/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = fence_default_wait,
	.release = etnaviv_fence_release,
};

static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		   gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static unsigned int event_alloc(struct etnaviv_gpu *gpu)
{
	unsigned long ret, flags;
	unsigned int i, event = ~0U;

	ret = wait_for_completion_timeout(&gpu->event_free,
					  msecs_to_jiffies(10 * 10000));
	if (!ret)
		dev_err(gpu->dev, "wait_for_completion_timeout failed");

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	/* find first free event */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (gpu->event[i].used == false) {
			gpu->event[i].used = true;
			event = i;
			break;
		}
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return event;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (gpu->event[event].used == false) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		gpu->event[event].used = false;
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}
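
/*
 * Note: gpu->event_free is a completion used as a counting semaphore.
 * etnaviv_gpu_init() posts one count per slot in gpu->event[]:
 *
 *	init_completion(&gpu->event_free);
 *	for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
 *		complete(&gpu->event_free);
 *
 * event_alloc() then consumes one count (sleeping while all events are in
 * flight) and event_free() returns it, bounding the number of outstanding
 * hardware events without busy-waiting.
 */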
/*
 * Cmdstream submission/retirement:
 */

struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
	size_t nr_bos)
{
	struct etnaviv_cmdbuf *cmdbuf;
	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
				 sizeof(*cmdbuf));

	cmdbuf = kzalloc(sz, GFP_KERNEL);
	if (!cmdbuf)
		return NULL;

	cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
				     GFP_KERNEL);
	if (!cmdbuf->vaddr) {
		kfree(cmdbuf);
		return NULL;
	}

	cmdbuf->gpu = gpu;
	cmdbuf->size = size;

	return cmdbuf;
}

void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
		    cmdbuf->paddr);
	kfree(cmdbuf);
}
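
/*
 * size_vstruct() (a helper assumed to live in the driver headers) sizes a
 * structure with a trailing variable-length array, roughly
 *
 *	sizeof(*cmdbuf) + nr_bos * sizeof(cmdbuf->bo_map[0])
 *
 * presumably with overflow checking, so a single kzalloc() covers the
 * cmdbuf header plus the per-BO mapping pointers filled in at submit time.
 */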
static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
			struct etnaviv_gem_object *etnaviv_obj = mapping->object;

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_mapping_unreference(mapping);
		}

		etnaviv_gpu_cmdbuf_free(cmdbuf);
		/*
		 * We need to balance the runtime PM count caused by
		 * each submission.  Upon submission, we increment
		 * the runtime PM counter, and allocate one event.
		 * So here, we put the runtime PM count for each
		 * completed event.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						fence_completed(gpu, fence),
						remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
				fence, gpu->retired_fence,
				gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}
/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0) {
		struct etnaviv_drm_private *priv = gpu->drm->dev_private;

		/* Synchronise with the retire worker */
		flush_workqueue(priv->wq);
		return 0;
	} else if (ret == -ERESTARTSYS) {
		return -ERESTARTSYS;
	} else {
		return -ETIMEDOUT;
	}
}
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}

void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	mutex_lock(&gpu->lock);

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	gpu->event[event].fence = fence;
	submit->fence = fence->seqno;
	gpu->active_fence = submit->fence;

	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* Each cmdbuf takes a refcount on the mapping */
		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
		cmdbuf->bo_map[i] = submit->bos[i].mapping;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

	etnaviv_gpu_pm_put(gpu);

	return ret;
}
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		while ((event = ffs(intr)) != 0) {
			struct fence *fence;

			/* ffs() returns a 1-based bit index */
			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}
static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = enable_clk(gpu);
	if (ret < 0)
		return ret;

	ret = enable_axi(gpu);
	if (ret < 0) {
		disable_clk(gpu);
		return ret;
	}

	return 0;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	return 0;
}
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer) {
		unsigned long timeout;

		/* Replace the last WAIT with END */
		etnaviv_buffer_end(gpu);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		timeout = jiffies + msecs_to_jiffies(100);
		do {
			u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

			if ((idle & gpu->idle_mask) == gpu->idle_mask)
				break;

			if (time_is_before_jiffies(timeout)) {
				dev_warn(gpu->dev,
					 "timed out waiting for idle: idle=0x%x\n",
					 idle);
				break;
			}

			udelay(5);
		} while (1);
	}

	return etnaviv_gpu_clk_disable(gpu);
}
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	u32 clock;
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	etnaviv_gpu_load_clock(gpu, clock);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);

	return 0;
}
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		return ret;

	gpu->drm = drm;
	gpu->fence_context = fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_gpu_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;
}
static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};
static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	u32 dma_mask;
	int err = 0;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 */
	dma_mask = (u32)dma_get_required_mask(dev);
	if (dma_mask < PHYS_OFFSET + SZ_2G)
		gpu->memory_base = PHYS_OFFSET;
	else
		gpu->memory_base = dma_mask - SZ_2G + 1;
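
	/*
	 * Worked example: on a system where dma_get_required_mask() reports
	 * a full 32-bit window (dma_mask == 0xffffffff), memory_base becomes
	 * 0xffffffff - SZ_2G + 1 == 0x80000000, i.e. the 2 GiB linear window
	 * covers the top half of the DMA window.
	 */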

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		err = gpu->irq;
		dev_err(dev, "failed to get irq: %d\n", err);
		goto fail;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		goto fail;
	}

	/* Get Clocks: */
	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		goto fail;
	}

	return 0;

fail:
	return err;
}
static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}
static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif
static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};