/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>

#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "kfd_kernel_queue.h"
#include "../../radeon/cik_reg.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
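
/*
 * For illustration (arithmetic implied by the macros above, not in the
 * original source): 1U << 11 == 2048, i.e. each pipe's EOP queue is 2 KiB.
 */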
static bool is_mem_initialized;

static int init_memory(struct device_queue_manager *dqm);
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
					unsigned int sdma_queue_id);
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_CIK_SDMA;
	return KFD_MQD_TYPE_CIK_CP;
}
static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.compute_pipe_count;
}
static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.first_compute_pipe;
}
static inline unsigned int get_pipes_num_cpsch(void)
{
	return PIPE_PER_ME_CP_SCHEDULING;
}
static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
	unsigned int nybble;

	nybble = (pdd->lds_base >> 60) & 0x0E;

	return nybble;
}
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
	unsigned int shared_base;

	shared_base = (pdd->lds_base >> 16) & 0xFF;

	return shared_base;
}
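
/*
 * For illustration (hypothetical values, not from the original source):
 * a 64-bit process with pdd->lds_base == 0x6000000000000000ULL yields
 * get_sh_mem_bases_nybble_64() == (0x6 & 0x0E) == 0x6, while a 32-bit
 * process with pdd->lds_base == 0x00AB0000 yields
 * get_sh_mem_bases_32() == 0xAB.
 */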
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);
static void init_process_memory(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	BUG_ON(!dqm || !qpd);

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
			ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
			DEFAULT_MTYPE(MTYPE_NONCACHED) |
			APE1_MTYPE(MTYPE_NONCACHED);
		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	if (qpd->pqm->process->is_32bit_user_mode) {
		temp = get_sh_mem_bases_32(pdd);
		qpd->sh_mem_bases = SHARED_BASE(temp);
		qpd->sh_mem_config |= PTR32;
	} else {
		temp = get_sh_mem_bases_nybble_64(pdd);
		qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
	}

	pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
}
static void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	/* Kaveri KFD VMIDs start from VMID 8 */
	allocated_vmid = bit + KFD_VMID_START_OFFSET;
	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}
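
/*
 * For illustration, assuming KFD_VMID_START_OFFSET == 8: the first free
 * bit of vmid_bitmap (bit 0) maps to hardware VMID 8, leaving VMIDs 0-7
 * to the graphics driver.
 */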
static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - KFD_VMID_START_OFFSET;

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval != 0) {
			mutex_unlock(&dqm->lock);
			return retval;
		}
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);

	if (retval != 0) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
		}
		mutex_unlock(&dqm->lock);
		return retval;
	}

	list_add(&q->list, &qpd->queues_list);
	dqm->queue_count++;
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	mutex_unlock(&dqm->lock);

	return 0;
}
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate; pipe < get_pipes_num(dqm);
			pipe = (pipe + 1) % get_pipes_num(dqm)) {
		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				QUEUES_PER_PIPE);

			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
				__func__, q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);

	return 0;
}
static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval != 0)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		return retval;
	}

	return 0;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd, *mqd_sdma;

	BUG_ON(!dqm || !q || !q->mqd || !qpd);

	pr_debug("kfd: In Func %s\n", __func__);

	mutex_lock(&dqm->lock);
	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	mqd_sdma = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_SDMA);
	if (mqd_sdma == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
			KFD_PREEMPT_TYPE_WAVEFRONT,
			QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
			q->pipe, q->queue);
	if (retval != 0) {
		mutex_unlock(&dqm->lock);
		return retval;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	}

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	dqm->queue_count--;

	mutex_unlock(&dqm->lock);

	return retval;
}
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !q->mqd);

	mutex_lock(&dqm->lock);
	mqd = dqm->get_mqd_manager(dqm, q->properties.type);
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if (q->properties.is_active)
		dqm->queue_count++;
	else
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

	mutex_unlock(&dqm->lock);

	return retval;
}
static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);

	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (mqd == NULL)
			pr_err("kfd: mqd manager is NULL\n");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}
static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;

	BUG_ON(!dqm || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	init_process_memory(dqm, qpd);
	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return 0;
}
static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	BUG_ON(!dqm || !qpd);

	BUG_ON(!list_empty(&qpd->queues_list));

	pr_debug("kfd: In func %s\n", __func__);

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
						ATC_VMID_PASID_MAPPING_VALID;
	return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
						vmid);
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
	 * scratch and GPUVM apertures.
	 * The hardware fills in the remaining 59 bits according to the
	 * following pattern:
	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
	 *
	 * (where X/Y is the configurable nybble with its low bit 0)
	 *
	 * LDS and scratch will have the same top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
	 * GPUVM can have a different top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
	 * We don't bother to support different top nybbles
	 * for LDS/Scratch and GPUVM.
	 */

	BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
		top_address_nybble == 0);

	return PRIVATE_BASE(top_address_nybble << 12) |
		SHARED_BASE(top_address_nybble << 12);
}
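
/*
 * A worked example (illustrative; it assumes the CIK register layout in
 * which SH_MEM_BASES.PRIVATE_BASE occupies the low 16 bits and
 * SH_MEM_BASES.SHARED_BASE the high 16 bits): for top_address_nybble ==
 * 0x6, 0x6 << 12 == 0x6000, so the top 3 bits of both fields are
 * programmed to 0b011 and the apertures land at 0x6000000000000000.
 */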
static int init_memory(struct device_queue_manager *dqm)
{
	int i, retval;

	for (i = 8; i < 16; i++)
		set_pasid_vmid_mapping(dqm, 0, i);

	retval = kfd2kgd->init_memory(dqm->dev->kgd);
	if (retval == 0)
		is_mem_initialized = true;
	return retval;
}
static int init_pipelines(struct device_queue_manager *dqm,
			unsigned int pipes_num, unsigned int first_pipe)
{
	void *hpdptr;
	struct mqd_manager *mqd;
	unsigned int i, err, inx;
	uint64_t pipe_hpd_addr;

	BUG_ON(!dqm || !dqm->dev);

	pr_debug("kfd: In func %s\n", __func__);

	/*
	 * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
	 * The driver never accesses this memory after zeroing it.
	 * It doesn't even have to be saved/restored on suspend/resume
	 * because it contains no data when there are no active queues.
	 */

	err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
					&dqm->pipeline_mem);

	if (err) {
		pr_err("kfd: failed to allocate vidmem for %d pipes\n",
			pipes_num);
		return -ENOMEM;
	}

	hpdptr = dqm->pipeline_mem->cpu_ptr;
	dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;

	memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL) {
		kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
		return -ENOMEM;
	}

	for (i = 0; i < pipes_num; i++) {
		inx = i + first_pipe;
		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
		/* = log2(bytes/4)-1 */
		kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
	}

	return 0;
}
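
/*
 * Checking the size encoding above (illustrative arithmetic): the
 * hardware expects log2(bytes / 4) - 1, and with CIK_HPD_EOP_BYTES ==
 * 2048 that is log2(512) - 1 == 8, which indeed equals
 * CIK_HPD_EOP_BYTES_LOG2 - 3.
 */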
static int init_scheduler(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In %s\n", __func__);

	retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
	if (retval != 0)
		return retval;

	retval = init_memory(dqm);

	return retval;
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;
	dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues) {
		mutex_destroy(&dqm->lock);
		return -ENOMEM;
	}

	for (i = 0; i < get_pipes_num(dqm); i++)
		dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;

	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}
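
/*
 * For illustration, assuming QUEUES_PER_PIPE == 8 and
 * CIK_SDMA_QUEUES == 4: each allocated_queues entry starts as 0xFF (all
 * eight HQD slots free) and sdma_bitmap starts as 0xF (all four SDMA
 * queues free).
 */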
static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	uint32_t value = SDMA_ATC;

	if (q->process->is_32bit_user_mode)
		value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
	else
		value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
							qpd_to_pdd(qpd)));
	q->properties.sdma_vm_addr = value;
}
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval != 0)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;

	pr_debug("kfd: sdma id is:    %d\n", q->sdma_id);
	pr_debug("     sdma queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("     sdma engine id: %d\n", q->properties.sdma_engine_id);

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		return retval;
	}

	init_sdma_vm(dqm, q, qpd);

	return 0;
}
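
/*
 * For illustration, assuming CIK_SDMA_QUEUES_PER_ENGINE == 2 and
 * CIK_SDMA_ENGINE_NUM == 2: sdma_ids 0 and 1 map to queues 0 and 1 on
 * engine 0, while sdma_ids 2 and 3 map to queues 0 and 1 on engine 1.
 */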
/*
 * Device Queue Manager implementation for cp scheduler
 */
static int set_sched_resources(struct device_queue_manager *dqm)
{
	struct scheduling_resources res;
	unsigned int queue_num, queue_mask;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s\n", __func__);

	queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
	queue_mask = (1 << queue_num) - 1;
	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
	res.vmid_mask <<= KFD_VMID_START_OFFSET;
	res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("kfd: scheduling resources:\n"
			"      vmid mask: 0x%8X\n"
			"     queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
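
/*
 * A worked example (illustrative, assuming PIPE_PER_ME_CP_SCHEDULING == 3,
 * QUEUES_PER_PIPE == 8, VMID_PER_DEVICE == 8, KFD_VMID_START_OFFSET == 8
 * and first_compute_pipe == 1): queue_num == 24, queue_mask == 0xFFFFFF,
 * res.vmid_mask == 0xFF << 8 == 0xFF00, and res.queue_mask ==
 * 0xFFFFFF << 8 == 0xFFFFFF00.
 */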
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num_cpsch());

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
	if (retval != 0)
		goto fail_init_pipelines;

	return 0;

fail_init_pipelines:
	mutex_destroy(&dqm->lock);
	return retval;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	int retval;

	BUG_ON(!dqm);

	retval = pm_init(&dqm->packets, dqm);
	if (retval != 0)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval != 0)
		goto fail_set_sched_resources;

	pr_debug("kfd: allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval != 0)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
	list_for_each_entry(node, &dqm->queues, list)
		if (node->qpd->pqm->process && dqm->dev)
			kfd_bind_process_to_device(dqm->dev,
						node->qpd->pqm->process);

	execute_queues_cpsch(dqm, true);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	destroy_queues_cpsch(dqm, true);

	list_for_each_entry(node, &dqm->queues, list) {
		pdd = qpd_to_pdd(node->qpd);
		pdd->bound = false;
	}
	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);
	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);

	return 0;
}
static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq);

	pr_debug("kfd: In %s\n", __func__);

	mutex_lock(&dqm->lock);
	destroy_queues_cpsch(dqm, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);
}
static void select_sdma_engine_id(struct queue *q)
{
	static int sdma_id;

	q->sdma_id = sdma_id;
	sdma_id = (sdma_id + 1) % 2;
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	retval = 0;

	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		select_sdma_engine_id(q);

	mqd = dqm->get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout)
{
	BUG_ON(!fence_addr);
	timeout += jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, timeout)) {
			pr_err("kfd: qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		cpu_relax();
	}

	return 0;
}
static int destroy_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false,
			sdma_engine);
}
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (!dqm->active_runlist)
		goto out;

	pr_debug("kfd: Before destroying queues, sdma queue count is: %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		destroy_sdma_queues(dqm, 0);
		destroy_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
	if (retval != 0)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* the wait is expected to complete within the preemption timeout */
	fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false);
	if (retval != 0) {
		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queue preemption\n");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval != 0) {
		pr_err("kfd: failed to execute runlist\n");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !qpd || !q);

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);
	mqd = dqm->get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count--;

	list_del(&q->list);
	dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
	mutex_unlock(&dqm->lock);
	return retval;
}
/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
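
/*
 * A worked example of the checks below (hypothetical values): base ==
 * 0x0000700000000000 with size == 0x10000 gives limit ==
 * 0x000070000000FFFF; then base & APE1_FIXED_BITS_MASK == 0 and
 * limit & APE1_FIXED_BITS_MASK == 0xFFFF == APE1_LIMIT_ALIGNMENT, so the
 * pair is accepted and programmed as base >> 16 and limit >> 16.
 */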
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				enum cache_policy default_policy,
				enum cache_policy alternate_policy,
				void __user *alternate_aperture_base,
				uint64_t alternate_aperture_size)
{
	uint32_t default_mtype;
	uint32_t ape1_mtype;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base)
			goto out;

		if ((base & APE1_FIXED_BITS_MASK) != 0)
			goto out;

		if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
			goto out;

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	default_mtype = (default_policy == cache_policy_coherent) ?
			MTYPE_NONCACHED :
			MTYPE_CACHED;

	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
			MTYPE_NONCACHED :
			MTYPE_CACHED;

	qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
			| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
			| DEFAULT_MTYPE(default_mtype)
			| APE1_MTYPE(ape1_mtype);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

	mutex_unlock(&dqm->lock);
	return true;

out:
	mutex_unlock(&dqm->lock);
	return false;
}
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	BUG_ON(!dev);

	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->create_queue = create_queue_cpsch;
		dqm->initialize = initialize_cpsch;
		dqm->start = start_cpsch;
		dqm->stop = stop_cpsch;
		dqm->destroy_queue = destroy_queue_cpsch;
		dqm->update_queue = update_queue;
		dqm->get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->register_process = register_process_nocpsch;
		dqm->unregister_process = unregister_process_nocpsch;
		dqm->uninitialize = uninitialize_nocpsch;
		dqm->create_kernel_queue = create_kernel_queue_cpsch;
		dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->start = start_nocpsch;
		dqm->stop = stop_nocpsch;
		dqm->create_queue = create_queue_nocpsch;
		dqm->destroy_queue = destroy_queue_nocpsch;
		dqm->update_queue = update_queue;
		dqm->get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->register_process = register_process_nocpsch;
		dqm->unregister_process = unregister_process_nocpsch;
		dqm->initialize = initialize_nocpsch;
		dqm->uninitialize = uninitialize_nocpsch;
		dqm->set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		BUG();
		break;
	}

	if (dqm->initialize(dqm) != 0) {
		kfree(dqm);
		return NULL;
	}

	return dqm;
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	dqm->uninitialize(dqm);
	kfree(dqm);
}