/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/amd-iommu.h>
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers.h"

#define MQD_SIZE_ALIGNED 768
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED
};
static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED
};
struct kfd_deviceid {
	unsigned short did;
	const struct kfd_device_info *device_info;
};
/* Please keep this sorted by increasing device id. */
static const struct kfd_deviceid supported_devices[] = {
	{ 0x1304, &kaveri_device_info },	/* Kaveri */
	{ 0x1305, &kaveri_device_info },	/* Kaveri */
	{ 0x1306, &kaveri_device_info },	/* Kaveri */
	{ 0x1307, &kaveri_device_info },	/* Kaveri */
	{ 0x1309, &kaveri_device_info },	/* Kaveri */
	{ 0x130A, &kaveri_device_info },	/* Kaveri */
	{ 0x130B, &kaveri_device_info },	/* Kaveri */
	{ 0x130C, &kaveri_device_info },	/* Kaveri */
	{ 0x130D, &kaveri_device_info },	/* Kaveri */
	{ 0x130E, &kaveri_device_info },	/* Kaveri */
	{ 0x130F, &kaveri_device_info },	/* Kaveri */
	{ 0x1310, &kaveri_device_info },	/* Kaveri */
	{ 0x1311, &kaveri_device_info },	/* Kaveri */
	{ 0x1312, &kaveri_device_info },	/* Kaveri */
	{ 0x1313, &kaveri_device_info },	/* Kaveri */
	{ 0x1315, &kaveri_device_info },	/* Kaveri */
	{ 0x1316, &kaveri_device_info },	/* Kaveri */
	{ 0x1317, &kaveri_device_info },	/* Kaveri */
	{ 0x1318, &kaveri_device_info },	/* Kaveri */
	{ 0x131B, &kaveri_device_info },	/* Kaveri */
	{ 0x131C, &kaveri_device_info },	/* Kaveri */
	{ 0x131D, &kaveri_device_info }		/* Kaveri */
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		if (supported_devices[i].did == did) {
			BUG_ON(supported_devices[i].device_info == NULL);
			return supported_devices[i].device_info;
		}
	}

	return NULL;
}
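
/*
 * Illustrative alternative (not part of the original driver): because
 * supported_devices[] is kept sorted by increasing device id, the linear
 * scan above could use bsearch() from <linux/bsearch.h>, which is already
 * included at the top of this file. The comparator and wrapper below are a
 * hypothetical sketch, kept under "#if 0" so they are not compiled.
 */
#if 0
static int kfd_deviceid_cmp(const void *key, const void *elem)
{
	unsigned short did = *(const unsigned short *)key;
	const struct kfd_deviceid *entry = elem;

	if (did < entry->did)
		return -1;
	if (did > entry->did)
		return 1;
	return 0;
}

static const struct kfd_device_info *lookup_device_info_bsearch(
							unsigned short did)
{
	const struct kfd_deviceid *entry;

	entry = bsearch(&did, supported_devices,
			ARRAY_SIZE(supported_devices),
			sizeof(supported_devices[0]),
			kfd_deviceid_cmp);

	return entry ? entry->device_info : NULL;
}
#endif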
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
	struct kfd_dev *kfd;

	const struct kfd_device_info *device_info =
				lookup_device_info(pdev->device);

	if (!device_info)
		return NULL;

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
			sizeof(kfd->doorbell_available_index));

	return kfd;
}
static bool device_iommu_pasid_init(struct kfd_dev *kfd)
{
	const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
					AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
					AMD_IOMMU_DEVICE_FLAG_PASID_SUP;

	struct amd_iommu_device_info iommu_info;
	unsigned int pasid_limit;
	int err;

	err = amd_iommu_device_info(kfd->pdev, &iommu_info);
	if (err < 0) {
		dev_err(kfd_device,
			"error getting iommu info. is the iommu enabled?\n");
		return false;
	}

	if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
		dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
		return false;
	}

	pasid_limit = min_t(unsigned int,
			(unsigned int)1 << kfd->device_info->max_pasid_bits,
			iommu_info.max_pasids);
	/*
	 * The last pasid is reserved for kernel queue doorbells; in the
	 * future it might be used for a kernel thread.
	 */
	pasid_limit = min_t(unsigned int,
				pasid_limit,
				kfd->doorbell_process_limit - 1);

	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
	if (err < 0) {
		dev_err(kfd_device, "error initializing iommu device\n");
		return false;
	}

	if (!kfd_set_pasid_limit(pasid_limit)) {
		dev_err(kfd_device, "error setting pasid limit\n");
		amd_iommu_free_device(kfd->pdev);
		return false;
	}

	return true;
}
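
/*
 * Worked example (illustrative numbers, not taken from this file): with
 * max_pasid_bits = 16 the device-side cap is 1 << 16 = 65536 pasids. If
 * the IOMMU reported max_pasids = 32768 and doorbell_process_limit were
 * 512, the two min_t() steps above would yield
 * min(min(65536, 32768), 512 - 1) = 511, keeping the last doorbell page
 * for kernel queues.
 */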
static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
{
	struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);

	if (dev)
		kfd_unbind_process_from_device(dev, pasid);
}
/*
 * This function is called by the IOMMU driver on PPR failure.
 */
static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
		unsigned long address, u16 flags)
{
	struct kfd_dev *dev;

	dev_warn(kfd_device,
			"Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn),
			pasid,
			address,
			flags);

	dev = kfd_device_by_pci_dev(pdev);
	BUG_ON(dev == NULL);

	kfd_signal_iommu_event(dev, pasid, address,
			flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);

	return AMD_IOMMU_INV_PRI_RSP_INVALID;
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
		const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->shared_resources = *gpu_resources;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
		max_num_of_queues_per_device *
		sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;
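
	/*
	 * Sizing sketch (hypothetical values for illustration only): if
	 * max_num_of_queues_per_device were 4096, the MQD portion alone
	 * would be 4096 * 768 = 3 MiB; the runlist, HIQ/DIQ and 512 KiB
	 * slack are added on top of that. The actual figure depends on the
	 * module parameter and the pm4 packet sizes at build time.
	 */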
	if (kfd->kfd2kgd->init_gtt_mem_allocation(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
		dev_err(kfd_device,
			"Could not allocate %d bytes for device (%x:%x)\n",
			size, kfd->pdev->vendor, kfd->pdev->device);
		goto out;
	}

	dev_info(kfd_device,
		"Allocated %d bytes on gart for device(%x:%x)\n",
		size, kfd->pdev->vendor, kfd->pdev->device);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device,
			"Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	kfd_doorbell_init(kfd);

	if (kfd_topology_add_device(kfd) != 0) {
		dev_err(kfd_device,
			"Error adding device (%x:%x) to topology\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto kfd_topology_add_device_error;
	}

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing interrupts for device (%x:%x)\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto kfd_interrupt_error;
	}

	if (!device_iommu_pasid_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing iommuv2 for device (%x:%x)\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto device_iommu_pasid_error;
	}
	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
					iommu_pasid_shutdown_callback);
	amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device,
			"Error initializing queue manager for device (%x:%x)\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto device_queue_manager_error;
	}

	if (kfd->dqm->ops.start(kfd->dqm) != 0) {
		dev_err(kfd_device,
			"Error starting queue manager for device (%x:%x)\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	kfd->init_complete = true;
	dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
		sched_policy);

	goto out;

dqm_start_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	amd_iommu_free_device(kfd->pdev);
device_iommu_pasid_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	dev_err(kfd_device,
		"device (%x:%x) NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		device_queue_manager_uninit(kfd->dqm);
		amd_iommu_free_device(kfd->pdev);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_gtt_sa_fini(kfd);
		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	}

	kfree(kfd);
}
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
	BUG_ON(kfd == NULL);

	if (kfd->init_complete) {
		kfd->dqm->ops.stop(kfd->dqm);
		amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
		amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
		amd_iommu_free_device(kfd->pdev);
	}
}
int kgd2kfd_resume(struct kfd_dev *kfd)
{
	unsigned int pasid_limit;
	int err;

	BUG_ON(kfd == NULL);

	pasid_limit = kfd_get_pasid_limit();

	if (kfd->init_complete) {
		err = amd_iommu_init_device(kfd->pdev, pasid_limit);
		if (err < 0)
			return -ENXIO;
		amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
						iommu_pasid_shutdown_callback);
		amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
		kfd->dqm->ops.start(kfd->dqm);
	}

	return 0;
}
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	if (!kfd->init_complete)
		return;

	spin_lock(&kfd->interrupt_lock);

	/*
	 * Only the cheap wanted/enqueue checks run in interrupt context;
	 * the real processing is deferred to the interrupt_work worker.
	 */
	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry)
	    && enqueue_ih_ring_entry(kfd, ih_ring_entry))
		schedule_work(&kfd->interrupt_work);

	spin_unlock(&kfd->interrupt_lock);
}
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_bits;

	BUG_ON(!kfd->gtt_mem);
	BUG_ON(buf_size < chunk_size);
	BUG_ON(buf_size == 0);
	BUG_ON(chunk_size == 0);

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	/* One bit per chunk; num_of_bits is really the bitmap size in bytes. */
	num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
	BUG_ON(num_of_bits == 0);

	kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}
static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
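
/*
 * Example (illustrative): with chunk_size = 512, chunk number 3 maps to
 * start_addr + 3 * 512 = start_addr + 1536, and the same offset applies
 * on both the GPU and CPU sides of the GTT buffer.
 */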
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	BUG_ON(!kfd);

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if ((*mem_obj) == NULL)
		return -ENOMEM;

	pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("kfd: found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("kfd: single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous then we need to
		 * restart our search from the last free chunk we found
		 * (which wasn't contiguous to the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("kfd: range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	BUG_ON(!kfd);

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}