/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
29 * amdgpu_ih_ring_alloc - allocate memory for the IH ring
31 * @adev: amdgpu_device pointer
33 * Allocate a ring buffer for the interrupt controller.
34 * Returns 0 for success, errors for failure.
36 static int amdgpu_ih_ring_alloc(struct amdgpu_device
*adev
)
40 /* Allocate ring buffer */
41 if (adev
->irq
.ih
.ring_obj
== NULL
) {
42 r
= amdgpu_bo_create(adev
, adev
->irq
.ih
.ring_size
,
44 AMDGPU_GEM_DOMAIN_GTT
, 0,
45 NULL
, &adev
->irq
.ih
.ring_obj
);
47 DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r
);
50 r
= amdgpu_bo_reserve(adev
->irq
.ih
.ring_obj
, false);
53 r
= amdgpu_bo_pin(adev
->irq
.ih
.ring_obj
,
54 AMDGPU_GEM_DOMAIN_GTT
,
55 &adev
->irq
.ih
.gpu_addr
);
57 amdgpu_bo_unreserve(adev
->irq
.ih
.ring_obj
);
58 DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r
);
61 r
= amdgpu_bo_kmap(adev
->irq
.ih
.ring_obj
,
62 (void **)&adev
->irq
.ih
.ring
);
63 amdgpu_bo_unreserve(adev
->irq
.ih
.ring_obj
);
65 DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r
);
73 * amdgpu_ih_ring_init - initialize the IH state
75 * @adev: amdgpu_device pointer
77 * Initializes the IH state and allocates a buffer
78 * for the IH ring buffer.
79 * Returns 0 for success, errors for failure.
81 int amdgpu_ih_ring_init(struct amdgpu_device
*adev
, unsigned ring_size
,
88 rb_bufsz
= order_base_2(ring_size
/ 4);
89 ring_size
= (1 << rb_bufsz
) * 4;
90 adev
->irq
.ih
.ring_size
= ring_size
;
91 adev
->irq
.ih
.ptr_mask
= adev
->irq
.ih
.ring_size
- 1;
92 adev
->irq
.ih
.rptr
= 0;
93 adev
->irq
.ih
.use_bus_addr
= use_bus_addr
;
95 if (adev
->irq
.ih
.use_bus_addr
) {
96 if (!adev
->irq
.ih
.ring
) {
97 /* add 8 bytes for the rptr/wptr shadows and
98 * add them to the end of the ring allocation.
100 adev
->irq
.ih
.ring
= kzalloc(adev
->irq
.ih
.ring_size
+ 8, GFP_KERNEL
);
101 if (adev
->irq
.ih
.ring
== NULL
)
103 adev
->irq
.ih
.rb_dma_addr
= pci_map_single(adev
->pdev
,
104 (void *)adev
->irq
.ih
.ring
,
105 adev
->irq
.ih
.ring_size
,
106 PCI_DMA_BIDIRECTIONAL
);
107 if (pci_dma_mapping_error(adev
->pdev
, adev
->irq
.ih
.rb_dma_addr
)) {
108 dev_err(&adev
->pdev
->dev
, "Failed to DMA MAP the IH RB page\n");
109 kfree((void *)adev
->irq
.ih
.ring
);
112 adev
->irq
.ih
.wptr_offs
= (adev
->irq
.ih
.ring_size
/ 4) + 0;
113 adev
->irq
.ih
.rptr_offs
= (adev
->irq
.ih
.ring_size
/ 4) + 1;
117 r
= amdgpu_wb_get(adev
, &adev
->irq
.ih
.wptr_offs
);
119 dev_err(adev
->dev
, "(%d) ih wptr_offs wb alloc failed\n", r
);
123 r
= amdgpu_wb_get(adev
, &adev
->irq
.ih
.rptr_offs
);
125 amdgpu_wb_free(adev
, adev
->irq
.ih
.wptr_offs
);
126 dev_err(adev
->dev
, "(%d) ih rptr_offs wb alloc failed\n", r
);
130 return amdgpu_ih_ring_alloc(adev
);
135 * amdgpu_ih_ring_fini - tear down the IH state
137 * @adev: amdgpu_device pointer
139 * Tears down the IH state and frees buffer
140 * used for the IH ring buffer.
142 void amdgpu_ih_ring_fini(struct amdgpu_device
*adev
)
146 if (adev
->irq
.ih
.use_bus_addr
) {
147 if (adev
->irq
.ih
.ring
) {
148 /* add 8 bytes for the rptr/wptr shadows and
149 * add them to the end of the ring allocation.
151 pci_unmap_single(adev
->pdev
, adev
->irq
.ih
.rb_dma_addr
,
152 adev
->irq
.ih
.ring_size
+ 8, PCI_DMA_BIDIRECTIONAL
);
153 kfree((void *)adev
->irq
.ih
.ring
);
154 adev
->irq
.ih
.ring
= NULL
;
157 if (adev
->irq
.ih
.ring_obj
) {
158 r
= amdgpu_bo_reserve(adev
->irq
.ih
.ring_obj
, false);
159 if (likely(r
== 0)) {
160 amdgpu_bo_kunmap(adev
->irq
.ih
.ring_obj
);
161 amdgpu_bo_unpin(adev
->irq
.ih
.ring_obj
);
162 amdgpu_bo_unreserve(adev
->irq
.ih
.ring_obj
);
164 amdgpu_bo_unref(&adev
->irq
.ih
.ring_obj
);
165 adev
->irq
.ih
.ring
= NULL
;
166 adev
->irq
.ih
.ring_obj
= NULL
;
168 amdgpu_wb_free(adev
, adev
->irq
.ih
.wptr_offs
);
169 amdgpu_wb_free(adev
, adev
->irq
.ih
.rptr_offs
);
174 * amdgpu_ih_process - interrupt handler
176 * @adev: amdgpu_device pointer
178 * Interrupt hander (VI), walk the IH ring.
179 * Returns irq process return code.
181 int amdgpu_ih_process(struct amdgpu_device
*adev
)
183 struct amdgpu_iv_entry entry
;
186 if (!adev
->irq
.ih
.enabled
|| adev
->shutdown
)
189 wptr
= amdgpu_ih_get_wptr(adev
);
192 /* is somebody else already processing irqs? */
193 if (atomic_xchg(&adev
->irq
.ih
.lock
, 1))
196 DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__
, adev
->irq
.ih
.rptr
, wptr
);
198 /* Order reading of wptr vs. reading of IH ring data */
201 while (adev
->irq
.ih
.rptr
!= wptr
) {
202 amdgpu_ih_decode_iv(adev
, &entry
);
203 adev
->irq
.ih
.rptr
&= adev
->irq
.ih
.ptr_mask
;
205 amdgpu_irq_dispatch(adev
, &entry
);
207 amdgpu_ih_set_rptr(adev
);
208 atomic_set(&adev
->irq
.ih
.lock
, 0);
210 /* make sure wptr hasn't changed while processing */
211 wptr
= amdgpu_ih_get_wptr(adev
);
212 if (wptr
!= adev
->irq
.ih
.rptr
)
This page took 0.044685 seconds and 4 git commands to generate.