/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include <nouveau_drm.h>
#include "nouveau_dma.h"
#include <engine/fifo.h>
#include <core/ramht.h>
#include "nouveau_fence.h"
#include "nouveau_software.h"
MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
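
/* allocates, pins and maps the channel's DMA push buffer, then creates
 * the DMA object the hardware uses to fetch commands from it.
 */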
static int
nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
{
	u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;
	/* allocate buffer object */
	ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
	if (ret)
		goto out;

	ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
	if (ret)
		goto out;

	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret)
		goto out;
	/* create DMA object covering the entire memtype where the push
	 * buffer resides, userspace can submit its own push buffers from
	 * anywhere within the same memtype.
	 */
	chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
					 &chan->pushbuf_vma);
		if (ret)
			goto out;

		if (dev_priv->card_type < NV_C0) {
			ret = nouveau_gpuobj_dma_new(chan,
						     NV_CLASS_DMA_IN_MEMORY, 0,
						     (1ULL << 40),
						     NV_MEM_ACCESS_RO,
						     NV_MEM_TARGET_VM,
						     &chan->pushbuf);
		}
		chan->pushbuf_base = chan->pushbuf_vma.offset;
	} else
	if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_GART,
					     &chan->pushbuf);
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_VRAM,
					     &chan->pushbuf);
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     pci_resource_start(dev->pdev, 1),
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_PCI,
					     &chan->pushbuf);
	}
out:
	if (ret) {
		NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_gpuobj_ref(NULL, &chan->pushbuf);
		if (chan->pushbuf_bo) {
			nouveau_bo_unmap(chan->pushbuf_bo);
			nouveau_bo_ref(NULL, &chan->pushbuf_bo);
		}
	}

	return ret;
}
/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_fence_priv *fence = dev_priv->fence.func;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret, i;
	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;
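
	/* two counters guard the channel: chan->ref is a kref on the
	 * structure itself (released via nouveau_channel_del()), while
	 * chan->users counts active users and gates hardware teardown
	 * in nouveau_channel_put_unlocked().
	 */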
	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);
	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (dev_priv->card_type == NV_50 && chan->id == 0)
			continue;

		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
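
	/* the device-wide channels.ptr table now holds its own reference
	 * to the channel (taken by nouveau_channel_ref() above); it is
	 * dropped again during teardown in nouveau_channel_put_unlocked().
	 */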
	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	/* setup channel's memory and vm */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}
	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}
	/* Allocate DMA push buffer */
	ret = nouveau_channel_pushbuf_init(chan);
	if (ret) {
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}
	nouveau_dma_init(chan);
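	/* offsets of the DMA put/get pointers within the channel's
	 * per-channel control (USER) area; NV50 and later also expose
	 * a high word for GET.
	 */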
	chan->user_put = 0x40;
	chan->user_get = 0x44;
	if (dev_priv->card_type >= NV_50)
		chan->user_get_hi = 0x60;
	/* create fifo context */
	ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}
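
	/* the first NOUVEAU_DMA_SKIPS words of the ring stay unused as
	 * padding, reportedly to sidestep hardware trouble when jumping
	 * back to the start of the push buffer.
	 */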
	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING  (chan, 0x00000000);
	ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}
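
	/* on pre-Fermi chips the software object is additionally bound to
	 * a subchannel here, so the kernel can service software methods
	 * (used, among other things, by the fence code).
	 */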
	if (dev_priv->card_type < NV_C0) {
		ret = RING_SPACE(chan, 2);
		if (ret) {
			nouveau_channel_put(&chan);
			return ret;
		}

		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
		OUT_RING  (chan, NvSw);
		FIRE_RING (chan);
	}
	ret = fence->context_new(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}
	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	if (fpriv) {
		spin_lock(&fpriv->lock);
		list_add(&chan->list, &fpriv->channels);
		spin_unlock(&fpriv->lock);
	}

	*chan_ret = chan;
	return 0;
}
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
	struct nouveau_channel *chan = NULL;

	if (likely(ref && atomic_inc_not_zero(&ref->users)))
		nouveau_channel_ref(ref, &chan);

	return chan;
}
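
/* looks up the channel with the given id owned by file_priv.  On success
 * the channel is returned with a user reference held and chan->mutex
 * locked; callers pair it with nouveau_channel_put(), e.g.:
 *
 *	chan = nouveau_channel_get(file_priv, id);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	... submit work ...
 *	nouveau_channel_put(&chan);
 */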
struct nouveau_channel *
nouveau_channel_get(struct drm_file *file_priv, int id)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;

	spin_lock(&fpriv->lock);
	list_for_each_entry(chan, &fpriv->channels, list) {
		if (chan->id == id) {
			chan = nouveau_channel_get_unlocked(chan);
			spin_unlock(&fpriv->lock);
			mutex_lock(&chan->mutex);
			return chan;
		}
	}
	spin_unlock(&fpriv->lock);

	return ERR_PTR(-EINVAL);
}
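
/* drops a user reference without chan->mutex held; when the last user
 * goes away the channel's hardware state is torn down and its resources
 * released before the final structure reference is dropped.
 */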
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fence_priv *fence = dev_priv->fence.func;
	unsigned long flags;
	int i;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}
	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it a chance to idle */
	nouveau_channel_idle(chan);

	/* destroy the engine specific contexts */
	for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
		if (chan->engctx[i])
			dev_priv->eng[i]->context_del(chan, i);
	}

	if (chan->fence)
		fence->context_del(chan);
	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_ramht_ref(NULL, &chan->ramht, chan);
	nouveau_notifier_takedown_channel(chan);
	nouveau_gpuobj_channel_takedown(chan);

	nouveau_channel_ref(NULL, pchan);
}
void
nouveau_channel_put(struct nouveau_channel **pchan)
{
	mutex_unlock(&(*pchan)->mutex);
	nouveau_channel_put_unlocked(pchan);
}
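
/* kref release callback: frees the channel structure once the final
 * reference taken through nouveau_channel_ref() is dropped.
 */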
static void
nouveau_channel_del(struct kref *ref)
{
	struct nouveau_channel *chan =
		container_of(ref, struct nouveau_channel, ref);

	kfree(chan);
}
void
nouveau_channel_ref(struct nouveau_channel *chan,
		    struct nouveau_channel **pchan)
{
	if (chan)
		kref_get(&chan->ref);

	if (*pchan)
		kref_put(&(*pchan)->ref, nouveau_channel_del);

	*pchan = chan;
}
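
/* idles the channel by emitting a fence and waiting for it to signal,
 * which guarantees all previously submitted commands have completed.
 */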
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);
	if (!ret) {
		ret = nouveau_fence_wait(fence, false, false);
		nouveau_fence_unref(&fence);
	}

	if (ret)
		NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	return ret;
}
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_channel *chan;
	int i;

	if (!pfifo)
		return;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < pfifo->channels; i++) {
		chan = nouveau_channel_get(file_priv, i);
		if (IS_ERR(chan))
			continue;

		list_del(&chan->list);
		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}