/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Ben Skeggs <darktama@iinet.net.au>
 */
35 #include "nouveau_drv.h"
36 #include <nouveau_drm.h>
37 #include <engine/fifo.h>
38 #include <core/ramht.h>
39 #include "nouveau_software.h"
/*
 * One software-method binding: a list node plus a handler invoked as
 * exec(chan, class, mthd, data).
 * NOTE(review): lossy extract -- original line numbers are fused into the
 * text, interior lines (e.g. the mthd field referenced by
 * nouveau_gpuobj_mthd_call below) and the closing "};" are missing.
 */
41 struct nouveau_gpuobj_method
{
42 struct list_head head
;
/* handler: (channel, object class, method offset, data word) */
44 int (*exec
)(struct nouveau_channel
*, u32
class, u32 mthd
, u32 data
);
/*
 * Per-class bookkeeping: a node on dev_priv->classes plus the list of
 * software methods registered against this class.
 * NOTE(review): lossy extract -- fields referenced elsewhere in this file
 * (oc->engine) and the closing "};" are not visible here.
 */
47 struct nouveau_gpuobj_class
{
48 struct list_head head
;
/* list of struct nouveau_gpuobj_method entries (see methods list walks) */
49 struct list_head methods
;
/*
 * Register a gpuobj class/engine pairing on this device: allocate a
 * tracking struct, initialise its (empty) software-method list, and link
 * it onto dev_priv->classes.
 * NOTE(review): lossy extract -- the kzalloc NULL check, the assignments
 * of class/engine into *oc, and the return statement are missing from
 * this view; restore from the original file.
 */
55 nouveau_gpuobj_class_new(struct drm_device
*dev
, u32
class, u32 engine
)
57 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
58 struct nouveau_gpuobj_class
*oc
;
/* zeroed allocation of the per-class tracking struct */
60 oc
= kzalloc(sizeof(*oc
), GFP_KERNEL
);
64 INIT_LIST_HEAD(&oc
->methods
);
/* publish onto the device-wide class list */
67 list_add(&oc
->head
, &dev_priv
->classes
);
/*
 * Attach a software-method handler to an already-registered class: walk
 * dev_priv->classes for the matching class, allocate a method entry and
 * link it onto that class's methods list.
 * NOTE(review): lossy extract -- the class-match test inside the loop,
 * the kzalloc NULL check, the om field assignments (mthd/exec) and the
 * return paths are missing from this view.
 */
72 nouveau_gpuobj_mthd_new(struct drm_device
*dev
, u32
class, u32 mthd
,
73 int (*exec
)(struct nouveau_channel
*, u32
, u32
, u32
))
75 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
76 struct nouveau_gpuobj_method
*om
;
77 struct nouveau_gpuobj_class
*oc
;
/* find the class this method belongs to */
79 list_for_each_entry(oc
, &dev_priv
->classes
, head
) {
87 om
= kzalloc(sizeof(*om
), GFP_KERNEL
);
/* register the handler on the matched class */
93 list_add(&om
->head
, &oc
->methods
);
/*
 * Dispatch a software method on a channel: walk the device's class list,
 * then the matching class's method list, and invoke the registered
 * handler for 'mthd'.  Returns the handler's result when a match is hit.
 * NOTE(review): lossy extract -- the class-match test and the fallthrough
 * return value (presumably an error such as -ENOENT) are missing from
 * this view.
 */
98 nouveau_gpuobj_mthd_call(struct nouveau_channel
*chan
,
99 u32
class, u32 mthd
, u32 data
)
101 struct drm_nouveau_private
*dev_priv
= chan
->dev
->dev_private
;
102 struct nouveau_gpuobj_method
*om
;
103 struct nouveau_gpuobj_class
*oc
;
105 list_for_each_entry(oc
, &dev_priv
->classes
, head
) {
/* scan this class's registered software methods */
109 list_for_each_entry(om
, &oc
->methods
, head
) {
110 if (om
->mthd
== mthd
)
/* delegate to the bound handler */
111 return om
->exec(chan
, class, mthd
, data
);
/*
 * Software-method dispatch by channel id rather than channel pointer:
 * resolve chid to a channel under the channels.lock spinlock (IRQ-safe),
 * then forward to nouveau_gpuobj_mthd_call().
 * NOTE(review): lossy extract -- the declarations of 'flags' and 'ret',
 * the chan == NULL guard (if any) and the return are missing; note the
 * mthd_call happens while the spinlock is still held.
 */
119 nouveau_gpuobj_mthd_call2(struct drm_device
*dev
, int chid
,
120 u32
class, u32 mthd
, u32 data
)
122 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
123 struct nouveau_fifo_priv
*pfifo
= nv_engine(dev
, NVOBJ_ENGINE_FIFO
);
124 struct nouveau_channel
*chan
= NULL
;
/* channel table is protected by an IRQ-safe spinlock */
128 spin_lock_irqsave(&dev_priv
->channels
.lock
, flags
);
/* only accept ids inside the fifo's channel range */
129 if (chid
>= 0 && chid
< pfifo
->channels
)
130 chan
= dev_priv
->channels
.ptr
[chid
];
132 ret
= nouveau_gpuobj_mthd_call(chan
, class, mthd
, data
);
133 spin_unlock_irqrestore(&dev_priv
->channels
.lock
, flags
);
/*
 * Fill in an NV50-style DMA object at 'offset' inside 'obj': encode
 * comp/type/class/access/target into flags0, convert (base, size) into a
 * base+limit pair, write the six 32-bit words via nv_wo32() and flush
 * instance memory.
 * NOTE(review): lossy extract -- the trailing signature parameters
 * (type/comp per the nv50_gpuobj_dma_new caller), the flags0
 * declaration, the switch statements wrapping the case labels, and the
 * break/default arms are missing from this view.
 */
138 nv50_gpuobj_dma_init(struct nouveau_gpuobj
*obj
, u32 offset
, int class,
139 u64 base
, u64 size
, int target
, int access
,
142 struct drm_nouveau_private
*dev_priv
= obj
->dev
->dev_private
;
/* pack compression, storage type and object class into word 0 */
145 flags0
= (comp
<< 29) | (type
<< 22) | class;
146 flags0
|= 0x00100000;
/* access mode bits (cases below belong to a switch on 'access') */
149 case NV_MEM_ACCESS_RO
: flags0
|= 0x00040000; break;
150 case NV_MEM_ACCESS_RW
:
151 case NV_MEM_ACCESS_WO
: flags0
|= 0x00080000; break;
/* target aperture bits (cases below belong to a switch on 'target') */
157 case NV_MEM_TARGET_VRAM
:
158 flags0
|= 0x00010000;
160 case NV_MEM_TARGET_PCI
:
161 flags0
|= 0x00020000;
163 case NV_MEM_TARGET_PCI_NOSNOOP
:
164 flags0
|= 0x00030000;
166 case NV_MEM_TARGET_GART
:
/* GART addresses are offset by the aperture base */
167 base
+= dev_priv
->gart_info
.aper_base
;
169 flags0
&= ~0x00100000;
173 /* convert to base + limit */
174 size
= (base
+ size
) - 1;
/* write the DMA object words into instance memory */
176 nv_wo32(obj
, offset
+ 0x00, flags0
);
177 nv_wo32(obj
, offset
+ 0x04, lower_32_bits(size
));
178 nv_wo32(obj
, offset
+ 0x08, lower_32_bits(base
));
/* high bits of limit (byte 3) and base (low byte) share word 0x0c */
179 nv_wo32(obj
, offset
+ 0x0c, upper_32_bits(size
) << 24 |
180 upper_32_bits(base
));
181 nv_wo32(obj
, offset
+ 0x10, 0x00000000);
182 nv_wo32(obj
, offset
+ 0x14, 0x00000000);
/* make the writes visible to the GPU */
184 nvimem_flush(obj
->dev
);
/*
 * Allocate a 24-byte, 16-aligned gpuobj on the channel and initialise it
 * as an NV50 DMA object via nv50_gpuobj_dma_init().
 * NOTE(review): lossy extract -- the 'ret' declaration, the error check
 * after nouveau_gpuobj_new(), the tail of the dma_init argument list
 * (access/type/comp) and the return are missing from this view.
 */
188 nv50_gpuobj_dma_new(struct nouveau_channel
*chan
, int class, u64 base
, u64 size
,
189 int target
, int access
, u32 type
, u32 comp
,
190 struct nouveau_gpuobj
**pobj
)
192 struct drm_device
*dev
= chan
->dev
;
/* 24 bytes: six 32-bit words written by nv50_gpuobj_dma_init() */
195 ret
= nouveau_gpuobj_new(dev
, chan
, 24, 16, NVOBJ_FLAG_ZERO_FREE
, pobj
);
199 nv50_gpuobj_dma_init(*pobj
, 0, class, base
, size
, target
,
/*
 * Create a DMA context object for a channel.  On NV50+ cards this
 * delegates to nv50_gpuobj_dma_new() (with VM compression/type when the
 * target is the channel VM).  On earlier cards it resolves GART targets
 * to a PCI aperture address (or reuses the shared sg ctxdma for PDMA),
 * packs flags0/flags2, allocates a 16-byte object and writes the four
 * object words.
 * NOTE(review): lossy extract -- the flags0/flags2 declarations, the
 * switch statements wrapping the case labels, several break/return/error
 * checks and closing braces are missing from this view.
 */
205 nouveau_gpuobj_dma_new(struct nouveau_channel
*chan
, int class, u64 base
,
206 u64 size
, int access
, int target
,
207 struct nouveau_gpuobj
**pobj
)
209 struct drm_nouveau_private
*dev_priv
= chan
->dev
->dev_private
;
210 struct drm_device
*dev
= chan
->dev
;
211 struct nouveau_gpuobj
*obj
;
/* NV50+ uses the new-style DMA object layout */
215 if (dev_priv
->card_type
>= NV_50
) {
/* VM-targeted objects get VM compression/storage type, else none */
216 u32 comp
= (target
== NV_MEM_TARGET_VM
) ? NV_MEM_COMP_VM
: 0;
217 u32 type
= (target
== NV_MEM_TARGET_VM
) ? NV_MEM_TYPE_VM
: 0;
219 return nv50_gpuobj_dma_new(chan
, class, base
, size
,
220 target
, access
, type
, comp
, pobj
);
/* pre-NV50: GART targets must be translated to a PCI view */
223 if (target
== NV_MEM_TARGET_GART
) {
224 struct nouveau_gpuobj
*gart
= dev_priv
->gart_info
.sg_ctxdma
;
/* page-table-backed GART: reuse/derive from the shared sg ctxdma */
226 if (dev_priv
->gart_info
.type
== NOUVEAU_GART_PDMA
) {
228 nouveau_gpuobj_ref(gart
, pobj
);
/* translate the GART offset to a physical address */
232 base
= nouveau_sgdma_get_physical(dev
, base
);
233 target
= NV_MEM_TARGET_PCI
;
/* other GART types: address relative to the aperture base */
235 base
+= dev_priv
->gart_info
.aper_base
;
236 if (dev_priv
->gart_info
.type
== NOUVEAU_GART_AGP
)
237 target
= NV_MEM_TARGET_PCI_NOSNOOP
;
239 target
= NV_MEM_TARGET_PCI
;
244 flags0
|= 0x00003000; /* PT present, PT linear */
/* target aperture bits (cases belong to a switch on 'target') */
248 case NV_MEM_TARGET_PCI
:
249 flags0
|= 0x00020000;
251 case NV_MEM_TARGET_PCI_NOSNOOP
:
252 flags0
|= 0x00030000;
/* access mode bits (cases belong to a switch on 'access') */
259 case NV_MEM_ACCESS_RO
:
260 flags0
|= 0x00004000;
262 case NV_MEM_ACCESS_WO
:
263 flags0
|= 0x00008000;
265 flags2
|= 0x00000002;
/* split base: low 12 bits into flags0, page-aligned part into flags2 */
269 flags0
|= (base
& 0x00000fff) << 20;
270 flags2
|= (base
& 0xfffff000);
/* 16 bytes: four 32-bit object words */
272 ret
= nouveau_gpuobj_new(dev
, chan
, 16, 16, NVOBJ_FLAG_ZERO_FREE
, &obj
);
276 nv_wo32(obj
, 0x00, flags0
);
/* limit = size - 1 */
277 nv_wo32(obj
, 0x04, size
- 1);
278 nv_wo32(obj
, 0x08, flags2
);
279 nv_wo32(obj
, 0x0c, flags2
);
281 obj
->engine
= NVOBJ_ENGINE_SW
;
/*
 * Create a graphics object of 'class' with the given handle on a
 * channel: locate the class's engine via dev_priv->classes, lazily
 * create the channel's context for that engine if it does not exist yet,
 * then ask the engine to create the object.
 * NOTE(review): lossy extract -- the 'ret' declaration, the class-match
 * test inside the loop, the context_new error check and the fallthrough
 * return (unknown class) are missing from this view.
 */
288 nouveau_gpuobj_gr_new(struct nouveau_channel
*chan
, u32 handle
, int class)
290 struct drm_nouveau_private
*dev_priv
= chan
->dev
->dev_private
;
291 struct drm_device
*dev
= chan
->dev
;
292 struct nouveau_gpuobj_class
*oc
;
295 NV_DEBUG(dev
, "ch%d class=0x%04x\n", chan
->id
, class);
/* find the engine that implements this object class */
297 list_for_each_entry(oc
, &dev_priv
->classes
, head
) {
298 struct nouveau_exec_engine
*eng
= dev_priv
->eng
[oc
->engine
];
/* first object on this engine for the channel: build its context */
303 if (!chan
->engctx
[oc
->engine
]) {
304 ret
= eng
->context_new(chan
, oc
->engine
);
/* engine-specific object creation */
309 return eng
->object_new(chan
, oc
->engine
, handle
, class);
/*
 * NV04-era per-channel instance memory setup: allocate the 64KiB,
 * 4KiB-aligned, zeroed RAMIN block for the channel.
 * NOTE(review): lossy extract -- the 'ret' declaration, the error check
 * and the return are missing from this view.
 */
316 nv04_gpuobj_channel_init_pramin(struct nouveau_channel
*chan
)
318 struct drm_device
*dev
= chan
->dev
;
/* 0x10000 bytes, 0x1000 alignment, zero-initialised */
321 ret
= nouveau_gpuobj_new(dev
, NULL
, 0x10000, 0x1000,
322 NVOBJ_FLAG_ZERO_ALLOC
, &chan
->ramin
);
/*
 * NV50 per-channel instance memory setup: allocate the channel's RAMIN
 * block, then carve out RAMFC (0x200), the engine pointer area (0x1000)
 * and the page directory (0x4000) inside it.
 * NOTE(review): lossy extract -- the 'ret' declaration and the error
 * check after each allocation are missing from this view.
 */
330 nv50_gpuobj_channel_init_pramin(struct nouveau_channel
*chan
)
332 struct drm_device
*dev
= chan
->dev
;
/* backing RAMIN: 64KiB, 4KiB-aligned, zeroed */
335 ret
= nouveau_gpuobj_new(dev
, NULL
, 0x10000, 0x1000,
336 NVOBJ_FLAG_ZERO_ALLOC
, &chan
->ramin
);
/* fifo context (RAMFC) */
340 ret
= nouveau_gpuobj_new(dev
, chan
, 0x0200, 0, 0, &chan
->ramfc
);
/* engine pointer table */
344 ret
= nouveau_gpuobj_new(dev
, chan
, 0x1000, 0, 0, &chan
->engptr
);
/* VM page directory */
348 ret
= nouveau_gpuobj_new(dev
, chan
, 0x4000, 0, 0, &chan
->vm_pd
);
/*
 * NV84+ per-channel instance memory setup: like the NV50 variant but
 * without a separate RAMFC carve-out -- only the engine pointer area
 * (0x200 here) and the page directory (0x4000) are allocated inside
 * RAMIN.
 * NOTE(review): lossy extract -- the 'ret' declaration and the error
 * check after each allocation are missing from this view.
 */
356 nv84_gpuobj_channel_init_pramin(struct nouveau_channel
*chan
)
358 struct drm_device
*dev
= chan
->dev
;
/* backing RAMIN: 64KiB, 4KiB-aligned, zeroed */
361 ret
= nouveau_gpuobj_new(dev
, NULL
, 0x10000, 0x1000,
362 NVOBJ_FLAG_ZERO_ALLOC
, &chan
->ramin
);
/* engine pointer table */
366 ret
= nouveau_gpuobj_new(dev
, chan
, 0x0200, 0, 0, &chan
->engptr
);
/* VM page directory */
370 ret
= nouveau_gpuobj_new(dev
, chan
, 0x4000, 0, 0, &chan
->vm_pd
);
/*
 * Fermi (NVC0+) channel object setup: allocate a 4KiB RAMIN block and a
 * 64KiB page directory, attach the channel to 'vm', then write the PD
 * address and VM limit words into RAMIN.
 * NOTE(review): lossy extract -- the 'ret' declaration, the error checks
 * after the allocations and the return are missing from this view.
 */
378 nvc0_gpuobj_channel_init(struct nouveau_channel
*chan
, struct nouveau_vm
*vm
)
380 struct drm_device
*dev
= chan
->dev
;
383 ret
= nouveau_gpuobj_new(dev
, NULL
, 4096, 0x1000, 0, &chan
->ramin
);
387 ret
= nouveau_gpuobj_new(dev
, NULL
, 65536, 0x1000, 0, &chan
->vm_pd
);
/* take a VM reference tied to this channel's page directory */
391 nouveau_vm_ref(vm
, &chan
->vm
, chan
->vm_pd
);
/* point the channel at its page directory (64-bit address split) */
393 nv_wo32(chan
->ramin
, 0x0200, lower_32_bits(chan
->vm_pd
->addr
));
394 nv_wo32(chan
->ramin
, 0x0204, upper_32_bits(chan
->vm_pd
->addr
));
/* VM address limit words */
395 nv_wo32(chan
->ramin
, 0x0208, 0xffffffff);
396 nv_wo32(chan
->ramin
, 0x020c, 0x000000ff);
/*
 * Full per-channel object setup used at channel creation:
 *  - pick the channel's VM (per-client fpriv->vm, else the shared
 *    dev_priv->chan_vm);
 *  - NVC0+ cards take the dedicated nvc0_gpuobj_channel_init() path;
 *  - otherwise allocate per-chipset PRAMIN (nv84/nv50/nv04 variants),
 *    link the VM to the channel's page directory, set up RAMHT (shared
 *    global table pre-NV50, a fresh 0x8000-byte table on NV50+), and
 *    create + insert the VRAM and TT (GART) DMA context objects under
 *    the caller-supplied handles vram_h and tt_h.
 * NOTE(review): lossy extract -- the 'ret' declaration, most error
 * checks/returns, several else branches and closing braces are missing
 * from this view; restore from the original file before editing.
 */
402 nouveau_gpuobj_channel_init(struct nouveau_channel
*chan
,
403 uint32_t vram_h
, uint32_t tt_h
)
405 struct drm_device
*dev
= chan
->dev
;
406 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
407 struct nouveau_fpriv
*fpriv
= nouveau_fpriv(chan
->file_priv
);
/* per-client VM when available, else the device-wide channel VM */
408 struct nouveau_vm
*vm
= fpriv
? fpriv
->vm
: dev_priv
->chan_vm
;
409 struct nouveau_gpuobj
*vram
= NULL
, *tt
= NULL
;
412 NV_DEBUG(dev
, "ch%d vram=0x%08x tt=0x%08x\n", chan
->id
, vram_h
, tt_h
);
/* Fermi and newer use an entirely different layout */
413 if (dev_priv
->card_type
>= NV_C0
)
414 return nvc0_gpuobj_channel_init(chan
, vm
);
416 /* Allocate a chunk of memory for per-channel object storage */
417 if (dev_priv
->chipset
>= 0x84)
418 ret
= nv84_gpuobj_channel_init_pramin(chan
);
420 if (dev_priv
->chipset
== 0x50)
421 ret
= nv50_gpuobj_channel_init_pramin(chan
);
/* pre-NV50 fallback */
423 ret
= nv04_gpuobj_channel_init_pramin(chan
);
425 NV_ERROR(dev
, "init pramin\n");
430 * - Allocate per-channel page-directory
431 * - Link with shared channel VM
434 nouveau_vm_ref(vm
, &chan
->vm
, chan
->vm_pd
);
/* RAMHT: shared device table pre-NV50, per-channel table on NV50+ */
437 if (dev_priv
->card_type
< NV_50
) {
438 nouveau_ramht_ref(dev_priv
->ramht
, &chan
->ramht
, NULL
);
440 struct nouveau_gpuobj
*ramht
= NULL
;
442 ret
= nouveau_gpuobj_new(dev
, chan
, 0x8000, 16,
443 NVOBJ_FLAG_ZERO_ALLOC
, &ramht
);
447 ret
= nouveau_ramht_new(dev
, ramht
, &chan
->ramht
);
/* drop local ref; the ramht object now owns it */
448 nouveau_gpuobj_ref(NULL
, &ramht
);
/* VRAM ctxdma: full 40-bit VM window on NV50+, fb size otherwise */
454 if (dev_priv
->card_type
>= NV_50
) {
455 ret
= nouveau_gpuobj_dma_new(chan
, NV_CLASS_DMA_IN_MEMORY
,
456 0, (1ULL << 40), NV_MEM_ACCESS_RW
,
457 NV_MEM_TARGET_VM
, &vram
);
459 NV_ERROR(dev
, "Error creating VRAM ctxdma: %d\n", ret
);
463 ret
= nouveau_gpuobj_dma_new(chan
, NV_CLASS_DMA_IN_MEMORY
,
464 0, dev_priv
->fb_available_size
,
466 NV_MEM_TARGET_VRAM
, &vram
);
468 NV_ERROR(dev
, "Error creating VRAM ctxdma: %d\n", ret
);
/* publish the VRAM object under the caller's handle */
473 ret
= nouveau_ramht_insert(chan
, vram_h
, vram
);
474 nouveau_gpuobj_ref(NULL
, &vram
);
476 NV_ERROR(dev
, "Error adding VRAM ctxdma to RAMHT: %d\n", ret
);
480 /* TT memory ctxdma */
481 if (dev_priv
->card_type
>= NV_50
) {
482 ret
= nouveau_gpuobj_dma_new(chan
, NV_CLASS_DMA_IN_MEMORY
,
483 0, (1ULL << 40), NV_MEM_ACCESS_RW
,
484 NV_MEM_TARGET_VM
, &tt
);
486 ret
= nouveau_gpuobj_dma_new(chan
, NV_CLASS_DMA_IN_MEMORY
,
487 0, dev_priv
->gart_info
.aper_size
,
489 NV_MEM_TARGET_GART
, &tt
);
493 NV_ERROR(dev
, "Error creating TT ctxdma: %d\n", ret
);
/* publish the TT object under the caller's handle */
497 ret
= nouveau_ramht_insert(chan
, tt_h
, tt
);
498 nouveau_gpuobj_ref(NULL
, &tt
);
500 NV_ERROR(dev
, "Error adding TT ctxdma to RAMHT: %d\n", ret
);
/*
 * Tear down the per-channel objects created by
 * nouveau_gpuobj_channel_init(): drop the VM reference, then release the
 * page directory, RAMFC, engine pointer area and finally RAMIN (the
 * block the others were carved out of).  Passing NULL as the first
 * argument to the *_ref() helpers drops the held reference.
 * NOTE(review): lossy extract -- the closing brace (and any ramht
 * release around original line 516) falls outside this view.
 */
508 nouveau_gpuobj_channel_takedown(struct nouveau_channel
*chan
)
510 NV_DEBUG(chan
->dev
, "ch%d\n", chan
->id
);
/* detach the channel from its VM */
512 nouveau_vm_ref(NULL
, &chan
->vm
, chan
->vm_pd
);
513 nouveau_gpuobj_ref(NULL
, &chan
->vm_pd
);
514 nouveau_gpuobj_ref(NULL
, &chan
->ramfc
);
515 nouveau_gpuobj_ref(NULL
, &chan
->engptr
);
/* RAMIN last: the objects above live inside it */
517 nouveau_gpuobj_ref(NULL
, &chan
->ramin
);