/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"
#include "nouveau_vm.h"

struct nouveau_gpuobj_method {
        struct list_head head;
        u32 mthd;
        int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
        struct list_head head;
        struct list_head methods;
        u32 id;
        u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_class *oc;

        oc = kzalloc(sizeof(*oc), GFP_KERNEL);
        if (!oc)
                return -ENOMEM;

        INIT_LIST_HEAD(&oc->methods);
        oc->id = class;
        oc->engine = engine;
        list_add(&oc->head, &dev_priv->classes);
        return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
                        int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_method *om;
        struct nouveau_gpuobj_class *oc;

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id == class)
                        goto found;
        }

        return -EINVAL;

found:
        om = kzalloc(sizeof(*om), GFP_KERNEL);
        if (!om)
                return -ENOMEM;

        om->mthd = mthd;
        om->exec = exec;
        list_add(&om->head, &oc->methods);
        return 0;
}

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
                         u32 class, u32 mthd, u32 data)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_gpuobj_method *om;
        struct nouveau_gpuobj_class *oc;

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id != class)
                        continue;

                list_for_each_entry(om, &oc->methods, head) {
                        if (om->mthd == mthd)
                                return om->exec(chan, class, mthd, data);
                }
        }

        return -ENOENT;
}

int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
                          u32 class, u32 mthd, u32 data)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct nouveau_channel *chan = NULL;
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        if (chid >= 0 && chid < pfifo->channels)
                chan = dev_priv->channels.ptr[chid];
        if (chan)
                ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return ret;
}

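/*
 * Illustrative usage of the class/method registry above (not code from
 * this file): an engine registers the object classes it implements at
 * init time, and may attach software method handlers that interrupt
 * handlers can dispatch through nouveau_gpuobj_mthd_call()/_call2().
 * The class id (0x506e), method offset (0x0500) and function names below
 * are hypothetical, chosen only to show the calling convention.
 *
 *      static int
 *      my_mthd_handler(struct nouveau_channel *chan, u32 class,
 *                      u32 mthd, u32 data)
 *      {
 *              return 0;       (act on the 32-bit method payload)
 *      }
 *
 *      static int
 *      my_engine_init(struct drm_device *dev)
 *      {
 *              int ret;
 *
 *              ret = nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
 *              if (ret)
 *                      return ret;
 *
 *              return nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500,
 *                                             my_mthd_handler);
 *      }
 */
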
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
                   uint32_t size, int align, uint32_t flags,
                   struct nouveau_gpuobj **gpuobj_ret)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        struct nouveau_gpuobj *gpuobj;
        struct drm_mm_node *ramin = NULL;
        int ret, i;

        NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
                 chan ? chan->id : -1, size, align, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size = size;

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);

        if (!(flags & NVOBJ_FLAG_VM) && chan) {
                ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
                if (ramin)
                        ramin = drm_mm_get_block(ramin, size, align);
                if (!ramin) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return -ENOMEM;
                }

                gpuobj->pinst = chan->ramin->pinst;
                if (gpuobj->pinst != ~0)
                        gpuobj->pinst += ramin->start;

                gpuobj->cinst = ramin->start;
                gpuobj->vinst = ramin->start + chan->ramin->vinst;
                gpuobj->node  = ramin;
        } else {
                ret = instmem->get(gpuobj, chan, size, align);
                if (ret) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return ret;
                }

                ret = -ENOSYS;
                if (!(flags & NVOBJ_FLAG_DONT_MAP))
                        ret = instmem->map(gpuobj);
                if (ret)
                        gpuobj->pinst = ~0;

                gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        }

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                instmem->flush(dev);
        }

        *gpuobj_ret = gpuobj;
        return 0;
}

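/*
 * Note on the two allocation paths above: objects tied to a channel (and
 * not flagged NVOBJ_FLAG_VM) are suballocated from that channel's own
 * PRAMIN heap, so their pinst/cinst/vinst are derived from chan->ramin.
 * Everything else goes through the global instmem engine (get/map) and is
 * marked NVOBJ_CINST_GLOBAL; if mapping fails or is skipped
 * (NVOBJ_FLAG_DONT_MAP), pinst is left at ~0 and accesses fall back to
 * the indirect window used by nv_ro32()/nv_wo32() at the end of this file.
 */
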
int
nouveau_gpuobj_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_DEBUG(dev, "\n");

        INIT_LIST_HEAD(&dev_priv->gpuobj_list);
        INIT_LIST_HEAD(&dev_priv->classes);
        spin_lock_init(&dev_priv->ramin_lock);
        dev_priv->ramin_base = ~0;

        return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_method *om, *tm;
        struct nouveau_gpuobj_class *oc, *tc;

        NV_DEBUG(dev, "\n");

        list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
                list_for_each_entry_safe(om, tm, &oc->methods, head) {
                        list_del(&om->head);
                        kfree(om);
                }
                list_del(&oc->head);
                kfree(oc);
        }

        WARN_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
        struct nouveau_gpuobj *gpuobj =
                container_of(ref, struct nouveau_gpuobj, refcount);
        struct drm_device *dev = gpuobj->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        int i;

        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

        if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                instmem->flush(dev);
        }

        if (gpuobj->dtor)
                gpuobj->dtor(dev, gpuobj);

        if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
                if (gpuobj->node) {
                        instmem->unmap(gpuobj);
                        instmem->put(gpuobj);
                }
        } else {
                if (gpuobj->node) {
                        spin_lock(&dev_priv->ramin_lock);
                        drm_mm_put_block(gpuobj->node);
                        spin_unlock(&dev_priv->ramin_lock);
                }
        }

        spin_lock(&dev_priv->ramin_lock);
        list_del(&gpuobj->list);
        spin_unlock(&dev_priv->ramin_lock);

        kfree(gpuobj);
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
        if (ref)
                kref_get(&ref->refcount);

        if (*ptr)
                kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

        *ptr = ref;
}

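/*
 * nouveau_gpuobj_ref() is the single entry point for reference management:
 * it takes a reference on "ref" (if non-NULL), drops the reference held by
 * "*ptr" (if any), and stores "ref" in "*ptr".  A minimal sketch, with the
 * variable names being illustrative only:
 *
 *      struct nouveau_gpuobj *obj = NULL;
 *      int ret;
 *
 *      ret = nouveau_gpuobj_new(dev, chan, 0x1000, 16, 0, &obj);
 *      if (ret == 0) {
 *              nouveau_gpuobj_ref(obj, &long_lived_ptr);    (take a ref)
 *              nouveau_gpuobj_ref(NULL, &obj);              (drop ours)
 *      }
 *
 * The object is destroyed via nouveau_gpuobj_del() once the last
 * reference is dropped.
 */
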
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
                        u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        int i;

        NV_DEBUG(dev,
                 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
                 pinst, vinst, size, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size  = size;
        gpuobj->pinst = pinst;
        gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        gpuobj->vinst = vinst;

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                dev_priv->engine.instmem.flush(dev);
        }

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);
        *pgpuobj = gpuobj;
        return 0;
}

void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
                     u64 base, u64 size, int target, int access,
                     u32 type, u32 comp)
{
        struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        u32 flags0;

        flags0  = (comp << 29) | (type << 22) | class;
        flags0 |= 0x00100000;

        switch (access) {
        case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
        case NV_MEM_ACCESS_RW:
        case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
        default:
                break;
        }

        switch (target) {
        case NV_MEM_TARGET_VRAM:
                flags0 |= 0x00010000;
                break;
        case NV_MEM_TARGET_PCI:
                flags0 |= 0x00020000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
                flags0 |= 0x00030000;
                break;
        case NV_MEM_TARGET_GART:
                base += dev_priv->gart_info.aper_base;
        default:
                flags0 &= ~0x00100000;
                break;
        }

        /* convert to base + limit */
        size = (base + size) - 1;

        nv_wo32(obj, offset + 0x00, flags0);
        nv_wo32(obj, offset + 0x04, lower_32_bits(size));
        nv_wo32(obj, offset + 0x08, lower_32_bits(base));
        nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
                                    upper_32_bits(base));
        nv_wo32(obj, offset + 0x10, 0x00000000);
        nv_wo32(obj, offset + 0x14, 0x00000000);

        pinstmem->flush(obj->dev);
}

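/*
 * Worked example with illustrative parameter values: a read-only
 * NV_CLASS_DMA_IN_MEMORY (0x003d) ctxdma covering the first 1MiB of VRAM,
 * written at offset 0 of "obj":
 *
 *      nv50_gpuobj_dma_init(obj, 0, 0x003d, 0, 0x100000,
 *                           NV_MEM_TARGET_VRAM, NV_MEM_ACCESS_RO, 0, 0);
 *
 * Following the code above, flags0 becomes 0x0015003d (class | valid |
 * read-only | VRAM) and the limit becomes 0x000fffff, so the six words at
 * offsets 0x00..0x14 end up as 0x0015003d, 0x000fffff, 0x00000000,
 * 0x00000000, 0x00000000, 0x00000000.
 */
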
int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
                    int target, int access, u32 type, u32 comp,
                    struct nouveau_gpuobj **pobj)
{
        struct drm_device *dev = chan->dev;
        int ret;

        ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
        if (ret)
                return ret;

        nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
                             access, type, comp);
        return 0;
}

int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
                       u64 size, int access, int target,
                       struct nouveau_gpuobj **pobj)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj *obj;
        u32 flags0, flags2;
        int ret;

        if (dev_priv->card_type >= NV_50) {
                u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
                u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

                return nv50_gpuobj_dma_new(chan, class, base, size,
                                           target, access, type, comp, pobj);
        }

        if (target == NV_MEM_TARGET_GART) {
                struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

                if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
                        if (base == 0) {
                                nouveau_gpuobj_ref(gart, pobj);
                                return 0;
                        }

                        base = nouveau_sgdma_get_physical(dev, base);
                        target = NV_MEM_TARGET_PCI;
                } else {
                        base += dev_priv->gart_info.aper_base;
                        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
                                target = NV_MEM_TARGET_PCI_NOSNOOP;
                        else
                                target = NV_MEM_TARGET_PCI;
                }
        }

        flags0  = class;
        flags0 |= 0x00003000; /* PT present, PT linear */
        flags2  = 0;

        switch (target) {
        case NV_MEM_TARGET_PCI:
                flags0 |= 0x00020000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
                flags0 |= 0x00030000;
                break;
        default:
                break;
        }

        switch (access) {
        case NV_MEM_ACCESS_RO:
                flags0 |= 0x00004000;
                break;
        case NV_MEM_ACCESS_WO:
                flags0 |= 0x00008000;
        default:
                flags2 |= 0x00000002;
                break;
        }

        flags0 |= (base & 0x00000fff) << 20;
        flags2 |= (base & 0xfffff000);

        ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
        if (ret)
                return ret;

        nv_wo32(obj, 0x00, flags0);
        nv_wo32(obj, 0x04, size - 1);
        nv_wo32(obj, 0x08, flags2);
        nv_wo32(obj, 0x0c, flags2);

        obj->engine = NVOBJ_ENGINE_SW;
        obj->class  = class;
        *pobj = obj;
        return 0;
}

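/*
 * For pre-NV50 chipsets the object built above is the classic four-word
 * NV04-style DMA object: word 0x00 (flags0) holds the class, the
 * present/linear bits (0x00003000), the access bits and the low 12 bits of
 * the base address shifted into bits 20..31; word 0x04 holds the limit
 * (size - 1); words 0x08 and 0x0c both carry flags2, i.e. the page-aligned
 * part of the base address plus the read/write bit.
 */
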
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj_class *oc;
        int ret;

        NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

        list_for_each_entry(oc, &dev_priv->classes, head) {
                struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

                if (oc->id != class)
                        continue;

                if (!chan->engctx[oc->engine]) {
                        ret = eng->context_new(chan, oc->engine);
                        if (ret)
                                return ret;
                }

                return eng->object_new(chan, oc->engine, handle, class);
        }

        return -EINVAL;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t size;
        uint32_t base;
        int ret;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        /* Base amount for object storage (4KiB enough?) */
        size = 0x2000;
        base = 0;

        if (dev_priv->card_type == NV_50) {
                /* Various fixed table thingos */
                size += 0x1400; /* mostly unknown stuff */
                size += 0x4000; /* vm pd */
                base  = 0x6000;
                /* RAMHT, not sure about setting size yet, 32KiB to be safe */
                size += 0x8000;
                /* RAMFC */
                size += 0x1000;
        }

        ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
                return ret;
        }

        ret = drm_mm_init(&chan->ramin_heap, base, size - base);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
                nouveau_gpuobj_ref(NULL, &chan->ramin);
                return ret;
        }

        return 0;
}

static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj *pgd = NULL;
        struct nouveau_vm_pgd *vpgd;
        int ret;

        ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
        if (ret)
                return ret;

        /* create page directory for this vm if none currently exists,
         * will be destroyed automagically when last reference to the
         * vm is removed
         */
        if (list_empty(&vm->pgd_list)) {
                ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
                if (ret)
                        return ret;
        }
        nouveau_vm_ref(vm, &chan->vm, pgd);
        nouveau_gpuobj_ref(NULL, &pgd);

        /* point channel at vm's page directory */
        vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
        nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
        nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
        nv_wo32(chan->ramin, 0x0208, 0xffffffff);
        nv_wo32(chan->ramin, 0x020c, 0x000000ff);

        return 0;
}

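/*
 * The writes at 0x0200..0x020c above point the NVC0 channel's instance
 * block at the VM's page directory; 0x0208/0x020c presumably encode the
 * address limit (0x000000ffffffffff here, i.e. a 1ULL << 40 byte address
 * space, matching the ctxdma sizes used for >= NV50 channels below).
 */
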
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                            uint32_t vram_h, uint32_t tt_h)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
        struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
        struct nouveau_gpuobj *vram = NULL, *tt = NULL;
        int ret;

        NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
        if (dev_priv->card_type >= NV_C0)
                return nvc0_gpuobj_channel_init(chan, vm);

        /* Allocate a chunk of memory for per-channel object storage */
        ret = nouveau_gpuobj_channel_init_pramin(chan);
        if (ret) {
                NV_ERROR(dev, "init pramin\n");
                return ret;
        }

        /* NV50 VM
         *  - Allocate per-channel page-directory
         *  - Link with shared channel VM
         */
        if (vm) {
                u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
                u64 vm_vinst = chan->ramin->vinst + pgd_offs;
                u32 vm_pinst = chan->ramin->pinst;

                if (vm_pinst != ~0)
                        vm_pinst += pgd_offs;

                ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
                                              0, &chan->vm_pd);
                if (ret)
                        return ret;

                nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
        }

        /* RAMHT */
        if (dev_priv->card_type < NV_50) {
                nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
        } else {
                struct nouveau_gpuobj *ramht = NULL;

                ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC, &ramht);
                if (ret)
                        return ret;

                ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
                nouveau_gpuobj_ref(NULL, &ramht);
                if (ret)
                        return ret;
        }

        /* VRAM ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, (1ULL << 40), NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->fb_available_size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        }

        ret = nouveau_ramht_insert(chan, vram_h, vram);
        nouveau_gpuobj_ref(NULL, &vram);
        if (ret) {
                NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        /* TT memory ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, (1ULL << 40), NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VM, &tt);
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->gart_info.aper_size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_GART, &tt);
        }

        if (ret) {
                NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
                return ret;
        }

        ret = nouveau_ramht_insert(chan, tt_h, tt);
        nouveau_gpuobj_ref(NULL, &tt);
        if (ret) {
                NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
        NV_DEBUG(chan->dev, "ch%d\n", chan->id);

        nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
        nouveau_gpuobj_ref(NULL, &chan->vm_pd);

        if (drm_mm_initialized(&chan->ramin_heap))
                drm_mm_takedown(&chan->ramin_heap);
        nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
                        continue;

                gpuobj->suspend = vmalloc(gpuobj->size);
                if (!gpuobj->suspend) {
                        nouveau_gpuobj_resume(dev);
                        return -ENOMEM;
                }

                for (i = 0; i < gpuobj->size; i += 4)
                        gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
        }

        return 0;
}

739 | ||
6ee73861 BS |
740 | void |
741 | nouveau_gpuobj_resume(struct drm_device *dev) | |
742 | { | |
743 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
744 | struct nouveau_gpuobj *gpuobj; | |
745 | int i; | |
746 | ||
6ee73861 | 747 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { |
dc1e5c0d | 748 | if (!gpuobj->suspend) |
6ee73861 BS |
749 | continue; |
750 | ||
43efc9ce | 751 | for (i = 0; i < gpuobj->size; i += 4) |
dc1e5c0d BS |
752 | nv_wo32(gpuobj, i, gpuobj->suspend[i/4]); |
753 | ||
754 | vfree(gpuobj->suspend); | |
755 | gpuobj->suspend = NULL; | |
6ee73861 BS |
756 | } |
757 | ||
dc1e5c0d | 758 | dev_priv->engine.instmem.flush(dev); |
6ee73861 BS |
759 | } |
760 | ||
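/*
 * When an object has no direct PRAMIN mapping (pinst == ~0, or instance
 * memory is not yet available), the accessors below fall back to an
 * indirect window: register 0x001700 selects which 64KiB-aligned chunk of
 * instance memory is visible through the aperture at MMIO offset 0x700000,
 * and the low 16 bits of the address index into that window.
 * dev_priv->ramin_base caches the current window base so the register is
 * only reprogrammed when needed, and vm_lock serialises the accesses.
 */
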
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;
        unsigned long flags;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64  ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;
                u32  val;

                spin_lock_irqsave(&dev_priv->vm_lock, flags);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
                spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
                return val;
        }

        return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;
        unsigned long flags;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64  ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;

                spin_lock_irqsave(&dev_priv->vm_lock, flags);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
                spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
                return;
        }

        nv_wi32(dev, gpuobj->pinst + offset, val);
}