/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kernel.h>

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
6ee73861 BS |
37 | void |
38 | nouveau_gem_object_del(struct drm_gem_object *gem) | |
39 | { | |
55fb74ad | 40 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
6ee73861 BS |
41 | struct ttm_buffer_object *bo = &nvbo->bo; |
42 | ||
22b33e8e DA |
43 | if (gem->import_attach) |
44 | drm_prime_gem_destroy(gem, nvbo->bo.sg); | |
45 | ||
fd632aa3 | 46 | drm_gem_object_release(gem); |
55fb74ad DH |
47 | |
48 | /* reset filp so nouveau_bo_del_ttm() can test for it */ | |
49 | gem->filp = NULL; | |
50 | ttm_bo_unref(&bo); | |
6ee73861 BS |
51 | } |
52 | ||
/* Called when a client opens a handle to this GEM object.  If the client
 * has its own virtual address space, make sure the buffer is mapped into
 * it: create a new vma on first open, or take another reference on an
 * existing one.  The vma list is modified under the buffer's TTM
 * reservation.
 */
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	/* Client has no per-client VM; nothing to map. */
	if (!cli->base.vm)
		return 0;

	/* Reserve the buffer so the vma list can be safely modified. */
	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		/* First open by this client: create a fresh mapping. */
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		/* Already mapped for this client: just add a reference. */
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
89 | ||
/* Fence-completion callback (queued by nouveau_gem_object_unmap()):
 * the GPU is done with the mapping, so unmap it, return the VM space,
 * and free the vma.
 */
static void
nouveau_gem_object_delete(void *arg)
{
	struct nouveau_vma *vma = arg;

	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}
98 | ||
/* Drop one client mapping of @nvbo.  Called with the buffer reserved.
 * If the buffer currently has a GPU placement (not TTM_PL_SYSTEM) and is
 * still protected by a fence, defer the unmap/free to fence completion
 * via nouveau_gem_object_delete(); otherwise tear the vma down now.
 */
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	if (mapped) {
		/* Take a reference on the buffer's fence under fence_lock
		 * so it cannot be freed underneath us. */
		spin_lock(&nvbo->bo.bdev->fence_lock);
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		/* Deferred path: callback performs unmap/put/free once the
		 * fence signals. */
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		/* Immediate path: no GPU use outstanding. */
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}
123 | ||
/* Called when a client drops its handle to this GEM object: release the
 * client's reference on the buffer's mapping in its VM, and tear the
 * mapping down when the last reference goes away.
 */
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	/* No per-client VM, so no mapping was ever created in open(). */
	if (!cli->base.vm)
		return;

	/* Reserve the buffer to protect the vma list and refcount. */
	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		/* Last reference: unmap (possibly deferred to the fence). */
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}
146 | ||
6ee73861 | 147 | int |
f6d4e621 BS |
148 | nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, |
149 | uint32_t tile_mode, uint32_t tile_flags, | |
150 | struct nouveau_bo **pnvbo) | |
6ee73861 | 151 | { |
ebb945a9 | 152 | struct nouveau_drm *drm = nouveau_drm(dev); |
6ee73861 | 153 | struct nouveau_bo *nvbo; |
6ba9a683 | 154 | u32 flags = 0; |
6ee73861 BS |
155 | int ret; |
156 | ||
6ba9a683 BS |
157 | if (domain & NOUVEAU_GEM_DOMAIN_VRAM) |
158 | flags |= TTM_PL_FLAG_VRAM; | |
159 | if (domain & NOUVEAU_GEM_DOMAIN_GART) | |
160 | flags |= TTM_PL_FLAG_TT; | |
161 | if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) | |
162 | flags |= TTM_PL_FLAG_SYSTEM; | |
163 | ||
7375c95b | 164 | ret = nouveau_bo_new(dev, size, align, flags, tile_mode, |
22b33e8e | 165 | tile_flags, NULL, pnvbo); |
6ee73861 BS |
166 | if (ret) |
167 | return ret; | |
168 | nvbo = *pnvbo; | |
169 | ||
db5c8e29 BS |
170 | /* we restrict allowed domains on nv50+ to only the types |
171 | * that were requested at creation time. not possibly on | |
172 | * earlier chips without busting the ABI. | |
173 | */ | |
174 | nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | | |
175 | NOUVEAU_GEM_DOMAIN_GART; | |
ebb945a9 | 176 | if (nv_device(drm->device)->card_type >= NV_50) |
db5c8e29 BS |
177 | nvbo->valid_domains &= domain; |
178 | ||
55fb74ad DH |
179 | /* Initialize the embedded gem-object. We return a single gem-reference |
180 | * to the caller, instead of a normal nouveau_bo ttm reference. */ | |
181 | ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size); | |
182 | if (ret) { | |
6ee73861 BS |
183 | nouveau_bo_ref(NULL, pnvbo); |
184 | return -ENOMEM; | |
185 | } | |
186 | ||
55fb74ad | 187 | nvbo->bo.persistent_swap_storage = nvbo->gem.filp; |
6ee73861 BS |
188 | return 0; |
189 | } | |
190 | ||
191 | static int | |
e758a311 BS |
192 | nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, |
193 | struct drm_nouveau_gem_info *rep) | |
6ee73861 | 194 | { |
ebb945a9 | 195 | struct nouveau_cli *cli = nouveau_cli(file_priv); |
6ee73861 | 196 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
e758a311 | 197 | struct nouveau_vma *vma; |
6ee73861 BS |
198 | |
199 | if (nvbo->bo.mem.mem_type == TTM_PL_TT) | |
200 | rep->domain = NOUVEAU_GEM_DOMAIN_GART; | |
201 | else | |
202 | rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; | |
203 | ||
e758a311 | 204 | rep->offset = nvbo->bo.offset; |
ebb945a9 BS |
205 | if (cli->base.vm) { |
206 | vma = nouveau_bo_vma_find(nvbo, cli->base.vm); | |
e758a311 BS |
207 | if (!vma) |
208 | return -EINVAL; | |
209 | ||
210 | rep->offset = vma->offset; | |
211 | } | |
212 | ||
6ee73861 | 213 | rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; |
72525b3f | 214 | rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node); |
6ee73861 BS |
215 | rep->tile_mode = nvbo->tile_mode; |
216 | rep->tile_flags = nvbo->tile_flags; | |
217 | return 0; | |
218 | } | |
219 | ||
/* GEM_NEW ioctl: allocate a buffer object per the userspace request and
 * return a handle plus placement info.
 */
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	/* Reject invalid memory-type/tiling flags before allocating. */
	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		/* Report placement back; drop the handle again on failure. */
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}
253 | ||
254 | static int | |
255 | nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, | |
256 | uint32_t write_domains, uint32_t valid_domains) | |
257 | { | |
55fb74ad | 258 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
6ee73861 | 259 | struct ttm_buffer_object *bo = &nvbo->bo; |
db5c8e29 | 260 | uint32_t domains = valid_domains & nvbo->valid_domains & |
78ad0f7b FJ |
261 | (write_domains ? write_domains : read_domains); |
262 | uint32_t pref_flags = 0, valid_flags = 0; | |
6ee73861 | 263 | |
78ad0f7b | 264 | if (!domains) |
6ee73861 BS |
265 | return -EINVAL; |
266 | ||
78ad0f7b FJ |
267 | if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) |
268 | valid_flags |= TTM_PL_FLAG_VRAM; | |
269 | ||
270 | if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) | |
271 | valid_flags |= TTM_PL_FLAG_TT; | |
272 | ||
273 | if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && | |
274 | bo->mem.mem_type == TTM_PL_VRAM) | |
275 | pref_flags |= TTM_PL_FLAG_VRAM; | |
276 | ||
277 | else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && | |
278 | bo->mem.mem_type == TTM_PL_TT) | |
279 | pref_flags |= TTM_PL_FLAG_TT; | |
280 | ||
281 | else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) | |
282 | pref_flags |= TTM_PL_FLAG_VRAM; | |
283 | ||
284 | else | |
285 | pref_flags |= TTM_PL_FLAG_TT; | |
286 | ||
287 | nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); | |
6ee73861 | 288 | |
6ee73861 BS |
289 | return 0; |
290 | } | |
291 | ||
/* Per-pushbuf validation state: buffers are sorted onto one list per
 * permitted placement (VRAM-only, GART-only, or either), and all their
 * reservations share a single wait/wound acquire context. */
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
	struct ww_acquire_ctx ticket;
};
298 | ||
/* Unwind one validation list: optionally attach @fence (from the just
 * submitted pushbuf) to each buffer, drop any kmap left behind by reloc
 * or pushbuf patching, then unreserve and drop the lookup reference.
 */
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
		   struct ww_acquire_ctx *ticket)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence);

		/* Drop the kmap set up during reloc/pushbuf patching. */
		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
		/* Reference taken by drm_gem_object_lookup() in
		 * validate_init(). */
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}
323 | ||
324 | static void | |
ecff665f | 325 | validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence) |
6ee73861 | 326 | { |
ecff665f ML |
327 | validate_fini_list(&op->vram_list, fence, &op->ticket); |
328 | validate_fini_list(&op->gart_list, fence, &op->ticket); | |
329 | validate_fini_list(&op->both_list, fence, &op->ticket); | |
330 | } | |
331 | ||
/* Full unwind: release all three lists, then finish the ww acquire
 * context. */
static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_no_ticket(op, fence);
	ww_acquire_fini(&op->ticket);
}
338 | ||
/* Look up every buffer in the pushbuf's buffer list and reserve it under
 * the operation's ww acquire context, sorting each onto the validate
 * list matching its permitted placements.  When a reservation hits
 * -EDEADLK, everything reserved so far is backed off, the contended
 * buffer is re-reserved via the slowpath, and the whole scan restarts
 * (the "retry" label).
 */
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	/* Give up rather than spin forever if we keep losing the
	 * reservation contest. */
	if (++trycnt > 100000) {
		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = nouveau_gem_object(gem);
		/* Buffer already reserved by the slowpath on the previous
		 * pass; drop the duplicate lookup reference and move on. */
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			/* Back off everything reserved so far (keeping the
			 * ticket) before retrying or bailing out. */
			validate_fini_no_ticket(op, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				ww_acquire_done(&op->ticket);
				ww_acquire_fini(&op->ticket);
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_ERROR(cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		/* Sort onto the list matching the domains the buffer may
		 * be placed in. */
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			/* Park it on a list so validate_fini() unwinds it. */
			list_add_tail(&nvbo->entry, &op->both_list);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		/* Slowpath reservation succeeded: restart the whole scan. */
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	return 0;
}
431 | ||
525895ba BS |
432 | static int |
433 | validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo) | |
434 | { | |
435 | struct nouveau_fence *fence = NULL; | |
436 | int ret = 0; | |
437 | ||
438 | spin_lock(&nvbo->bo.bdev->fence_lock); | |
5d216f60 | 439 | fence = nouveau_fence_ref(nvbo->bo.sync_obj); |
525895ba BS |
440 | spin_unlock(&nvbo->bo.bdev->fence_lock); |
441 | ||
442 | if (fence) { | |
443 | ret = nouveau_fence_sync(fence, chan); | |
444 | nouveau_fence_unref(&fence); | |
445 | } | |
446 | ||
447 | return ret; | |
448 | } | |
449 | ||
/* Validate every buffer on @list into a permitted placement, sync the
 * channel against each buffer's previous fence, and (on <NV50) copy any
 * changed "presumed" offset/domain back to the userspace buffer list.
 * Returns the number of buffers whose presumed state went stale (>= 0),
 * or a negative error code.
 */
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(cli, "fail ttm_validate\n");
			return ret;
		}

		/* Order this submission after the buffer's last use. */
		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail post-validate sync\n");
			return ret;
		}

		if (nv_device(drm->device)->card_type < NV_50) {
			/* Presumed placement still accurate: nothing to
			 * report back to userspace. */
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			/* Copy the updated placement back so userspace's
			 * presumed offset/domain stay current. */
			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					 &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}
509 | ||
/* Reserve (validate_init) and place (validate_list) every buffer the
 * pushbuf references.  On success the caller owns the reservations in
 * @op and must release them with validate_fini(); *apply_relocs holds
 * the total number of buffers whose presumed placement went stale.
 */
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate_init\n");
		return ret;
	}

	/* Validate each placement list in turn, accumulating the count
	 * of stale presumed offsets. */
	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}
564 | ||
/* Counterpart of u_memcpya(): the buffer may come from either kmalloc
 * or vmalloc, so pick the matching free routine. */
static inline void
u_free(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
573 | ||
6ee73861 BS |
574 | static inline void * |
575 | u_memcpya(uint64_t user, unsigned nmemb, unsigned size) | |
576 | { | |
577 | void *mem; | |
578 | void __user *userptr = (void __force __user *)(uintptr_t)user; | |
579 | ||
c859074e ML |
580 | size *= nmemb; |
581 | ||
582 | mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); | |
583 | if (!mem) | |
584 | mem = vmalloc(size); | |
6ee73861 BS |
585 | if (!mem) |
586 | return ERR_PTR(-ENOMEM); | |
587 | ||
1d6ac185 | 588 | if (copy_from_user(mem, userptr, size)) { |
c859074e | 589 | u_free(mem); |
6ee73861 BS |
590 | return ERR_PTR(-EFAULT); |
591 | } | |
592 | ||
593 | return mem; | |
594 | } | |
595 | ||
596 | static int | |
a84fa1a3 | 597 | nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, |
a1606a95 BS |
598 | struct drm_nouveau_gem_pushbuf *req, |
599 | struct drm_nouveau_gem_pushbuf_bo *bo) | |
6ee73861 BS |
600 | { |
601 | struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; | |
12f735b7 LB |
602 | int ret = 0; |
603 | unsigned i; | |
6ee73861 | 604 | |
a1606a95 | 605 | reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc)); |
6ee73861 BS |
606 | if (IS_ERR(reloc)) |
607 | return PTR_ERR(reloc); | |
608 | ||
a1606a95 | 609 | for (i = 0; i < req->nr_relocs; i++) { |
6ee73861 BS |
610 | struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i]; |
611 | struct drm_nouveau_gem_pushbuf_bo *b; | |
a1606a95 | 612 | struct nouveau_bo *nvbo; |
6ee73861 BS |
613 | uint32_t data; |
614 | ||
a1606a95 | 615 | if (unlikely(r->bo_index > req->nr_buffers)) { |
a84fa1a3 | 616 | NV_ERROR(cli, "reloc bo index invalid\n"); |
6ee73861 BS |
617 | ret = -EINVAL; |
618 | break; | |
619 | } | |
620 | ||
621 | b = &bo[r->bo_index]; | |
a1606a95 | 622 | if (b->presumed.valid) |
6ee73861 BS |
623 | continue; |
624 | ||
a1606a95 | 625 | if (unlikely(r->reloc_bo_index > req->nr_buffers)) { |
a84fa1a3 | 626 | NV_ERROR(cli, "reloc container bo index invalid\n"); |
a1606a95 BS |
627 | ret = -EINVAL; |
628 | break; | |
629 | } | |
630 | nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv; | |
631 | ||
632 | if (unlikely(r->reloc_bo_offset + 4 > | |
633 | nvbo->bo.mem.num_pages << PAGE_SHIFT)) { | |
a84fa1a3 | 634 | NV_ERROR(cli, "reloc outside of bo\n"); |
a1606a95 BS |
635 | ret = -EINVAL; |
636 | break; | |
637 | } | |
638 | ||
639 | if (!nvbo->kmap.virtual) { | |
640 | ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, | |
641 | &nvbo->kmap); | |
642 | if (ret) { | |
a84fa1a3 | 643 | NV_ERROR(cli, "failed kmap for reloc\n"); |
a1606a95 BS |
644 | break; |
645 | } | |
646 | nvbo->validate_mapped = true; | |
647 | } | |
648 | ||
6ee73861 | 649 | if (r->flags & NOUVEAU_GEM_RELOC_LOW) |
a1606a95 | 650 | data = b->presumed.offset + r->data; |
6ee73861 BS |
651 | else |
652 | if (r->flags & NOUVEAU_GEM_RELOC_HIGH) | |
a1606a95 | 653 | data = (b->presumed.offset + r->data) >> 32; |
6ee73861 BS |
654 | else |
655 | data = r->data; | |
656 | ||
657 | if (r->flags & NOUVEAU_GEM_RELOC_OR) { | |
a1606a95 | 658 | if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART) |
6ee73861 BS |
659 | data |= r->tor; |
660 | else | |
661 | data |= r->vor; | |
662 | } | |
663 | ||
702adba2 | 664 | spin_lock(&nvbo->bo.bdev->fence_lock); |
a1606a95 | 665 | ret = ttm_bo_wait(&nvbo->bo, false, false, false); |
702adba2 | 666 | spin_unlock(&nvbo->bo.bdev->fence_lock); |
a1606a95 | 667 | if (ret) { |
a84fa1a3 | 668 | NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret); |
a1606a95 BS |
669 | break; |
670 | } | |
a1606a95 BS |
671 | |
672 | nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); | |
6ee73861 BS |
673 | } |
674 | ||
c859074e | 675 | u_free(reloc); |
6ee73861 BS |
676 | return ret; |
677 | } | |
678 | ||
/* PUSHBUF ioctl: validate the submitted buffer list, apply relocations,
 * emit the push commands on the requested channel (one of three paths
 * depending on channel capabilities), fence the submission, and report
 * the suffix words userspace should append to future pushbufs.
 */
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	/* Resolve the client's channel handle to a channel object. */
	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	/* Bound all userspace-supplied counts before copying anything. */
	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_ERROR(cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		/* Indirect-buffer path: hand each push segment to the
		 * channel's IB ring directly. */
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		/* CALL path: emit a call into each push buffer. */
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		/* JUMP path (oldest chips): patch a jump-back at the end
		 * of each push segment, then jump into it. */
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			/* Userspace pre-patched with req->suffix0; only
			 * rewrite the trailing jump if it doesn't match. */
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	/* Attach the fence (or NULL on failure) and drop reservations. */
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	/* Tell userspace what suffix dwords to append to its pushbufs,
	 * mirroring the three submission paths above. */
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}
867 | ||
6ee73861 BS |
868 | static inline uint32_t |
869 | domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain) | |
870 | { | |
871 | uint32_t flags = 0; | |
872 | ||
873 | if (domain & NOUVEAU_GEM_DOMAIN_VRAM) | |
874 | flags |= TTM_PL_FLAG_VRAM; | |
875 | if (domain & NOUVEAU_GEM_DOMAIN_GART) | |
876 | flags |= TTM_PL_FLAG_TT; | |
877 | ||
878 | return flags; | |
879 | } | |
880 | ||
6ee73861 BS |
881 | int |
882 | nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, | |
883 | struct drm_file *file_priv) | |
884 | { | |
885 | struct drm_nouveau_gem_cpu_prep *req = data; | |
886 | struct drm_gem_object *gem; | |
887 | struct nouveau_bo *nvbo; | |
888 | bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT); | |
889 | int ret = -EINVAL; | |
890 | ||
6ee73861 BS |
891 | gem = drm_gem_object_lookup(dev, file_priv, req->handle); |
892 | if (!gem) | |
bf79cb91 | 893 | return -ENOENT; |
6ee73861 BS |
894 | nvbo = nouveau_gem_object(gem); |
895 | ||
21e86c1c BS |
896 | spin_lock(&nvbo->bo.bdev->fence_lock); |
897 | ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait); | |
898 | spin_unlock(&nvbo->bo.bdev->fence_lock); | |
bc9025bd | 899 | drm_gem_object_unreference_unlocked(gem); |
6ee73861 BS |
900 | return ret; |
901 | } | |
902 | ||
/* CPU_FINI ioctl: nothing to undo — cpu_prep only waits for idle and
 * holds no state on the buffer afterwards. */
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}
909 | ||
910 | int | |
911 | nouveau_gem_ioctl_info(struct drm_device *dev, void *data, | |
912 | struct drm_file *file_priv) | |
913 | { | |
914 | struct drm_nouveau_gem_info *req = data; | |
915 | struct drm_gem_object *gem; | |
916 | int ret; | |
917 | ||
6ee73861 BS |
918 | gem = drm_gem_object_lookup(dev, file_priv, req->handle); |
919 | if (!gem) | |
bf79cb91 | 920 | return -ENOENT; |
6ee73861 | 921 | |
e758a311 | 922 | ret = nouveau_gem_info(file_priv, gem, req); |
bc9025bd | 923 | drm_gem_object_unreference_unlocked(gem); |
6ee73861 BS |
924 | return ret; |
925 | } | |
926 |