Commit | Line | Data |
---|---|---|
6ee73861 BS |
1 | /* |
2 | * Copyright (C) 2008 Ben Skeggs. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * Permission is hereby granted, free of charge, to any person obtaining | |
6 | * a copy of this software and associated documentation files (the | |
7 | * "Software"), to deal in the Software without restriction, including | |
8 | * without limitation the rights to use, copy, modify, merge, publish, | |
9 | * distribute, sublicense, and/or sell copies of the Software, and to | |
10 | * permit persons to whom the Software is furnished to do so, subject to | |
11 | * the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice (including the | |
14 | * next paragraph) shall be included in all copies or substantial | |
15 | * portions of the Software. | |
16 | * | |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | |
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | |
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | |
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | |
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
24 | * | |
25 | */ | |
6ee73861 | 26 | |
ebb945a9 BS |
27 | #include <subdev/fb.h> |
28 | ||
29 | #include "nouveau_drm.h" | |
6ee73861 | 30 | #include "nouveau_dma.h" |
d375e7d5 | 31 | #include "nouveau_fence.h" |
ebb945a9 | 32 | #include "nouveau_abi16.h" |
6ee73861 | 33 | |
ebb945a9 BS |
34 | #include "nouveau_ttm.h" |
35 | #include "nouveau_gem.h" | |
6ee73861 BS |
36 | |
/* GEM object initialisation hook.  All real setup is performed by
 * nouveau_gem_new() before the object is exposed to userspace, so
 * there is nothing left to do here.
 */
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}
42 | ||
43 | void | |
44 | nouveau_gem_object_del(struct drm_gem_object *gem) | |
45 | { | |
46 | struct nouveau_bo *nvbo = gem->driver_private; | |
47 | struct ttm_buffer_object *bo = &nvbo->bo; | |
48 | ||
49 | if (!nvbo) | |
50 | return; | |
51 | nvbo->gem = NULL; | |
52 | ||
27f06b2d ML |
53 | /* Lockdep hates you for doing reserve with gem object lock held */ |
54 | if (WARN_ON_ONCE(nvbo->pin_refcnt)) { | |
6ee73861 BS |
55 | nvbo->pin_refcnt = 1; |
56 | nouveau_bo_unpin(nvbo); | |
57 | } | |
58 | ||
22b33e8e DA |
59 | if (gem->import_attach) |
60 | drm_prime_gem_destroy(gem, nvbo->bo.sg); | |
61 | ||
6ee73861 | 62 | ttm_bo_unref(&bo); |
fd632aa3 DV |
63 | |
64 | drm_gem_object_release(gem); | |
65 | kfree(gem); | |
6ee73861 BS |
66 | } |
67 | ||
639212d0 BS |
/* Called when a client (file_priv) opens a handle to this GEM object:
 * ensure the buffer has a mapping in the client's per-fd VM, either by
 * creating a new vma or by taking another reference on an existing one.
 * Returns 0 on success or a negative errno.
 */
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	/* Client has no per-fd VM (pre-NV50 style address space):
	 * nothing needs mapping.
	 */
	if (!cli->base.vm)
		return 0;

	/* Reserve the bo so its vma list cannot change under us. */
	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		/* Already mapped into this VM; just add a reference. */
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
104 | ||
c4c7044f BS |
/* Deferred vma teardown, run as fence work once the GPU has finished
 * with the mapping: unmap it, release its VM address range, free it.
 * @data is the struct nouveau_vma scheduled by nouveau_gem_object_unmap().
 */
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}
113 | ||
/* Drop one vma mapping of @nvbo.  If the bo currently lives in
 * VRAM/GART (so the GPU may still reference the mapping), defer the
 * actual unmap until the bo's current fence signals; otherwise tear
 * it down immediately.  Caller must hold the bo reservation.
 */
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	/* TTM_PL_SYSTEM means the bo is not GPU-mapped right now. */
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	/* Snapshot the bo's current fence under the fence lock. */
	if (mapped) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		if (nvbo->bo.sync_obj)
			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		/* GPU may still be using the mapping: free from fence work. */
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}
139 | ||
639212d0 BS |
140 | void |
141 | nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) | |
142 | { | |
ebb945a9 | 143 | struct nouveau_cli *cli = nouveau_cli(file_priv); |
2fd3db6f BS |
144 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
145 | struct nouveau_vma *vma; | |
146 | int ret; | |
639212d0 | 147 | |
ebb945a9 | 148 | if (!cli->base.vm) |
639212d0 | 149 | return; |
2fd3db6f BS |
150 | |
151 | ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); | |
152 | if (ret) | |
153 | return; | |
154 | ||
ebb945a9 | 155 | vma = nouveau_bo_vma_find(nvbo, cli->base.vm); |
2fd3db6f | 156 | if (vma) { |
c4c7044f BS |
157 | if (--vma->refcount == 0) |
158 | nouveau_gem_object_unmap(nvbo, vma); | |
2fd3db6f BS |
159 | } |
160 | ttm_bo_unreserve(&nvbo->bo); | |
639212d0 BS |
161 | } |
162 | ||
6ee73861 | 163 | int |
f6d4e621 BS |
164 | nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, |
165 | uint32_t tile_mode, uint32_t tile_flags, | |
166 | struct nouveau_bo **pnvbo) | |
6ee73861 | 167 | { |
ebb945a9 | 168 | struct nouveau_drm *drm = nouveau_drm(dev); |
6ee73861 | 169 | struct nouveau_bo *nvbo; |
6ba9a683 | 170 | u32 flags = 0; |
6ee73861 BS |
171 | int ret; |
172 | ||
6ba9a683 BS |
173 | if (domain & NOUVEAU_GEM_DOMAIN_VRAM) |
174 | flags |= TTM_PL_FLAG_VRAM; | |
175 | if (domain & NOUVEAU_GEM_DOMAIN_GART) | |
176 | flags |= TTM_PL_FLAG_TT; | |
177 | if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) | |
178 | flags |= TTM_PL_FLAG_SYSTEM; | |
179 | ||
7375c95b | 180 | ret = nouveau_bo_new(dev, size, align, flags, tile_mode, |
22b33e8e | 181 | tile_flags, NULL, pnvbo); |
6ee73861 BS |
182 | if (ret) |
183 | return ret; | |
184 | nvbo = *pnvbo; | |
185 | ||
db5c8e29 BS |
186 | /* we restrict allowed domains on nv50+ to only the types |
187 | * that were requested at creation time. not possibly on | |
188 | * earlier chips without busting the ABI. | |
189 | */ | |
190 | nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | | |
191 | NOUVEAU_GEM_DOMAIN_GART; | |
ebb945a9 | 192 | if (nv_device(drm->device)->card_type >= NV_50) |
db5c8e29 BS |
193 | nvbo->valid_domains &= domain; |
194 | ||
6ee73861 BS |
195 | nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); |
196 | if (!nvbo->gem) { | |
197 | nouveau_bo_ref(NULL, pnvbo); | |
198 | return -ENOMEM; | |
199 | } | |
200 | ||
5df23979 | 201 | nvbo->bo.persistent_swap_storage = nvbo->gem->filp; |
6ee73861 BS |
202 | nvbo->gem->driver_private = nvbo; |
203 | return 0; | |
204 | } | |
205 | ||
206 | static int | |
e758a311 BS |
207 | nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, |
208 | struct drm_nouveau_gem_info *rep) | |
6ee73861 | 209 | { |
ebb945a9 | 210 | struct nouveau_cli *cli = nouveau_cli(file_priv); |
6ee73861 | 211 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
e758a311 | 212 | struct nouveau_vma *vma; |
6ee73861 BS |
213 | |
214 | if (nvbo->bo.mem.mem_type == TTM_PL_TT) | |
215 | rep->domain = NOUVEAU_GEM_DOMAIN_GART; | |
216 | else | |
217 | rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; | |
218 | ||
e758a311 | 219 | rep->offset = nvbo->bo.offset; |
ebb945a9 BS |
220 | if (cli->base.vm) { |
221 | vma = nouveau_bo_vma_find(nvbo, cli->base.vm); | |
e758a311 BS |
222 | if (!vma) |
223 | return -EINVAL; | |
224 | ||
225 | rep->offset = vma->offset; | |
226 | } | |
227 | ||
6ee73861 | 228 | rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; |
d550c41e | 229 | rep->map_handle = nvbo->bo.addr_space_offset; |
6ee73861 BS |
230 | rep->tile_mode = nvbo->tile_mode; |
231 | rep->tile_flags = nvbo->tile_flags; | |
232 | return 0; | |
233 | } | |
234 | ||
6ee73861 BS |
/* DRM_IOCTL_NOUVEAU_GEM_NEW: allocate a buffer object and hand a GEM
 * handle plus placement info back to userspace.
 */
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

	/* Reject memory-type flags the fb backend does not support. */
	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	/* Create the userspace handle, then fill in the reply; undo the
	 * handle if gathering the info fails.
	 */
	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}
270 | ||
271 | static int | |
272 | nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, | |
273 | uint32_t write_domains, uint32_t valid_domains) | |
274 | { | |
275 | struct nouveau_bo *nvbo = gem->driver_private; | |
276 | struct ttm_buffer_object *bo = &nvbo->bo; | |
db5c8e29 | 277 | uint32_t domains = valid_domains & nvbo->valid_domains & |
78ad0f7b FJ |
278 | (write_domains ? write_domains : read_domains); |
279 | uint32_t pref_flags = 0, valid_flags = 0; | |
6ee73861 | 280 | |
78ad0f7b | 281 | if (!domains) |
6ee73861 BS |
282 | return -EINVAL; |
283 | ||
78ad0f7b FJ |
284 | if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) |
285 | valid_flags |= TTM_PL_FLAG_VRAM; | |
286 | ||
287 | if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) | |
288 | valid_flags |= TTM_PL_FLAG_TT; | |
289 | ||
290 | if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && | |
291 | bo->mem.mem_type == TTM_PL_VRAM) | |
292 | pref_flags |= TTM_PL_FLAG_VRAM; | |
293 | ||
294 | else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && | |
295 | bo->mem.mem_type == TTM_PL_TT) | |
296 | pref_flags |= TTM_PL_FLAG_TT; | |
297 | ||
298 | else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) | |
299 | pref_flags |= TTM_PL_FLAG_VRAM; | |
300 | ||
301 | else | |
302 | pref_flags |= TTM_PL_FLAG_TT; | |
303 | ||
304 | nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); | |
6ee73861 | 305 | |
6ee73861 BS |
306 | return 0; |
307 | } | |
308 | ||
/* Bookkeeping for one pushbuf validation pass: the reserved buffers
 * bucketed by their allowed placement, plus the ww-mutex acquire
 * context used to reserve them all without deadlocking.
 */
struct validate_op {
	struct list_head vram_list;	/* buffers restricted to VRAM */
	struct list_head gart_list;	/* buffers restricted to GART */
	struct list_head both_list;	/* buffers allowed in either */
	struct ww_acquire_ctx ticket;
};
315 | ||
/* Release every buffer on @list: attach @fence (may be NULL on error
 * paths), drop any kmap left over from reloc patching, clear the
 * reservation taken in validate_init(), and drop the GEM reference
 * taken at handle-lookup time.
 */
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
		   struct ww_acquire_ctx *ticket)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		/* Reloc patching may have kmapped the bo; undo that. */
		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}
339 | ||
340 | static void | |
ecff665f | 341 | validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence) |
6ee73861 | 342 | { |
ecff665f ML |
343 | validate_fini_list(&op->vram_list, fence, &op->ticket); |
344 | validate_fini_list(&op->gart_list, fence, &op->ticket); | |
345 | validate_fini_list(&op->both_list, fence, &op->ticket); | |
346 | } | |
347 | ||
/* Release all validated buffers and retire the ww acquire context;
 * the final counterpart to validate_init().
 */
static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_no_ticket(op, fence);
	ww_acquire_fini(&op->ticket);
}
354 | ||
/* Look up and reserve every buffer in the pushbuf's bo list, sorting
 * each onto the validate_op placement list matching its valid
 * domains.  Uses ww-mutex reservation: on -EDEADLK the contended bo
 * is acquired via the slowpath, everything already reserved is
 * released, and the whole list is walked again (hence "retry").
 * Returns 0 with all buffers reserved, or a negative errno with
 * nothing held.
 */
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;	/* bo won via slowpath */

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	/* Give up eventually rather than livelock on contention. */
	if (++trycnt > 100000) {
		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;
		/* Already reserved via the slowpath on a prior pass;
		 * drop the duplicate lookup reference and move on.
		 */
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			/* Contended: release what we hold, then either
			 * win the contended bo via the slowpath (and
			 * restart the walk) or bail out entirely.
			 */
			validate_fini_no_ticket(op, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				ww_acquire_done(&op->ticket);
				ww_acquire_fini(&op->ticket);
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_ERROR(cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		/* Bucket by allowed placement for the validate pass. */
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			/* Listed so validate_fini() can release it. */
			list_add_tail(&nvbo->entry, &op->both_list);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		/* Slowpath-acquired bo is now listed; rescan from the top. */
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	return 0;
}
447 | ||
525895ba BS |
448 | static int |
449 | validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo) | |
450 | { | |
451 | struct nouveau_fence *fence = NULL; | |
452 | int ret = 0; | |
453 | ||
454 | spin_lock(&nvbo->bo.bdev->fence_lock); | |
455 | if (nvbo->bo.sync_obj) | |
456 | fence = nouveau_fence_ref(nvbo->bo.sync_obj); | |
457 | spin_unlock(&nvbo->bo.bdev->fence_lock); | |
458 | ||
459 | if (fence) { | |
460 | ret = nouveau_fence_sync(fence, chan); | |
461 | nouveau_fence_unref(&fence); | |
462 | } | |
463 | ||
464 | return ret; | |
465 | } | |
466 | ||
6ee73861 | 467 | static int |
a84fa1a3 MS |
468 | validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, |
469 | struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo, | |
470 | uint64_t user_pbbo_ptr) | |
6ee73861 | 471 | { |
ebb945a9 | 472 | struct nouveau_drm *drm = chan->drm; |
6ee73861 BS |
473 | struct drm_nouveau_gem_pushbuf_bo __user *upbbo = |
474 | (void __force __user *)(uintptr_t)user_pbbo_ptr; | |
475 | struct nouveau_bo *nvbo; | |
476 | int ret, relocs = 0; | |
477 | ||
478 | list_for_each_entry(nvbo, list, entry) { | |
479 | struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; | |
6ee73861 | 480 | |
525895ba | 481 | ret = validate_sync(chan, nvbo); |
415e6186 | 482 | if (unlikely(ret)) { |
a84fa1a3 | 483 | NV_ERROR(cli, "fail pre-validate sync\n"); |
415e6186 | 484 | return ret; |
6ee73861 BS |
485 | } |
486 | ||
487 | ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, | |
488 | b->write_domains, | |
489 | b->valid_domains); | |
a1606a95 | 490 | if (unlikely(ret)) { |
a84fa1a3 | 491 | NV_ERROR(cli, "fail set_domain\n"); |
6ee73861 | 492 | return ret; |
a1606a95 | 493 | } |
6ee73861 | 494 | |
97a875cb | 495 | ret = nouveau_bo_validate(nvbo, true, false); |
a1606a95 | 496 | if (unlikely(ret)) { |
938c40ed | 497 | if (ret != -ERESTARTSYS) |
a84fa1a3 | 498 | NV_ERROR(cli, "fail ttm_validate\n"); |
6ee73861 | 499 | return ret; |
a1606a95 | 500 | } |
6ee73861 | 501 | |
525895ba | 502 | ret = validate_sync(chan, nvbo); |
415e6186 | 503 | if (unlikely(ret)) { |
a84fa1a3 | 504 | NV_ERROR(cli, "fail post-validate sync\n"); |
415e6186 BS |
505 | return ret; |
506 | } | |
507 | ||
ebb945a9 | 508 | if (nv_device(drm->device)->card_type < NV_50) { |
a3fcd0a9 BS |
509 | if (nvbo->bo.offset == b->presumed.offset && |
510 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && | |
511 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || | |
512 | (nvbo->bo.mem.mem_type == TTM_PL_TT && | |
513 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) | |
514 | continue; | |
515 | ||
516 | if (nvbo->bo.mem.mem_type == TTM_PL_TT) | |
517 | b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; | |
518 | else | |
519 | b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; | |
520 | b->presumed.offset = nvbo->bo.offset; | |
521 | b->presumed.valid = 0; | |
522 | relocs++; | |
523 | ||
524 | if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, | |
525 | &b->presumed, sizeof(b->presumed))) | |
526 | return -EFAULT; | |
527 | } | |
6ee73861 BS |
528 | } |
529 | ||
530 | return relocs; | |
531 | } | |
532 | ||
/* Reserve and place every buffer referenced by a pushbuf submission.
 * On success *apply_relocs holds the total count of buffers whose
 * presumed offsets were invalidated; on failure every reservation is
 * released before returning.
 */
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	/* Reserve everything (deadlock-free via ww reservation). */
	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate_init\n");
		return ret;
	}

	/* Validate each placement bucket; each call returns the number
	 * of relocations it made necessary.
	 */
	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}
587 | ||
588 | static inline void * | |
589 | u_memcpya(uint64_t user, unsigned nmemb, unsigned size) | |
590 | { | |
591 | void *mem; | |
592 | void __user *userptr = (void __force __user *)(uintptr_t)user; | |
593 | ||
594 | mem = kmalloc(nmemb * size, GFP_KERNEL); | |
595 | if (!mem) | |
596 | return ERR_PTR(-ENOMEM); | |
597 | ||
598 | if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) { | |
599 | kfree(mem); | |
600 | return ERR_PTR(-EFAULT); | |
601 | } | |
602 | ||
603 | return mem; | |
604 | } | |
605 | ||
606 | static int | |
a84fa1a3 | 607 | nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, |
a1606a95 BS |
608 | struct drm_nouveau_gem_pushbuf *req, |
609 | struct drm_nouveau_gem_pushbuf_bo *bo) | |
6ee73861 BS |
610 | { |
611 | struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; | |
12f735b7 LB |
612 | int ret = 0; |
613 | unsigned i; | |
6ee73861 | 614 | |
a1606a95 | 615 | reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc)); |
6ee73861 BS |
616 | if (IS_ERR(reloc)) |
617 | return PTR_ERR(reloc); | |
618 | ||
a1606a95 | 619 | for (i = 0; i < req->nr_relocs; i++) { |
6ee73861 BS |
620 | struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i]; |
621 | struct drm_nouveau_gem_pushbuf_bo *b; | |
a1606a95 | 622 | struct nouveau_bo *nvbo; |
6ee73861 BS |
623 | uint32_t data; |
624 | ||
a1606a95 | 625 | if (unlikely(r->bo_index > req->nr_buffers)) { |
a84fa1a3 | 626 | NV_ERROR(cli, "reloc bo index invalid\n"); |
6ee73861 BS |
627 | ret = -EINVAL; |
628 | break; | |
629 | } | |
630 | ||
631 | b = &bo[r->bo_index]; | |
a1606a95 | 632 | if (b->presumed.valid) |
6ee73861 BS |
633 | continue; |
634 | ||
a1606a95 | 635 | if (unlikely(r->reloc_bo_index > req->nr_buffers)) { |
a84fa1a3 | 636 | NV_ERROR(cli, "reloc container bo index invalid\n"); |
a1606a95 BS |
637 | ret = -EINVAL; |
638 | break; | |
639 | } | |
640 | nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv; | |
641 | ||
642 | if (unlikely(r->reloc_bo_offset + 4 > | |
643 | nvbo->bo.mem.num_pages << PAGE_SHIFT)) { | |
a84fa1a3 | 644 | NV_ERROR(cli, "reloc outside of bo\n"); |
a1606a95 BS |
645 | ret = -EINVAL; |
646 | break; | |
647 | } | |
648 | ||
649 | if (!nvbo->kmap.virtual) { | |
650 | ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, | |
651 | &nvbo->kmap); | |
652 | if (ret) { | |
a84fa1a3 | 653 | NV_ERROR(cli, "failed kmap for reloc\n"); |
a1606a95 BS |
654 | break; |
655 | } | |
656 | nvbo->validate_mapped = true; | |
657 | } | |
658 | ||
6ee73861 | 659 | if (r->flags & NOUVEAU_GEM_RELOC_LOW) |
a1606a95 | 660 | data = b->presumed.offset + r->data; |
6ee73861 BS |
661 | else |
662 | if (r->flags & NOUVEAU_GEM_RELOC_HIGH) | |
a1606a95 | 663 | data = (b->presumed.offset + r->data) >> 32; |
6ee73861 BS |
664 | else |
665 | data = r->data; | |
666 | ||
667 | if (r->flags & NOUVEAU_GEM_RELOC_OR) { | |
a1606a95 | 668 | if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART) |
6ee73861 BS |
669 | data |= r->tor; |
670 | else | |
671 | data |= r->vor; | |
672 | } | |
673 | ||
702adba2 | 674 | spin_lock(&nvbo->bo.bdev->fence_lock); |
a1606a95 | 675 | ret = ttm_bo_wait(&nvbo->bo, false, false, false); |
702adba2 | 676 | spin_unlock(&nvbo->bo.bdev->fence_lock); |
a1606a95 | 677 | if (ret) { |
a84fa1a3 | 678 | NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret); |
a1606a95 BS |
679 | break; |
680 | } | |
a1606a95 BS |
681 | |
682 | nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); | |
6ee73861 BS |
683 | } |
684 | ||
685 | kfree(reloc); | |
686 | return ret; | |
687 | } | |
688 | ||
/* DRM_IOCTL_NOUVEAU_GEM_PUSHBUF: the main command-submission ioctl.
 * Copies in the push/buffer/reloc arrays, validates and places all
 * referenced buffers, applies relocations if needed, submits the
 * pushes via one of three generation-dependent mechanisms (IB ring,
 * CALL, or patched JUMP), fences everything, and reports the suffix
 * dwords userspace must append to future push buffers.
 */
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	/* Resolve the userspace channel id to our channel object. */
	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	/* Sanity-limit the userspace-supplied array sizes. */
	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_ERROR(cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		/* IB mode (NV50+ style): submit each push through the
		 * indirect buffer ring.
		 */
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		/* Chips with call/return: emit a CALL per push. */
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		/* Oldest chips: patch a JUMP back to the main ring into
		 * the tail of each push buffer, then jump into it.
		 */
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			/* Only rewrite the tail if userspace's suffix
			 * guess (req->suffix0) is stale.
			 */
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	/* Attach the fence (NULL on failure) to all validated buffers
	 * and drop their reservations.
	 */
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	/* Report the suffix dwords userspace must append to its push
	 * buffers for the submission mode in use.
	 */
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}
877 | ||
6ee73861 BS |
878 | static inline uint32_t |
879 | domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain) | |
880 | { | |
881 | uint32_t flags = 0; | |
882 | ||
883 | if (domain & NOUVEAU_GEM_DOMAIN_VRAM) | |
884 | flags |= TTM_PL_FLAG_VRAM; | |
885 | if (domain & NOUVEAU_GEM_DOMAIN_GART) | |
886 | flags |= TTM_PL_FLAG_TT; | |
887 | ||
888 | return flags; | |
889 | } | |
890 | ||
6ee73861 BS |
891 | int |
892 | nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, | |
893 | struct drm_file *file_priv) | |
894 | { | |
895 | struct drm_nouveau_gem_cpu_prep *req = data; | |
896 | struct drm_gem_object *gem; | |
897 | struct nouveau_bo *nvbo; | |
898 | bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT); | |
899 | int ret = -EINVAL; | |
900 | ||
6ee73861 BS |
901 | gem = drm_gem_object_lookup(dev, file_priv, req->handle); |
902 | if (!gem) | |
bf79cb91 | 903 | return -ENOENT; |
6ee73861 BS |
904 | nvbo = nouveau_gem_object(gem); |
905 | ||
21e86c1c BS |
906 | spin_lock(&nvbo->bo.bdev->fence_lock); |
907 | ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait); | |
908 | spin_unlock(&nvbo->bo.bdev->fence_lock); | |
bc9025bd | 909 | drm_gem_object_unreference_unlocked(gem); |
6ee73861 BS |
910 | return ret; |
911 | } | |
912 | ||
/* DRM_IOCTL_NOUVEAU_GEM_CPU_FINI: no driver action is needed when
 * CPU access ends in this implementation; always succeeds.
 */
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}
919 | ||
920 | int | |
921 | nouveau_gem_ioctl_info(struct drm_device *dev, void *data, | |
922 | struct drm_file *file_priv) | |
923 | { | |
924 | struct drm_nouveau_gem_info *req = data; | |
925 | struct drm_gem_object *gem; | |
926 | int ret; | |
927 | ||
6ee73861 BS |
928 | gem = drm_gem_object_lookup(dev, file_priv, req->handle); |
929 | if (!gem) | |
bf79cb91 | 930 | return -ENOENT; |
6ee73861 | 931 | |
e758a311 | 932 | ret = nouveau_gem_info(file_priv, gem, req); |
bc9025bd | 933 | drm_gem_object_unreference_unlocked(gem); |
6ee73861 BS |
934 | return ret; |
935 | } | |
936 |