drm/vmwgfx: Convert screen targets to new helpers v3
[deliverable/linux.git] / drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1/**************************************************************************
2 *
3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "ttm/ttm_bo_api.h"
30
31/*
32 * Size of inline command buffers. Try to make sure that a page size is a
33 * multiple of the DMA pool allocation size.
34 */
35#define VMW_CMDBUF_INLINE_ALIGN 64
36#define VMW_CMDBUF_INLINE_SIZE (1024 - VMW_CMDBUF_INLINE_ALIGN)
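/*
 * Illustrative arithmetic, assuming 4 KiB pages and that SVGACBHeader fits
 * within the leading VMW_CMDBUF_INLINE_ALIGN bytes of struct
 * vmw_cmdbuf_dheader (an assumption, not checked here): each dheader then
 * occupies 64 + (1024 - 64) = 1024 bytes, so the dma_pool packs exactly
 * 4096 / 1024 = 4 inline command buffers per page with no wasted space.
 */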
37
38/**
39 * struct vmw_cmdbuf_context - Command buffer context queues
40 *
41 * @submitted: List of command buffers that have been submitted to the
42 * manager but not yet submitted to hardware.
43 * @hw_submitted: List of command buffers submitted to hardware.
44 * @preempted: List of preempted command buffers.
45 * @num_hw_submitted: Number of buffers currently being processed by hardware
46 */
47struct vmw_cmdbuf_context {
48 struct list_head submitted;
49 struct list_head hw_submitted;
50 struct list_head preempted;
51 unsigned num_hw_submitted;
52};
53
54/**
55 * struct vmw_cmdbuf_man - Command buffer manager
56 *
57 * @cur_mutex: Mutex protecting the command buffer used for incremental small
58 * kernel command submissions, @cur.
59 * @space_mutex: Mutex to protect against starvation when we allocate
60 * main pool buffer space.
61 * @work: A struct work_struct implementing command buffer error handling.
62 * Immutable.
63 * @dev_priv: Pointer to the device private struct. Immutable.
64 * @ctx: Array of command buffer context queues. The queues and the context
65 * data is protected by @lock.
66 * @error: List of command buffers that have caused device errors.
67 * Protected by @lock.
68 * @mm: Range manager for the command buffer space. Manager allocations and
69 * frees are protected by @lock.
70 * @cmd_space: Buffer object for the command buffer space, unless we were
71 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
72 * @map_obj: Mapping state for @cmd_space. Immutable.
73 * @map: Pointer to command buffer space. May be a mapped buffer object or
74 * a contiguous coherent DMA memory allocation. Immutable.
75 * @cur: Command buffer for small kernel command submissions. Protected by
76 * the @cur_mutex.
77 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
78 * @default_size: Default size for the @cur command buffer. Immutable.
79 * @max_hw_submitted: Max number of in-flight command buffers the device can
80 * handle. Immutable.
81 * @lock: Spinlock protecting command submission queues.
82 * @headers: Pool of DMA memory for device command buffer headers.
83 * Internal protection.
84 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
85 * space for inline data. Internal protection.
86 * @tasklet: Tasklet struct for irq processing. Immutable.
87 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
88 * space.
89 * @idle_queue: Wait queue for processes waiting for command buffer idle.
90 * @irq_on: Whether the process function has requested irq to be turned on.
91 * Protected by @lock.
92 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
93 * allocation. Immutable.
94 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
95 * Typically this is false only during bootstrap.
96 * @handle: DMA address handle for the command buffer space if @using_mob is
97 * false. Immutable.
98 * @size: The size of the command buffer space. Immutable.
99 */
100struct vmw_cmdbuf_man {
101 struct mutex cur_mutex;
102 struct mutex space_mutex;
103 struct work_struct work;
104 struct vmw_private *dev_priv;
105 struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
106 struct list_head error;
107 struct drm_mm mm;
108 struct ttm_buffer_object *cmd_space;
109 struct ttm_bo_kmap_obj map_obj;
110 u8 *map;
111 struct vmw_cmdbuf_header *cur;
112 size_t cur_pos;
113 size_t default_size;
114 unsigned max_hw_submitted;
115 spinlock_t lock;
116 struct dma_pool *headers;
117 struct dma_pool *dheaders;
118 struct tasklet_struct tasklet;
119 wait_queue_head_t alloc_queue;
120 wait_queue_head_t idle_queue;
121 bool irq_on;
122 bool using_mob;
123 bool has_pool;
124 dma_addr_t handle;
125 size_t size;
126};
127
128/**
129 * struct vmw_cmdbuf_header - Command buffer metadata
130 *
131 * @man: The command buffer manager.
132 * @cb_header: Device command buffer header, allocated from a DMA pool.
133 * @cb_context: The device command buffer context.
134 * @list: List head for attaching to the manager lists.
135 * @node: The range manager node.
136 * @handle: The DMA address of @cb_header. Handed to the device on command
137 * buffer submission.
138 * @cmd: Pointer to the command buffer space of this buffer.
139 * @size: Size of the command buffer space of this buffer.
140 * @reserved: Reserved space of this buffer.
141 * @inline_space: Whether inline command buffer space is used.
142 */
143struct vmw_cmdbuf_header {
144 struct vmw_cmdbuf_man *man;
145 SVGACBHeader *cb_header;
146 SVGACBContext cb_context;
147 struct list_head list;
148 struct drm_mm_node *node;
149 dma_addr_t handle;
150 u8 *cmd;
151 size_t size;
152 size_t reserved;
153 bool inline_space;
154};
155
156/**
157 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
158 * command buffer space.
159 *
160 * @cb_header: Device command buffer header.
161 * @cmd: Inline command buffer space.
162 */
163struct vmw_cmdbuf_dheader {
164 SVGACBHeader cb_header;
165 u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
166};
167
168/**
169 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
170 *
171 * @page_size: Size of requested command buffer space in pages.
172 * @node: The range manager node if allocation succeeded.
173 * @ret: Error code if failure. Otherwise 0.
174 */
175struct vmw_cmdbuf_alloc_info {
176 size_t page_size;
177 struct drm_mm_node *node;
178 int ret;
179};
180
181/* Loop over each context in the command buffer manager. */
182#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
183 for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
184 ++(_i), ++(_ctx))
185
186static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
187
188
189/**
190 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
191 *
192 * @man: The command buffer manager.
193 * @interruptible: Whether to wait interruptible when locking.
194 */
195static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
196{
197 if (interruptible) {
198 if (mutex_lock_interruptible(&man->cur_mutex))
199 return -ERESTARTSYS;
200 } else {
201 mutex_lock(&man->cur_mutex);
202 }
203
204 return 0;
205}
206
207/**
208 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
209 *
210 * @man: The command buffer manager.
211 */
212static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
213{
214 mutex_unlock(&man->cur_mutex);
215}
216
217/**
218 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
219 * been used for the device context with inline command buffers.
220 * Need not be called locked.
221 *
222 * @header: Pointer to the header to free.
223 */
224static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
225{
226 struct vmw_cmdbuf_dheader *dheader;
227
228 if (WARN_ON_ONCE(!header->inline_space))
229 return;
230
231 dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
232 cb_header);
233 dma_pool_free(header->man->dheaders, dheader, header->handle);
234 kfree(header);
235}
236
237/**
238 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
239 * associated structures.
240 *
241 * @header: Pointer to the header to free.
242 *
243 * For internal use. Must be called with man::lock held.
244 */
245static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
246{
247 struct vmw_cmdbuf_man *man = header->man;
248
249 lockdep_assert_held(&man->lock);
250
251 if (header->inline_space) {
252 vmw_cmdbuf_header_inline_free(header);
253 return;
254 }
255
256 drm_mm_remove_node(header->node);
257 kfree(header->node);
258 header->node = NULL;
259 wake_up_all(&man->alloc_queue);
260 if (header->cb_header)
261 dma_pool_free(man->headers, header->cb_header,
262 header->handle);
263 kfree(header);
264}
265
266/**
267 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
268 * associated structures.
269 *
270 * @header: Pointer to the header to free.
271 */
272void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
273{
274 struct vmw_cmdbuf_man *man = header->man;
275
276 /* Avoid locking if inline_space */
277 if (header->inline_space) {
278 vmw_cmdbuf_header_inline_free(header);
279 return;
280 }
281 spin_lock_bh(&man->lock);
282 __vmw_cmdbuf_header_free(header);
283 spin_unlock_bh(&man->lock);
284}
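/*
 * Illustrative back-out path (a sketch; names other than the cmdbuf API,
 * such as build_commands(), are assumptions): a caller that allocated
 * explicit command buffer space but decides not to submit it frees the
 * header instead of committing:
 *
 *	cmd = vmw_cmdbuf_alloc(man, size, true, &header);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	if (build_commands(cmd, size) < 0) {
 *		vmw_cmdbuf_header_free(header);
 *		return -EINVAL;
 *	}
 *
 * Once a header has been committed, the manager owns it and frees it when
 * the device is done with it, so this call is only for uncommitted headers.
 */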
285
286
287/**
288 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
289 *
290 * @header: The header of the buffer to submit.
291 */
292static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
293{
294 struct vmw_cmdbuf_man *man = header->man;
295 u32 val;
296
297 val = (header->handle >> 32);
298 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
299 val = (header->handle & 0xFFFFFFFFULL);
300 val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
301 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
302
303 return header->cb_header->status;
304}
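/*
 * Worked example of the register handshake above, using a hypothetical DMA
 * handle of 0x123456000 submitted on SVGA_CB_CONTEXT_0: the upper 32 bits
 * (0x1) are written to SVGA_REG_COMMAND_HIGH, and the lower 32 bits
 * (0x23456000) are OR'ed with the context id (0) and written to
 * SVGA_REG_COMMAND_LOW. This relies on the header allocations being
 * 64-byte aligned (the dma_pool alignment used in this file), so the low
 * bits covered by SVGA_CB_CONTEXT_MASK are free to carry the context id.
 */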
305
306/**
307 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
308 *
309 * @ctx: The command buffer context to initialize
310 */
311static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
312{
313 INIT_LIST_HEAD(&ctx->hw_submitted);
314 INIT_LIST_HEAD(&ctx->submitted);
315 INIT_LIST_HEAD(&ctx->preempted);
316 ctx->num_hw_submitted = 0;
317}
318
319/**
320 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
321 * context.
322 *
323 * @man: The command buffer manager.
324 * @ctx: The command buffer context.
325 *
326 * Submits command buffers to hardware until there are no more command
327 * buffers to submit or the hardware can't handle more command buffers.
328 */
329static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
330 struct vmw_cmdbuf_context *ctx)
331{
332 while (ctx->num_hw_submitted < man->max_hw_submitted &&
333 !list_empty(&ctx->submitted)) {
334 struct vmw_cmdbuf_header *entry;
335 SVGACBStatus status;
336
337 entry = list_first_entry(&ctx->submitted,
338 struct vmw_cmdbuf_header,
339 list);
340
341 status = vmw_cmdbuf_header_submit(entry);
342
343 /* This should never happen */
344 if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
345 entry->cb_header->status = SVGA_CB_STATUS_NONE;
346 break;
347 }
348
349 list_del(&entry->list);
350 list_add_tail(&entry->list, &ctx->hw_submitted);
351 ctx->num_hw_submitted++;
352 }
353
354}
355
356/**
357 * vmw_cmdbuf_ctx_process: Process a command buffer context.
358 *
359 * @man: The command buffer manager.
360 * @ctx: The command buffer context.
361 *
362 * Submit command buffers to hardware if possible, and process finished
363 * buffers, typically freeing them, but taking appropriate action on
364 * preemption or error. Wake up waiters if appropriate.
365 */
366static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
367 struct vmw_cmdbuf_context *ctx,
368 int *notempty)
369{
370 struct vmw_cmdbuf_header *entry, *next;
371
372 vmw_cmdbuf_ctx_submit(man, ctx);
373
374 list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
375 SVGACBStatus status = entry->cb_header->status;
376
377 if (status == SVGA_CB_STATUS_NONE)
378 break;
379
380 list_del(&entry->list);
381 wake_up_all(&man->idle_queue);
382 ctx->num_hw_submitted--;
383 switch (status) {
384 case SVGA_CB_STATUS_COMPLETED:
385 __vmw_cmdbuf_header_free(entry);
386 break;
387 case SVGA_CB_STATUS_COMMAND_ERROR:
388 case SVGA_CB_STATUS_CB_HEADER_ERROR:
389 list_add_tail(&entry->list, &man->error);
390 schedule_work(&man->work);
391 break;
392 case SVGA_CB_STATUS_PREEMPTED:
393 list_add(&entry->list, &ctx->preempted);
394 break;
395 default:
396 WARN_ONCE(true, "Undefined command buffer status.\n");
397 __vmw_cmdbuf_header_free(entry);
398 break;
399 }
400 }
401
402 vmw_cmdbuf_ctx_submit(man, ctx);
403 if (!list_empty(&ctx->submitted))
404 (*notempty)++;
405}
406
407/**
408 * vmw_cmdbuf_man_process - Process all command buffer contexts and
409 * switch on and off irqs as appropriate.
410 *
411 * @man: The command buffer manager.
412 *
413 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
414 * command buffers left that are not submitted to hardware, make sure
415 * IRQ handling is turned on. Otherwise, make sure it's turned off. This
416 * function may return -EAGAIN to indicate it should be rerun due to
417 * possibly missed IRQs if IRQs have just been turned on.
418 */
419static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
420{
421 int notempty = 0;
422 struct vmw_cmdbuf_context *ctx;
423 int i;
424
425 for_each_cmdbuf_ctx(man, i, ctx)
426 vmw_cmdbuf_ctx_process(man, ctx, &notempty);
427
428 if (man->irq_on && !notempty) {
429 vmw_generic_waiter_remove(man->dev_priv,
430 SVGA_IRQFLAG_COMMAND_BUFFER,
431 &man->dev_priv->cmdbuf_waiters);
432 man->irq_on = false;
433 } else if (!man->irq_on && notempty) {
434 vmw_generic_waiter_add(man->dev_priv,
435 SVGA_IRQFLAG_COMMAND_BUFFER,
436 &man->dev_priv->cmdbuf_waiters);
437 man->irq_on = true;
438
439 /* Rerun in case we just missed an irq. */
440 return -EAGAIN;
441 }
442
443 return 0;
444}
445
446/**
447 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
448 * command buffer context
449 *
450 * @man: The command buffer manager.
451 * @header: The header of the buffer to submit.
452 * @cb_context: The command buffer context to use.
453 *
454 * This function adds @header to the "submitted" queue of the command
455 * buffer context identified by @cb_context. It then calls the command buffer
456 * manager processing to potentially submit the buffer to hardware.
457 * @man->lock needs to be held when calling this function.
458 */
459static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
460 struct vmw_cmdbuf_header *header,
461 SVGACBContext cb_context)
462{
463 if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
464 header->cb_header->dxContext = 0;
465 header->cb_context = cb_context;
466 list_add_tail(&header->list, &man->ctx[cb_context].submitted);
467
468 if (vmw_cmdbuf_man_process(man) == -EAGAIN)
469 vmw_cmdbuf_man_process(man);
470}
471
472/**
473 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
474 * handler implemented as a tasklet.
475 *
476 * @data: Tasklet closure. A pointer to the command buffer manager cast to
477 * an unsigned long.
478 *
479 * The bottom half (tasklet) of the interrupt handler simply calls into the
480 * command buffer processor to free finished buffers and submit any
481 * queued buffers to hardware.
482 */
483static void vmw_cmdbuf_man_tasklet(unsigned long data)
484{
485 struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
486
487 spin_lock(&man->lock);
488 if (vmw_cmdbuf_man_process(man) == -EAGAIN)
489 (void) vmw_cmdbuf_man_process(man);
490 spin_unlock(&man->lock);
491}
492
493/**
494 * vmw_cmdbuf_work_func - The deferred work function that handles
495 * command buffer errors.
496 *
497 * @work: The work func closure argument.
498 *
499 * Restarting the command buffer context after an error requires process
500 * context, so it is deferred to this work function.
501 */
502static void vmw_cmdbuf_work_func(struct work_struct *work)
503{
504 struct vmw_cmdbuf_man *man =
505 container_of(work, struct vmw_cmdbuf_man, work);
506 struct vmw_cmdbuf_header *entry, *next;
507 bool restart = false;
508
509 spin_lock_bh(&man->lock);
510 list_for_each_entry_safe(entry, next, &man->error, list) {
511 restart = true;
512 DRM_ERROR("Command buffer error.\n");
513
514 list_del(&entry->list);
515 __vmw_cmdbuf_header_free(entry);
516 wake_up_all(&man->idle_queue);
517 }
518 spin_unlock_bh(&man->lock);
519
520 if (restart && vmw_cmdbuf_startstop(man, true))
521 DRM_ERROR("Failed restarting command buffer context 0.\n");
522
523}
524
525/**
526 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
527 *
528 * @man: The command buffer manager.
529 * @check_preempted: Check also the preempted queue for pending command buffers.
530 *
531 */
532static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
533 bool check_preempted)
534{
535 struct vmw_cmdbuf_context *ctx;
536 bool idle = false;
537 int i;
538
539 spin_lock_bh(&man->lock);
540 vmw_cmdbuf_man_process(man);
541 for_each_cmdbuf_ctx(man, i, ctx) {
542 if (!list_empty(&ctx->submitted) ||
543 !list_empty(&ctx->hw_submitted) ||
544 (check_preempted && !list_empty(&ctx->preempted)))
545 goto out_unlock;
546 }
547
548 idle = list_empty(&man->error);
549
550out_unlock:
551 spin_unlock_bh(&man->lock);
552
553 return idle;
554}
555
556/**
557 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
558 * command submissions
559 *
560 * @man: The command buffer manager.
561 *
562 * Flushes the current command buffer without allocating a new one. A new one
563 * is automatically allocated when needed. Call with @man->cur_mutex held.
564 */
565static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
566{
567 struct vmw_cmdbuf_header *cur = man->cur;
568
569 WARN_ON(!mutex_is_locked(&man->cur_mutex));
570
571 if (!cur)
572 return;
573
574 spin_lock_bh(&man->lock);
575 if (man->cur_pos == 0) {
576 __vmw_cmdbuf_header_free(cur);
577 goto out_unlock;
578 }
579
580 man->cur->cb_header->length = man->cur_pos;
581 vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
582out_unlock:
583 spin_unlock_bh(&man->lock);
584 man->cur = NULL;
585 man->cur_pos = 0;
586}
587
588/**
589 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
590 * command submissions
591 *
592 * @man: The command buffer manager.
593 * @interruptible: Whether to sleep interruptibly while waiting.
594 *
595 * Flushes the current command buffer without allocating a new one. A new one
596 * is automatically allocated when needed.
597 */
598int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
599 bool interruptible)
600{
601 int ret = vmw_cmdbuf_cur_lock(man, interruptible);
602
603 if (ret)
604 return ret;
605
606 __vmw_cmdbuf_cur_flush(man);
607 vmw_cmdbuf_cur_unlock(man);
608
609 return 0;
610}
611
612/**
613 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
614 *
615 * @man: The command buffer manager.
616 * @interruptible: Sleep interruptible while waiting.
617 * @timeout: Time out after this many ticks.
618 *
619 * Wait until the command buffer manager has processed all command buffers,
620 * or until a timeout occurs. If a timeout occurs, the function will return
621 * -EBUSY.
622 */
623int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
624 unsigned long timeout)
625{
626 int ret;
627
628 ret = vmw_cmdbuf_cur_flush(man, interruptible);
629 vmw_generic_waiter_add(man->dev_priv,
630 SVGA_IRQFLAG_COMMAND_BUFFER,
631 &man->dev_priv->cmdbuf_waiters);
632
633 if (interruptible) {
634 ret = wait_event_interruptible_timeout
635 (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
636 timeout);
637 } else {
638 ret = wait_event_timeout
639 (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
640 timeout);
641 }
642 vmw_generic_waiter_remove(man->dev_priv,
643 SVGA_IRQFLAG_COMMAND_BUFFER,
644 &man->dev_priv->cmdbuf_waiters);
645 if (ret == 0) {
646 if (!vmw_cmdbuf_man_idle(man, true))
647 ret = -EBUSY;
648 else
649 ret = 0;
650 }
651 if (ret > 0)
652 ret = 0;
653
654 return ret;
655}
656
657/**
658 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
659 *
660 * @man: The command buffer manager.
661 * @info: Allocation info. Will hold the size on entry and allocated mm node
662 * on successful return.
663 *
664 * Try to allocate buffer space from the main pool. Returns true if the allocation
665 * succeeded or a fatal error was hit, in which case the error code is in @info->ret.
666 */
667static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
668 struct vmw_cmdbuf_alloc_info *info)
669{
670 int ret;
671
672 if (info->node)
673 return true;
674
675 info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
676 if (!info->node) {
677 info->ret = -ENOMEM;
678 return true;
679 }
680
681 spin_lock_bh(&man->lock);
682 ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size, 0, 0,
683 DRM_MM_SEARCH_DEFAULT,
684 DRM_MM_CREATE_DEFAULT);
685 spin_unlock_bh(&man->lock);
686 if (ret) {
687 kfree(info->node);
688 info->node = NULL;
689 }
690
691 return !!info->node;
692}
693
694/**
695 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
696 *
697 * @man: The command buffer manager.
698 * @size: The size of the allocation.
699 * @interruptible: Whether to sleep interruptible while waiting for space.
700 *
701 * This function allocates buffer space from the main pool, and if there is
702 * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
703 * become available.
704 */
705static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
706 size_t size,
707 bool interruptible)
708{
709 struct vmw_cmdbuf_alloc_info info;
710
711 info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
712 info.node = NULL;
713 info.ret = 0;
714
715 /*
716 * To prevent starvation of large requests, only one allocating call
717 * at a time waiting for space.
718 */
719 if (interruptible) {
720 if (mutex_lock_interruptible(&man->space_mutex))
721 return ERR_PTR(-ERESTARTSYS);
722 } else {
723 mutex_lock(&man->space_mutex);
724 }
725
726 /* Try to allocate space without waiting. */
727 (void) vmw_cmdbuf_try_alloc(man, &info);
728 if (info.ret && !info.node) {
729 mutex_unlock(&man->space_mutex);
730 return ERR_PTR(info.ret);
731 }
732
733 if (info.node) {
734 mutex_unlock(&man->space_mutex);
735 return info.node;
736 }
737
738 vmw_generic_waiter_add(man->dev_priv,
739 SVGA_IRQFLAG_COMMAND_BUFFER,
740 &man->dev_priv->cmdbuf_waiters);
741
742 if (interruptible) {
743 int ret;
744
745 ret = wait_event_interruptible
746 (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
747 if (ret) {
748 vmw_generic_waiter_remove
749 (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
750 &man->dev_priv->cmdbuf_waiters);
751 mutex_unlock(&man->space_mutex);
752 return ERR_PTR(ret);
753 }
754 } else {
755 wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
756 }
757 vmw_generic_waiter_remove(man->dev_priv,
758 SVGA_IRQFLAG_COMMAND_BUFFER,
759 &man->dev_priv->cmdbuf_waiters);
760 mutex_unlock(&man->space_mutex);
761 if (info.ret && !info.node)
762 return ERR_PTR(info.ret);
763
764 return info.node;
765}
766
767/**
768 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
769 * space from the main pool.
770 *
771 * @man: The command buffer manager.
772 * @header: Pointer to the header to set up.
773 * @size: The requested size of the buffer space.
774 * @interruptible: Whether to sleep interruptible while waiting for space.
775 */
776static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
777 struct vmw_cmdbuf_header *header,
778 size_t size,
779 bool interruptible)
780{
781 SVGACBHeader *cb_hdr;
782 size_t offset;
783 int ret;
784
785 if (!man->has_pool)
786 return -ENOMEM;
787
788 header->node = vmw_cmdbuf_alloc_space(man, size, interruptible);
789
790 if (IS_ERR(header->node))
791 return PTR_ERR(header->node);
792
793 header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
794 &header->handle);
795 if (!header->cb_header) {
796 ret = -ENOMEM;
797 goto out_no_cb_header;
798 }
799
800 header->size = header->node->size << PAGE_SHIFT;
801 cb_hdr = header->cb_header;
802 offset = header->node->start << PAGE_SHIFT;
803 header->cmd = man->map + offset;
804 memset(cb_hdr, 0, sizeof(*cb_hdr));
805 if (man->using_mob) {
806 cb_hdr->flags = SVGA_CB_FLAG_MOB;
807 cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
808 cb_hdr->ptr.mob.mobOffset = offset;
809 } else {
810 cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
811 }
812
813 return 0;
814
815out_no_cb_header:
816 spin_lock_bh(&man->lock);
817 drm_mm_remove_node(header->node);
818 spin_unlock_bh(&man->lock);
819 kfree(header->node);
820
821 return ret;
822}
823
824/**
825 * vmw_cmdbuf_space_inline - Set up a command buffer header with
826 * inline command buffer space.
827 *
828 * @man: The command buffer manager.
829 * @header: Pointer to the header to set up.
830 * @size: The requested size of the buffer space.
831 */
832static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
833 struct vmw_cmdbuf_header *header,
834 int size)
835{
836 struct vmw_cmdbuf_dheader *dheader;
837 SVGACBHeader *cb_hdr;
838
839 if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
840 return -ENOMEM;
841
842 dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
843 &header->handle);
844 if (!dheader)
845 return -ENOMEM;
846
847 header->inline_space = true;
848 header->size = VMW_CMDBUF_INLINE_SIZE;
849 cb_hdr = &dheader->cb_header;
850 header->cb_header = cb_hdr;
851 header->cmd = dheader->cmd;
852 memset(dheader, 0, sizeof(*dheader));
853 cb_hdr->status = SVGA_CB_STATUS_NONE;
854 cb_hdr->flags = SVGA_CB_FLAG_NONE;
855 cb_hdr->ptr.pa = (u64)header->handle +
856 (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
857
858 return 0;
859}
860
861/**
862 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
863 * command buffer space.
864 *
865 * @man: The command buffer manager.
866 * @size: The requested size of the buffer space.
867 * @interruptible: Whether to sleep interruptible while waiting for space.
868 * @p_header: Points to a header pointer to populate on successful return.
869 *
870 * Returns a pointer to command buffer space if successful. Otherwise
871 * returns an error pointer. The header pointer returned in @p_header should
872 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
873 */
874void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
875 size_t size, bool interruptible,
876 struct vmw_cmdbuf_header **p_header)
877{
878 struct vmw_cmdbuf_header *header;
879 int ret = 0;
880
881 *p_header = NULL;
882
883 header = kzalloc(sizeof(*header), GFP_KERNEL);
884 if (!header)
885 return ERR_PTR(-ENOMEM);
886
887 if (size <= VMW_CMDBUF_INLINE_SIZE)
888 ret = vmw_cmdbuf_space_inline(man, header, size);
889 else
890 ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
891
892 if (ret) {
893 kfree(header);
894 return ERR_PTR(ret);
895 }
896
897 header->man = man;
898 INIT_LIST_HEAD(&header->list);
899 header->cb_header->status = SVGA_CB_STATUS_NONE;
900 *p_header = header;
901
902 return header->cmd;
903}
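/*
 * Illustrative use of the explicit-header path (a sketch; the size and the
 * command-building step are assumptions): allocate space, reserve it to set
 * the size and optional DX context, build commands, then commit. The
 * manager submits the buffer on SVGA_CB_CONTEXT_0 once it is flushed and
 * frees the header when the device reports completion.
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd;
 *
 *	cmd = vmw_cmdbuf_alloc(man, size, true, &header);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, header);
 *	... build at most size bytes of commands at cmd ...
 *	vmw_cmdbuf_commit(man, size, header, true);
 */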
904
905/**
906 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
907 * command buffer.
908 *
909 * @man: The command buffer manager.
910 * @size: The requested size of the commands.
911 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
912 * @interruptible: Whether to sleep interruptible while waiting for space.
913 *
914 * Returns a pointer to command buffer space if successful. Otherwise
915 * returns an error pointer.
916 */
917static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
918 size_t size,
919 int ctx_id,
920 bool interruptible)
921{
922 struct vmw_cmdbuf_header *cur;
923 void *ret;
924
925 if (vmw_cmdbuf_cur_lock(man, interruptible))
926 return ERR_PTR(-ERESTARTSYS);
927
928 cur = man->cur;
929 if (cur && (size + man->cur_pos > cur->size ||
930 (ctx_id != SVGA3D_INVALID_ID &&
931 (cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
932 ctx_id != cur->cb_header->dxContext)))
933 __vmw_cmdbuf_cur_flush(man);
934
935 if (!man->cur) {
936 ret = vmw_cmdbuf_alloc(man,
937 max_t(size_t, size, man->default_size),
938 interruptible, &man->cur);
939 if (IS_ERR(ret)) {
940 vmw_cmdbuf_cur_unlock(man);
941 return ret;
942 }
943
944 cur = man->cur;
945 }
946
947 if (ctx_id != SVGA3D_INVALID_ID) {
948 cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
949 cur->cb_header->dxContext = ctx_id;
950 }
951
952 cur->reserved = size;
953
954 return (void *) (man->cur->cmd + man->cur_pos);
955}
956
957/**
958 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
959 *
960 * @man: The command buffer manager.
961 * @size: The size of the commands actually written.
962 * @flush: Whether to flush the command buffer immediately.
963 */
964static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
965 size_t size, bool flush)
966{
967 struct vmw_cmdbuf_header *cur = man->cur;
968
969 WARN_ON(!mutex_is_locked(&man->cur_mutex));
970
971 WARN_ON(size > cur->reserved);
972 man->cur_pos += size;
973 if (!size)
974 cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
975 if (flush)
976 __vmw_cmdbuf_cur_flush(man);
977 vmw_cmdbuf_cur_unlock(man);
978}
979
980/**
981 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
982 *
983 * @man: The command buffer manager.
984 * @size: The requested size of the commands.
985 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
986 * @interruptible: Whether to sleep interruptible while waiting for space.
987 * @header: Header of the command buffer. NULL if the current command buffer
988 * should be used.
989 *
990 * Returns a pointer to command buffer space if successful. Otherwise
991 * returns an error pointer.
992 */
993void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
994 int ctx_id, bool interruptible,
995 struct vmw_cmdbuf_header *header)
996{
997 if (!header)
998 return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
999
1000 if (size > header->size)
1001 return ERR_PTR(-EINVAL);
1002
1003 if (ctx_id != SVGA3D_INVALID_ID) {
1004 header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1005 header->cb_header->dxContext = ctx_id;
1006 }
1007
1008 header->reserved = size;
1009 return header->cmd;
1010}
1011
1012/**
1013 * vmw_cmdbuf_commit - Commit commands in a command buffer.
1014 *
1015 * @man: The command buffer manager.
1016 * @size: The size of the commands actually written.
1017 * @header: Header of the command buffer. NULL if the current command buffer
1018 * should be used.
1019 * @flush: Whether to flush the command buffer immediately.
1020 */
1021void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1022 struct vmw_cmdbuf_header *header, bool flush)
1023{
1024 if (!header) {
1025 vmw_cmdbuf_commit_cur(man, size, flush);
1026 return;
1027 }
1028
1029 (void) vmw_cmdbuf_cur_lock(man, false);
1030 __vmw_cmdbuf_cur_flush(man);
1031 WARN_ON(size > header->reserved);
1032 man->cur = header;
1033 man->cur_pos = size;
1034 if (!size)
1035 header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1036 if (flush)
1037 __vmw_cmdbuf_cur_flush(man);
1038 vmw_cmdbuf_cur_unlock(man);
1039}
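/*
 * Illustrative use of the current-buffer path (a sketch; the size is an
 * assumption): passing a NULL header makes reserve/commit append to the
 * manager's internal buffer, which is flushed when it fills up, when a
 * different DX context is requested, or on an explicit flush.
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
 *				       true, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... build at most size bytes of commands at cmd ...
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */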
1040
1041/**
1042 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
1043 *
1044 * @man: The command buffer manager.
1045 */
1046void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
1047{
1048 if (!man)
1049 return;
1050
1051 tasklet_schedule(&man->tasklet);
1052}
1053
1054/**
1055 * vmw_cmdbuf_send_device_command - Send a command through the device context.
1056 *
1057 * @man: The command buffer manager.
1058 * @command: Pointer to the command to send.
1059 * @size: Size of the command.
1060 *
1061 * Synchronously sends a device context command.
1062 */
1063static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1064 const void *command,
1065 size_t size)
1066{
1067 struct vmw_cmdbuf_header *header;
1068 int status;
1069 void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1070
1071 if (IS_ERR(cmd))
1072 return PTR_ERR(cmd);
1073
1074 memcpy(cmd, command, size);
1075 header->cb_header->length = size;
1076 header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1077 spin_lock_bh(&man->lock);
1078 status = vmw_cmdbuf_header_submit(header);
1079 spin_unlock_bh(&man->lock);
1080 vmw_cmdbuf_header_free(header);
1081
1082 if (status != SVGA_CB_STATUS_COMPLETED) {
1083 DRM_ERROR("Device context command failed with status %d\n",
1084 status);
1085 return -EINVAL;
1086 }
1087
1088 return 0;
1089}
1090
1091/**
1092 * vmw_cmdbuf_startstop - Send a start / stop command through the device
1093 * context.
1094 *
1095 * @man: The command buffer manager.
1096 * @enable: Whether to enable or disable the context.
1097 *
1098 * Synchronously sends a device start / stop context command.
1099 */
1100static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
1101 bool enable)
1102{
1103 struct {
1104 uint32 id;
1105 SVGADCCmdStartStop body;
1106 } __packed cmd;
1107
1108 cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1109 cmd.body.enable = (enable) ? 1 : 0;
1110 cmd.body.context = SVGA_CB_CONTEXT_0;
1111
1112 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1113}
1114
1115/**
1116 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1117 *
1118 * @man: The command buffer manager.
1119 * @size: The size of the main space pool.
1120 * @default_size: The default size of the command buffer for small kernel
1121 * submissions.
1122 *
1123 * Set the size and allocate the main command buffer space pool,
1124 * as well as the default size of the command buffer for
1125 * small kernel submissions. If successful, this enables large command
1126 * submissions. Note that this function requires that rudimentary command
1127 * submission is already available and that the MOB memory manager is alive.
1128 * Returns 0 on success. Negative error code on failure.
1129 */
1130int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1131 size_t size, size_t default_size)
1132{
1133 struct vmw_private *dev_priv = man->dev_priv;
1134 bool dummy;
1135 int ret;
1136
1137 if (man->has_pool)
1138 return -EINVAL;
1139
1140 /* First, try to allocate a huge chunk of DMA memory */
1141 size = PAGE_ALIGN(size);
1142 man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
1143 &man->handle, GFP_KERNEL);
1144 if (man->map) {
1145 man->using_mob = false;
1146 } else {
1147 /*
1148 * DMA memory failed. If we can have command buffers in a
1149 * MOB, try to use that instead. Note that this will
1150 * actually call into the already enabled manager, when
1151 * binding the MOB.
1152 */
1153 if (!(dev_priv->capabilities & SVGA_CAP_CMD_BUFFERS_3))
1154 return -ENOMEM;
1155
1156 ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
1157 &vmw_mob_ne_placement, 0, false, NULL,
1158 &man->cmd_space);
1159 if (ret)
1160 return ret;
1161
1162 man->using_mob = true;
1163 ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1164 &man->map_obj);
1165 if (ret)
1166 goto out_no_map;
1167
1168 man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1169 }
1170
1171 man->size = size;
1172 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1173
1174 man->has_pool = true;
1175 man->default_size = default_size;
1176 DRM_INFO("Using command buffers with %s pool.\n",
1177 (man->using_mob) ? "MOB" : "DMA");
1178
1179 return 0;
1180
1181out_no_map:
1182 if (man->using_mob)
1183 ttm_bo_unref(&man->cmd_space);
1184
1185 return ret;
1186}
1187
1188/**
1189 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
1190 * inline command buffer submissions only.
1191 *
1192 * @dev_priv: Pointer to device private structure.
1193 *
1194 * Returns a pointer to a command buffer manager on success or an error pointer
1195 * on failure. The command buffer manager will be enabled for submissions of
1196 * size VMW_CMDBUF_INLINE_SIZE only.
1197 */
1198struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1199{
1200 struct vmw_cmdbuf_man *man;
1201 struct vmw_cmdbuf_context *ctx;
1202 int i;
1203 int ret;
1204
1205 if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1206 return ERR_PTR(-ENOSYS);
1207
1208 man = kzalloc(sizeof(*man), GFP_KERNEL);
1209 if (!man)
1210 return ERR_PTR(-ENOMEM);
1211
1212 man->headers = dma_pool_create("vmwgfx cmdbuf",
1213 &dev_priv->dev->pdev->dev,
1214 sizeof(SVGACBHeader),
1215 64, PAGE_SIZE);
1216 if (!man->headers) {
1217 ret = -ENOMEM;
1218 goto out_no_pool;
1219 }
1220
1221 man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1222 &dev_priv->dev->pdev->dev,
1223 sizeof(struct vmw_cmdbuf_dheader),
1224 64, PAGE_SIZE);
1225 if (!man->dheaders) {
1226 ret = -ENOMEM;
1227 goto out_no_dpool;
1228 }
1229
1230 for_each_cmdbuf_ctx(man, i, ctx)
1231 vmw_cmdbuf_ctx_init(ctx);
1232
1233 INIT_LIST_HEAD(&man->error);
1234 spin_lock_init(&man->lock);
1235 mutex_init(&man->cur_mutex);
1236 mutex_init(&man->space_mutex);
1237 tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
1238 (unsigned long) man);
1239 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1240 init_waitqueue_head(&man->alloc_queue);
1241 init_waitqueue_head(&man->idle_queue);
1242 man->dev_priv = dev_priv;
1243 man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1244 INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1245 vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1246 &dev_priv->error_waiters);
1247 ret = vmw_cmdbuf_startstop(man, true);
1248 if (ret) {
1249 DRM_ERROR("Failed starting command buffer context 0.\n");
1250 vmw_cmdbuf_man_destroy(man);
1251 return ERR_PTR(ret);
1252 }
1253
1254 return man;
1255
1256out_no_dpool:
1257 dma_pool_destroy(man->headers);
1258out_no_pool:
1259 kfree(man);
1260
1261 return ERR_PTR(ret);
1262}
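/*
 * Illustrative manager lifecycle during driver init and teardown (a sketch;
 * the pool sizes and surrounding error handling are assumptions, not values
 * taken from the driver):
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);
 *	if (IS_ERR(man))
 *		return PTR_ERR(man);
 *	(only inline-sized submissions are possible at this point)
 *
 *	ret = vmw_cmdbuf_set_pool_size(man, 2*1024*1024, 16*1024);
 *	(on failure, the manager keeps working in inline-only mode)
 *
 *	... normal operation ...
 *
 *	vmw_cmdbuf_remove_pool(man);	(before MOB teardown)
 *	vmw_cmdbuf_man_destroy(man);
 */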
1263
1264/**
1265 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1266 *
1267 * @man: Pointer to a command buffer manager.
1268 *
1269 * This function removes the main buffer space pool, and should be called
1270 * before MOB memory management is removed. When this function has been called,
1271 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1272 * less are allowed, and the default size of the command buffer for small kernel
1273 * submissions is also set to this size.
1274 */
1275void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1276{
1277 if (!man->has_pool)
1278 return;
1279
1280 man->has_pool = false;
1281 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1282 (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1283 if (man->using_mob) {
1284 (void) ttm_bo_kunmap(&man->map_obj);
1285 ttm_bo_unref(&man->cmd_space);
1286 } else {
1287 dma_free_coherent(&man->dev_priv->dev->pdev->dev,
1288 man->size, man->map, man->handle);
1289 }
1290}
1291
1292/**
1293 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1294 *
1295 * @man: Pointer to a command buffer manager.
1296 *
1297 * This function idles and then destroys a command buffer manager.
1298 */
1299void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1300{
1301 WARN_ON_ONCE(man->has_pool);
1302 (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1303 if (vmw_cmdbuf_startstop(man, false))
1304 DRM_ERROR("Failed stopping command buffer context 0.\n");
1305
1306 vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1307 &man->dev_priv->error_waiters);
1308 tasklet_kill(&man->tasklet);
1309 (void) cancel_work_sync(&man->work);
1310 dma_pool_destroy(man->dheaders);
1311 dma_pool_destroy(man->headers);
1312 mutex_destroy(&man->cur_mutex);
1313 mutex_destroy(&man->space_mutex);
1314 kfree(man);
1315}