This commit addresses some stability problems with the recently introduced
command buffer submission code:
1) Make the vmw_cmdbuf_man_process() function handle reruns internally, so
that interrupts are not lost when a caller forgets to rerun on -EAGAIN
(see the sketch after this list).
2) Handle default command buffer allocations using inline command buffers.
This avoids rare allocation deadlocks.
3) Command buffer errors might cause fence submissions to be lost.
Therefore, send a new fence after each command buffer error; this helps
avoid lengthy fence waits.
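For reference, item 1) means the function now loops internally instead of
returning -EAGAIN and relying on every caller to invoke it a second time.
Below is a minimal sketch of that pattern, assuming the driver's
vmw_generic_waiter_add()/vmw_generic_waiter_remove() helpers and the
SVGA_IRQFLAG_COMMAND_BUFFER flag; the exact body of the committed function
may differ:

static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		/* Nothing pending: command buffer irqs can be turned off. */
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		/* Work still pending: turn command buffer irqs on. */
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

Callers that previously had to check for -EAGAIN and call the function a
second time can now make a single call, as the hunks below show.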
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
*
* Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
* command buffers left that are not submitted to hardware, make sure
- * IRQ handling is turned on. Otherwise, make sure it's turned off. This
- * function may return -EAGAIN to indicate it should be rerun due to
- * possibly missed IRQs if IRQs has just been turned on.
+ * IRQ handling is turned on. Otherwise, make sure it's turned off.
-static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
struct vmw_cmdbuf_context *ctx;
int i;
for_each_cmdbuf_ctx(man, i, ctx)
	vmw_cmdbuf_ctx_process(man, ctx, &notempty);
man->irq_on = true;
/* Rerun in case we just missed an irq. */
header->cb_context = cb_context;
list_add_tail(&header->list, &man->ctx[cb_context].submitted);
- if (vmw_cmdbuf_man_process(man) == -EAGAIN)
- vmw_cmdbuf_man_process(man);
+ vmw_cmdbuf_man_process(man);
struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
spin_lock(&man->lock);
- if (vmw_cmdbuf_man_process(man) == -EAGAIN)
- (void) vmw_cmdbuf_man_process(man);
+ vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
}
struct vmw_cmdbuf_man *man =
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
bool restart = false;
spin_lock_bh(&man->lock);
if (restart && vmw_cmdbuf_startstop(man, true))
DRM_ERROR("Failed restarting command buffer context 0.\n");
+ /* Send a new fence in case one was removed */
+ vmw_fifo_send_fence(man->dev_priv, &dummy);
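Item 3) lands in the error work function shown in this hunk. The sketch
below illustrates where the replacement fence is sent, assuming the error
list, the idle_queue wait queue and the __vmw_cmdbuf_header_free() helper
found in the same file; treat it as an illustration rather than the exact
committed code:

static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool restart = false;

	spin_lock_bh(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		restart = true;
		DRM_ERROR("Command buffer error.\n");

		list_del(&entry->list);
		__vmw_cmdbuf_header_free(entry);
		wake_up_all(&man->idle_queue);
	}
	spin_unlock_bh(&man->lock);

	if (restart && vmw_cmdbuf_startstop(man, true))
		DRM_ERROR("Failed restarting command buffer context 0.\n");

	/*
	 * Send a new fence in case one was removed: an errored command
	 * buffer may have carried the only fence a waiter was sleeping
	 * on, so emitting a replacement prevents a lengthy fence wait.
	 */
	vmw_fifo_send_fence(man->dev_priv, &dummy);
}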
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
if (ret) {
- (void) vmw_cmdbuf_man_process(man);
+ vmw_cmdbuf_man_process(man);
ret = drm_mm_insert_node_generic(&man->mm, info->node,
info->page_size, 0, 0,
DRM_MM_SEARCH_DEFAULT,
drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
man->has_pool = true;
- man->default_size = default_size;
+
+ /*
+ * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
+ * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
+ * needs to wait for space and we block on further command
+ * submissions to be able to free up space.
+ */
+ man->default_size = VMW_CMDBUF_INLINE_SIZE;
DRM_INFO("Using command buffers with %s pool.\n",
(man->using_mob) ? "MOB" : "DMA");
DRM_INFO("Using command buffers with %s pool.\n",
(man->using_mob) ? "MOB" : "DMA");