/*
- * Copyright © 2008 Intel Corporation
+ * Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
+#define RQ_BUG_ON(expr)
+
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly);
static void
-i915_gem_object_retire(struct drm_i915_gem_object *obj);
-
+i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
+static void
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
- struct shrink_control *sc);
-static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
- struct shrink_control *sc);
-static int i915_gem_shrinker_oom(struct notifier_block *nb,
- unsigned long event,
- void *ptr);
-static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
- int ret;
+ int ret = 0;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
if (ret)
return ret;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
mutex_unlock(&dev->struct_mutex);
unwritten = copy_from_user(vaddr, user_data, args->size);
mutex_lock(&dev->struct_mutex);
- if (unwritten)
- return -EFAULT;
+ if (unwritten) {
+ ret = -EFAULT;
+ goto out;
+ }
}
drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(dev);
- return 0;
+
+out:
+ intel_fb_obj_flush(obj, false);
+ return ret;
}
void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
+ return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- kmem_cache_free(dev_priv->slab, obj);
+ kmem_cache_free(dev_priv->objects, obj);
}
static int
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
-
- i915_gem_object_retire(obj);
}
ret = i915_gem_object_get_pages(obj);
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+
while (remain > 0) {
/* Operation in this page
*
if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
- goto out_unpin;
+ goto out_flush;
}
remain -= page_length;
offset += page_length;
}
+out_flush:
+ intel_fb_obj_flush(obj, false);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out:
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
-
- i915_gem_object_retire(obj);
}
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
if (ret)
return ret;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+
i915_gem_object_pin_pages(obj);
offset = args->offset;
if (needs_clflush_after)
i915_gem_chipset_flush(dev);
+ intel_fb_obj_flush(obj, false);
return ret;
}
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
-static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+static int __i915_spin_request(struct drm_i915_gem_request *req)
{
- if (file_priv == NULL)
- return true;
+ unsigned long timeout;
+
+ if (i915_gem_request_get_ring(req)->irq_refcount)
+ return -EBUSY;
+
+ timeout = jiffies + 1;
+ while (!need_resched()) {
+ if (i915_gem_request_completed(req, true))
+ return 0;
+
+ if (time_after_eq(jiffies, timeout))
+ break;
+
+ cpu_relax_lowlatency();
+ }
+ if (i915_gem_request_completed(req, false))
+ return 0;
- return !atomic_xchg(&file_priv->rps_wait_boost, true);
+ return -EAGAIN;
}
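/*
 * Note on the spin above (illustrative): the busy-wait is bounded to one
 * scheduler tick (timeout = jiffies + 1, roughly 1 ms at HZ=1000) and is
 * skipped entirely when another waiter already holds the ring interrupt
 * (irq_refcount) or once the scheduler wants the CPU back (need_resched()).
 */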
/**
unsigned reset_counter,
bool interruptible,
s64 *timeout,
- struct drm_i915_file_private *file_priv)
+ struct intel_rps_client *rps)
{
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct drm_device *dev = ring->dev;
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
+ if (list_empty(&req->list))
+ return 0;
+
if (i915_gem_request_completed(req, true))
return 0;
timeout_expire = timeout ?
jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
- if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
- gen6_rps_boost(dev_priv);
- if (file_priv)
- mod_delayed_work(dev_priv->wq,
- &file_priv->mm.idle_work,
- msecs_to_jiffies(100));
- }
-
- if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
- return -ENODEV;
+ if (INTEL_INFO(dev_priv)->gen >= 6)
+ gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
+
+ /* Optimistic spin for the next jiffie before touching IRQs */
+ ret = __i915_spin_request(req);
+ if (ret == 0)
+ goto out;
+
+ if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
for (;;) {
struct timer_list timer;
destroy_timer_on_stack(&timer);
}
}
- now = ktime_get_raw_ns();
- trace_i915_gem_request_wait_end(req);
-
if (!irq_test_in_progress)
ring->irq_put(ring);
finish_wait(&ring->irq_queue, &wait);
+out:
+ now = ktime_get_raw_ns();
+ trace_i915_gem_request_wait_end(req);
+
if (timeout) {
s64 tres = *timeout - (now - before);
return ret;
}
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_file_private *file_priv = request->file_priv;
+
+ if (!file_priv)
+ return;
+
+ spin_lock(&file_priv->mm.lock);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
+}
+
+static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+{
+ trace_i915_gem_request_retire(request);
+
+ /* We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of the tail of the request to update the last known position
+ * of the GPU head.
+ *
+ * Note this requires that we are always called in request
+ * completion order.
+ */
+ request->ringbuf->last_retired_head = request->postfix;
+
+ list_del_init(&request->list);
+ i915_gem_request_remove_from_client(request);
+
+ put_pid(request->pid);
+
+ i915_gem_request_unreference(request);
+}
+
+static void
+__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
+{
+ struct intel_engine_cs *engine = req->ring;
+ struct drm_i915_gem_request *tmp;
+
+ lockdep_assert_held(&engine->dev->struct_mutex);
+
+ if (list_empty(&req->list))
+ return;
+
+ do {
+ tmp = list_first_entry(&engine->request_list,
+ typeof(*tmp), list);
+
+ i915_gem_request_retire(tmp);
+ } while (tmp != req);
+
+ WARN_ON(i915_verify_lists(engine->dev));
+}
+
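/*
 * Worked example (illustrative): if the engine's request_list holds A, B, C
 * in submission order and we have just waited for B, the loop above retires
 * A and then B, leaving C outstanding; i915_gem_request_retire() is thus
 * always called in completion order, as its comment requires.
 */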
/**
* Waits for a request to be signaled, and cleans up the
* request and object lists appropriately for that event.
struct drm_device *dev;
struct drm_i915_private *dev_priv;
bool interruptible;
- unsigned reset_counter;
int ret;
BUG_ON(req == NULL);
if (ret)
return ret;
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- i915_gem_request_reference(req);
- ret = __i915_wait_request(req, reset_counter,
+ ret = __i915_wait_request(req,
+ atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL, NULL);
- i915_gem_request_unreference(req);
- return ret;
-}
-
-static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
-{
- if (!obj->active)
- return 0;
-
- /* Manually manage the write flush as we may have not yet
- * retired the buffer.
- *
- * Note that the last_write_req is always the earlier of
- * the two (read/write) requests, so if we haved successfully waited,
- * we know we have passed the last write.
- */
- i915_gem_request_assign(&obj->last_write_req, NULL);
+ if (ret)
+ return ret;
+ __i915_gem_request_retire__upto(req);
return 0;
}
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
-static __must_check int
+int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
- struct drm_i915_gem_request *req;
- int ret;
+ int ret, i;
- req = readonly ? obj->last_write_req : obj->last_read_req;
- if (!req)
+ if (!obj->active)
return 0;
- ret = i915_wait_request(req);
- if (ret)
- return ret;
+ if (readonly) {
+ if (obj->last_write_req != NULL) {
+ ret = i915_wait_request(obj->last_write_req);
+ if (ret)
+ return ret;
+
+ i = obj->last_write_req->ring->id;
+ if (obj->last_read_req[i] == obj->last_write_req)
+ i915_gem_object_retire__read(obj, i);
+ else
+ i915_gem_object_retire__write(obj);
+ }
+ } else {
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (obj->last_read_req[i] == NULL)
+ continue;
+
+ ret = i915_wait_request(obj->last_read_req[i]);
+ if (ret)
+ return ret;
+
+ i915_gem_object_retire__read(obj, i);
+ }
+ RQ_BUG_ON(obj->active);
+ }
+
+ return 0;
+}
+
+static void
+i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_request *req)
+{
+ int ring = req->ring->id;
- return i915_gem_object_wait_rendering__tail(obj);
+ if (obj->last_read_req[ring] == req)
+ i915_gem_object_retire__read(obj, ring);
+ else if (obj->last_write_req == req)
+ i915_gem_object_retire__write(obj);
+
+ __i915_gem_request_retire__upto(req);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
*/
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
- struct drm_i915_file_private *file_priv,
+ struct intel_rps_client *rps,
bool readonly)
{
- struct drm_i915_gem_request *req;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *requests[I915_NUM_RINGS];
unsigned reset_counter;
- int ret;
+ int ret, i, n = 0;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!dev_priv->mm.interruptible);
- req = readonly ? obj->last_write_req : obj->last_read_req;
- if (!req)
+ if (!obj->active)
return 0;
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
- ret = i915_gem_check_olr(req);
- if (ret)
- return ret;
-
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- i915_gem_request_reference(req);
+
+ if (readonly) {
+ struct drm_i915_gem_request *req;
+
+ req = obj->last_write_req;
+ if (req == NULL)
+ return 0;
+
+ ret = i915_gem_check_olr(req);
+ if (ret)
+ goto err;
+
+ requests[n++] = i915_gem_request_reference(req);
+ } else {
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct drm_i915_gem_request *req;
+
+ req = obj->last_read_req[i];
+ if (req == NULL)
+ continue;
+
+ ret = i915_gem_check_olr(req);
+ if (ret)
+ goto err;
+
+ requests[n++] = i915_gem_request_reference(req);
+ }
+ }
+
mutex_unlock(&dev->struct_mutex);
- ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
+ for (i = 0; ret == 0 && i < n; i++)
+ ret = __i915_wait_request(requests[i], reset_counter, true,
+ NULL, rps);
mutex_lock(&dev->struct_mutex);
- i915_gem_request_unreference(req);
- if (ret)
- return ret;
- return i915_gem_object_wait_rendering__tail(obj);
+err:
+ for (i = 0; i < n; i++) {
+ if (ret == 0)
+ i915_gem_object_retire_request(obj, requests[i]);
+ i915_gem_request_unreference(requests[i]);
+ }
+
+ return ret;
+}
+
+static struct intel_rps_client *to_rps_client(struct drm_file *file)
+{
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ return &fpriv->rps;
}
/**
* to catch cases where we are gazumped.
*/
ret = i915_gem_object_wait_rendering__nonblocking(obj,
- file->driver_priv,
+ to_rps_client(file),
!write_domain);
if (ret)
goto unref;
struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_ggtt_view view = i915_ggtt_view_normal;
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
goto unlock;
}
- /* Now bind it into the GTT if needed */
- ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+ /* Use a partial view if the object is bigger than the aperture. */
+ if (obj->base.size >= dev_priv->gtt.mappable_end &&
+ obj->tiling_mode == I915_TILING_NONE) {
+ static const unsigned int chunk_size = 256; // 1 MiB
+
+ memset(&view, 0, sizeof(view));
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ view.params.partial.offset = rounddown(page_offset, chunk_size);
+ view.params.partial.size =
+ min_t(unsigned int,
+ chunk_size,
+ (vma->vm_end - vma->vm_start)/PAGE_SIZE -
+ view.params.partial.offset);
+ }
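	/*
	 * Worked example (illustrative): a fault at page_offset 1000 of a
	 * large untiled object yields a partial view starting at
	 * rounddown(1000, 256) = page 768 and spanning
	 * min(256, pages_in_vma - 768) pages, i.e. at most 1 MiB of GGTT.
	 */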
+
+ /* Now pin it into the GTT if needed */
+ ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
if (ret)
goto unlock;
goto unpin;
/* Finally, remap it using the new GTT offset */
- pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+ pfn = dev_priv->gtt.mappable_base +
+ i915_gem_obj_ggtt_offset_view(obj, &view);
pfn >>= PAGE_SHIFT;
- if (!obj->fault_mappable) {
- unsigned long size = min_t(unsigned long,
- vma->vm_end - vma->vm_start,
- obj->base.size);
- int i;
+ if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
+ /* Overriding existing pages in partial view does not cause
+ * us any trouble as TLBs are still valid because the fault
+ * is due to userspace losing part of the mapping or never
+ * having accessed it before (at this partial's range).
+ */
+ unsigned long base = vma->vm_start +
+ (view.params.partial.offset << PAGE_SHIFT);
+ unsigned int i;
- for (i = 0; i < size >> PAGE_SHIFT; i++) {
- ret = vm_insert_pfn(vma,
- (unsigned long)vma->vm_start + i * PAGE_SIZE,
- pfn + i);
+ for (i = 0; i < view.params.partial.size; i++) {
+ ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
if (ret)
break;
}
obj->fault_mappable = true;
- } else
- ret = vm_insert_pfn(vma,
- (unsigned long)vmf->virtual_address,
- pfn + page_offset);
+ } else {
+ if (!obj->fault_mappable) {
+ unsigned long size = min_t(unsigned long,
+ vma->vm_end - vma->vm_start,
+ obj->base.size);
+ int i;
+
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vma->vm_start + i * PAGE_SIZE,
+ pfn + i);
+ if (ret)
+ break;
+ }
+
+ obj->fault_mappable = true;
+ } else
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vmf->virtual_address,
+ pfn + page_offset);
+ }
unpin:
- i915_gem_object_ggtt_unpin(obj);
+ i915_gem_object_ggtt_unpin_view(obj, &view);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
uint32_t handle,
uint64_t *offset)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
goto unlock;
}
- if (obj->base.size > dev_priv->gtt.mappable_end) {
- ret = -E2BIG;
- goto out;
- }
-
if (obj->madv != I915_MADV_WILLNEED) {
DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
ret = -EFAULT;
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
- return obj->madv == I915_MADV_DONTNEED;
-}
-
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
return 0;
}
-unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
- long target, unsigned flags)
-{
- const struct {
- struct list_head *list;
- unsigned int bit;
- } phases[] = {
- { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
- { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
- { NULL, 0 },
- }, *phase;
- unsigned long count = 0;
-
- /*
- * As we may completely rewrite the (un)bound list whilst unbinding
- * (due to retiring requests) we have to strictly process only
- * one element of the list at the time, and recheck the list
- * on every iteration.
- *
- * In particular, we must hold a reference whilst removing the
- * object as we may end up waiting for and/or retiring the objects.
- * This might release the final reference (held by the active list)
- * and result in the object being freed from under us. This is
- * similar to the precautions the eviction code must take whilst
- * removing objects.
- *
- * Also note that although these lists do not hold a reference to
- * the object we can safely grab one here: The final object
- * unreferencing and the bound_list are both protected by the
- * dev->struct_mutex and so we won't ever be able to observe an
- * object on the bound_list with a reference count equals 0.
- */
- for (phase = phases; phase->list; phase++) {
- struct list_head still_in_list;
-
- if ((flags & phase->bit) == 0)
- continue;
-
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(phase->list)) {
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma, *v;
-
- obj = list_first_entry(phase->list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
-
- if (flags & I915_SHRINK_PURGEABLE &&
- !i915_gem_object_is_purgeable(obj))
- continue;
-
- drm_gem_object_reference(&obj->base);
-
- /* For the unbound phase, this should be a no-op! */
- list_for_each_entry_safe(vma, v,
- &obj->vma_list, vma_link)
- if (i915_vma_unbind(vma))
- break;
-
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- drm_gem_object_unreference(&obj->base);
- }
- list_splice(&still_in_list, phase->list);
- }
-
- return count;
-}
-
-static unsigned long
-i915_gem_shrink_all(struct drm_i915_private *dev_priv)
-{
- i915_gem_evict_everything(dev_priv->dev);
- return i915_gem_shrink(dev_priv, LONG_MAX,
- I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
-}
-
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
return ret;
list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+
+ obj->get_page.sg = obj->pages->sgl;
+ obj->get_page.last = 0;
+
return 0;
}
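/*
 * A sketch of how the get_page cursor initialised above can be consumed
 * (the helper name is illustrative; the real lookup presumably lives in
 * i915_drv.h, and sg chaining is ignored for brevity). The cursor caches the
 * last scatterlist entry visited so that monotonically increasing page
 * lookups avoid rescanning the whole table.
 */
static inline struct page *
i915_gem_object_get_page_sketch(struct drm_i915_gem_object *obj, int n)
{
	/* Restart from the head when asked for an earlier page */
	if (n < obj->get_page.last) {
		obj->get_page.sg = obj->pages->sgl;
		obj->get_page.last = 0;
	}

	/* Advance the cached cursor until its entry covers page n */
	while (obj->get_page.last + (obj->get_page.sg->length >> PAGE_SHIFT) <= n) {
		obj->get_page.last += obj->get_page.sg->length >> PAGE_SHIFT;
		obj->get_page.sg++;
	}

	return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
}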
-static void
-i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+void i915_vma_move_to_active(struct i915_vma *vma,
+ struct intel_engine_cs *ring)
{
- struct drm_i915_gem_request *req;
- struct intel_engine_cs *old_ring;
-
- BUG_ON(ring == NULL);
-
- req = intel_ring_get_request(ring);
- old_ring = i915_gem_request_get_ring(obj->last_read_req);
-
- if (old_ring != ring && obj->last_write_req) {
- /* Keep the request relative to the current ring */
- i915_gem_request_assign(&obj->last_write_req, req);
- }
+ struct drm_i915_gem_object *obj = vma->obj;
/* Add a reference if we're newly entering the active list. */
- if (!obj->active) {
+ if (obj->active == 0)
drm_gem_object_reference(&obj->base);
- obj->active = 1;
- }
+ obj->active |= intel_ring_flag(ring);
- list_move_tail(&obj->ring_list, &ring->active_list);
+ list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
+ i915_gem_request_assign(&obj->last_read_req[ring->id],
+ intel_ring_get_request(ring));
- i915_gem_request_assign(&obj->last_read_req, req);
+ list_move_tail(&vma->mm_list, &vma->vm->active_list);
}
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_engine_cs *ring)
+static void
+i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
- list_move_tail(&vma->mm_list, &vma->vm->active_list);
- return i915_gem_object_move_to_active(vma->obj, ring);
+ RQ_BUG_ON(obj->last_write_req == NULL);
+ RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+
+ i915_gem_request_assign(&obj->last_write_req, NULL);
+ intel_fb_obj_flush(obj, true);
}
static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
{
struct i915_vma *vma;
- BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
- BUG_ON(!obj->active);
+ RQ_BUG_ON(obj->last_read_req[ring] == NULL);
+ RQ_BUG_ON(!(obj->active & (1 << ring)));
+
+ list_del_init(&obj->ring_list[ring]);
+ i915_gem_request_assign(&obj->last_read_req[ring], NULL);
+
+ if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+ i915_gem_object_retire__write(obj);
+
+ obj->active &= ~(1 << ring);
+ if (obj->active)
+ return;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
}
- intel_fb_obj_flush(obj, true);
-
- list_del_init(&obj->ring_list);
-
- i915_gem_request_assign(&obj->last_read_req, NULL);
- i915_gem_request_assign(&obj->last_write_req, NULL);
- obj->base.write_domain = 0;
-
i915_gem_request_assign(&obj->last_fenced_req, NULL);
-
- obj->active = 0;
drm_gem_object_unreference(&obj->base);
-
- WARN_ON(i915_verify_lists(dev));
-}
-
-static void
-i915_gem_object_retire(struct drm_i915_gem_object *obj)
-{
- if (obj->last_read_req == NULL)
- return;
-
- if (i915_gem_request_completed(obj->last_read_req, true))
- i915_gem_object_move_to_inactive(obj);
}
static int
ret = ring->add_request(ring);
if (ret)
return ret;
+
+ request->tail = intel_ring_get_tail(ringbuf);
}
request->head = request_start;
- request->tail = intel_ring_get_tail(ringbuf);
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
list_add_tail(&request->client_list,
&file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
+
+ request->pid = get_pid(task_pid(current));
}
trace_i915_gem_request_add(request);
i915_queue_hangcheck(ring->dev);
- cancel_delayed_work_sync(&dev_priv->mm.idle_work);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
return 0;
}
-static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
-{
- struct drm_i915_file_private *file_priv = request->file_priv;
-
- if (!file_priv)
- return;
-
- spin_lock(&file_priv->mm.lock);
- list_del(&request->client_list);
- request->file_priv = NULL;
- spin_unlock(&file_priv->mm.lock);
-}
-
-static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
- const struct intel_context *ctx)
+static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
+ const struct intel_context *ctx)
{
unsigned long elapsed;
}
}
-static void i915_gem_free_request(struct drm_i915_gem_request *request)
-{
- list_del(&request->list);
- i915_gem_request_remove_from_client(request);
-
- i915_gem_request_unreference(request);
-}
-
void i915_gem_request_free(struct kref *req_ref)
{
struct drm_i915_gem_request *req = container_of(req_ref,
i915_gem_context_unreference(ctx);
}
- kfree(req);
+ kmem_cache_free(req->i915->requests, req);
+}
+
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_gem_request *req;
+ int ret;
+
+ if (ring->outstanding_lazy_request)
+ return 0;
+
+ req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+ if (req == NULL)
+ return -ENOMEM;
+
+ kref_init(&req->ref);
+ req->i915 = dev_priv;
+
+ ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+ if (ret)
+ goto err;
+
+ req->ring = ring;
+
+ if (i915.enable_execlists)
+ ret = intel_logical_ring_alloc_request_extras(req, ctx);
+ else
+ ret = intel_ring_alloc_request_extras(req);
+ if (ret)
+ goto err;
+
+ ring->outstanding_lazy_request = req;
+ return 0;
+
+err:
+ kmem_cache_free(dev_priv->requests, req);
+ return ret;
}
struct drm_i915_gem_request *
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
- ring_list);
+ ring_list[ring->id]);
- i915_gem_object_move_to_inactive(obj);
+ i915_gem_object_retire__read(obj, ring->id);
}
/*
struct drm_i915_gem_request,
execlist_link);
list_del(&submit_req->execlist_link);
- intel_runtime_pm_put(dev_priv);
if (submit_req->ctx != ring->default_context)
intel_lr_context_unpin(ring, submit_req->ctx);
struct drm_i915_gem_request,
list);
- i915_gem_free_request(request);
+ i915_gem_request_retire(request);
}
/* This may not have been flushed before the reset, so clean it now */
i915_gem_context_reset(dev);
i915_gem_restore_fences(dev);
+
+ WARN_ON(i915_verify_lists(dev));
}
/**
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
- if (list_empty(&ring->request_list))
- return;
-
WARN_ON(i915_verify_lists(ring->dev));
+ if (list_empty(&ring->active_list))
+ return;
+
/* Retire requests first as we use it above for the early return.
* If we retire requests last, we may use a later seqno and so clear
* the requests lists without clearing the active list, leading to
*/
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
- struct intel_ringbuffer *ringbuf;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
if (!i915_gem_request_completed(request, true))
break;
- trace_i915_gem_request_retire(request);
-
- /* This is one of the few common intersection points
- * between legacy ringbuffer submission and execlists:
- * we need to tell them apart in order to find the correct
- * ringbuffer to which the request belongs to.
- */
- if (i915.enable_execlists) {
- struct intel_context *ctx = request->ctx;
- ringbuf = ctx->engine[ring->id].ringbuf;
- } else
- ringbuf = ring->buffer;
-
- /* We know the GPU must have read the request to have
- * sent us the seqno + interrupt, so use the position
- * of tail of the request to update the last known position
- * of the GPU head.
- */
- ringbuf->last_retired_head = request->postfix;
-
- i915_gem_free_request(request);
+ i915_gem_request_retire(request);
}
/* Move any buffers on the active list that are no longer referenced
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
- ring_list);
+ ring_list[ring->id]);
- if (!i915_gem_request_completed(obj->last_read_req, true))
+ if (!list_empty(&obj->last_read_req[ring->id]->list))
break;
- i915_gem_object_move_to_inactive(obj);
+ i915_gem_object_retire__read(obj, ring->id);
}
if (unlikely(ring->trace_irq_req &&
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), mm.idle_work.work);
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_engine_cs *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ if (!list_empty(&ring->request_list))
+ return;
+
+ intel_mark_idle(dev);
- intel_mark_idle(dev_priv->dev);
+ if (mutex_trylock(&dev->struct_mutex)) {
+ struct intel_engine_cs *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_batch_pool_fini(&ring->batch_pool);
+
+ mutex_unlock(&dev->struct_mutex);
+ }
}
/**
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
- struct intel_engine_cs *ring;
- int ret;
+ int ret, i;
+
+ if (!obj->active)
+ return 0;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct drm_i915_gem_request *req;
+
+ req = obj->last_read_req[i];
+ if (req == NULL)
+ continue;
- if (obj->active) {
- ring = i915_gem_request_get_ring(obj->last_read_req);
+ if (list_empty(&req->list))
+ goto retire;
- ret = i915_gem_check_olr(obj->last_read_req);
+ ret = i915_gem_check_olr(req);
if (ret)
return ret;
- i915_gem_retire_requests_ring(ring);
+ if (i915_gem_request_completed(req, true)) {
+ __i915_gem_request_retire__upto(req);
+retire:
+ i915_gem_object_retire__read(obj, i);
+ }
}
return 0;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct drm_i915_gem_request *req;
+ struct drm_i915_gem_request *req[I915_NUM_RINGS];
unsigned reset_counter;
- int ret = 0;
+ int i, n = 0;
+ int ret;
if (args->flags != 0)
return -EINVAL;
if (ret)
goto out;
- if (!obj->active || !obj->last_read_req)
+ if (!obj->active)
goto out;
- req = obj->last_read_req;
-
/* Do this after OLR check to make sure we make forward progress polling
* on this IOCTL with a timeout == 0 (like busy ioctl)
*/
drm_gem_object_unreference(&obj->base);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- i915_gem_request_reference(req);
- mutex_unlock(&dev->struct_mutex);
- ret = __i915_wait_request(req, reset_counter, true,
- args->timeout_ns > 0 ? &args->timeout_ns : NULL,
- file->driver_priv);
- mutex_lock(&dev->struct_mutex);
- i915_gem_request_unreference(req);
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (obj->last_read_req[i] == NULL)
+ continue;
+
+ req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
+ }
+
mutex_unlock(&dev->struct_mutex);
+
+ for (i = 0; i < n; i++) {
+ if (ret == 0)
+ ret = __i915_wait_request(req[i], reset_counter, true,
+ args->timeout_ns > 0 ? &args->timeout_ns : NULL,
+ file->driver_priv);
+ i915_gem_request_unreference__unlocked(req[i]);
+ }
return ret;
out:
return ret;
}
+static int
+__i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *to,
+ struct drm_i915_gem_request *req)
+{
+ struct intel_engine_cs *from;
+ int ret;
+
+ from = i915_gem_request_get_ring(req);
+ if (to == from)
+ return 0;
+
+ if (i915_gem_request_completed(req, true))
+ return 0;
+
+ ret = i915_gem_check_olr(req);
+ if (ret)
+ return ret;
+
+ if (!i915_semaphore_is_enabled(obj->base.dev)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ ret = __i915_wait_request(req,
+ atomic_read(&i915->gpu_error.reset_counter),
+ i915->mm.interruptible,
+ NULL,
+ &i915->rps.semaphores);
+ if (ret)
+ return ret;
+
+ i915_gem_object_retire_request(obj, req);
+ } else {
+ int idx = intel_ring_sync_index(from, to);
+ u32 seqno = i915_gem_request_get_seqno(req);
+
+ if (seqno <= from->semaphore.sync_seqno[idx])
+ return 0;
+
+ trace_i915_gem_ring_sync_to(from, to, req);
+ ret = to->semaphore.sync_to(to, from, seqno);
+ if (ret)
+ return ret;
+
+ /* We use last_read_req because sync_to()
+ * might have just caused seqno wrap under
+ * the radar.
+ */
+ from->semaphore.sync_seqno[idx] =
+ i915_gem_request_get_seqno(obj->last_read_req[from->id]);
+ }
+
+ return 0;
+}
+
/**
* i915_gem_object_sync - sync an object to a ring.
*
*
* This code is meant to abstract object synchronization with the GPU.
* Calling with NULL implies synchronizing the object with the CPU
- * rather than a particular GPU ring.
+ * rather than a particular GPU ring. Conceptually we serialise writes
+ * between engines inside the GPU. We only allow one engine to write
+ * into a buffer at any time, but multiple readers. To ensure each has
+ * a coherent view of memory, we must:
+ *
+ * - If there is an outstanding write request to the object, the new
+ * request must wait for it to complete (either CPU or in hw, requests
+ * on the same ring will be naturally ordered).
+ *
+ * - If we are a write request (pending_write_domain is set), the new
+ * request must wait for outstanding read requests to complete.
*
* Returns 0 if successful, else propagates up the lower layer error.
*/
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to)
{
- struct intel_engine_cs *from;
- u32 seqno;
- int ret, idx;
-
- from = i915_gem_request_get_ring(obj->last_read_req);
-
- if (from == NULL || to == from)
- return 0;
-
- if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
- return i915_gem_object_wait_rendering(obj, false);
+ const bool readonly = obj->base.pending_write_domain == 0;
+ struct drm_i915_gem_request *req[I915_NUM_RINGS];
+ int ret, i, n;
- idx = intel_ring_sync_index(from, to);
-
- seqno = i915_gem_request_get_seqno(obj->last_read_req);
- /* Optimization: Avoid semaphore sync when we are sure we already
- * waited for an object with higher seqno */
- if (seqno <= from->semaphore.sync_seqno[idx])
+ if (!obj->active)
return 0;
- ret = i915_gem_check_olr(obj->last_read_req);
- if (ret)
- return ret;
+ if (to == NULL)
+ return i915_gem_object_wait_rendering(obj, readonly);
- trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
- ret = to->semaphore.sync_to(to, from, seqno);
- if (!ret)
- /* We use last_read_req because sync_to()
- * might have just caused seqno wrap under
- * the radar.
- */
- from->semaphore.sync_seqno[idx] =
- i915_gem_request_get_seqno(obj->last_read_req);
+ n = 0;
+ if (readonly) {
+ if (obj->last_write_req)
+ req[n++] = obj->last_write_req;
+ } else {
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (obj->last_read_req[i])
+ req[n++] = obj->last_read_req[i];
+ }
+ for (i = 0; i < n; i++) {
+ ret = __i915_gem_object_sync(obj, to, req[i]);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return 0;
}
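/*
 * Illustrative scenario for the rules above: an object last written on the
 * render ring and subsequently read on the blitter carries last_write_req on
 * RCS plus last_read_req[] entries for both rings. A later read-only use on
 * a third ring only has to serialise against the RCS write, whereas a write
 * must wait for (or emit semaphores against) every outstanding read as well.
 */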
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->pages == NULL);
- ret = i915_gem_object_finish_gpu(obj);
+ ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
trace_i915_vma_unbind(vma);
- vma->unbind_vma(vma);
+ vma->vm->unbind_vma(vma);
+ vma->bound = 0;
list_del_init(&vma->mm_list);
if (i915_is_ggtt(vma->vm)) {
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (list_empty(&obj->vma_list)) {
- /* Throw away the active reference before
- * moving to the unbound list. */
- i915_gem_object_retire(obj);
-
i915_gem_gtt_finish_object(obj);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
}
return ret;
}
+ WARN_ON(i915_verify_lists(dev));
return 0;
}
}
/**
- * Finds free space in the GTT aperture and binds the object there.
+ * Finds free space in the GTT aperture and binds the object or a view of it
+ * there.
*/
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view,
unsigned alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view)
+ uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_vma *vma;
int ret;
- fence_size = i915_gem_get_gtt_size(dev,
- obj->base.size,
- obj->tiling_mode);
- fence_alignment = i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode, true);
- unfenced_alignment =
- i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode, false);
+ if (i915_is_ggtt(vm)) {
+ u32 view_size;
+
+ if (WARN_ON(!ggtt_view))
+ return ERR_PTR(-EINVAL);
+
+ view_size = i915_ggtt_view_size(obj, ggtt_view);
+
+ fence_size = i915_gem_get_gtt_size(dev,
+ view_size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ view_size,
+ obj->tiling_mode,
+ true);
+ unfenced_alignment = i915_gem_get_gtt_alignment(dev,
+ view_size,
+ obj->tiling_mode,
+ false);
+ size = flags & PIN_MAPPABLE ? fence_size : view_size;
+ } else {
+ fence_size = i915_gem_get_gtt_size(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode,
+ true);
+ unfenced_alignment =
+ i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode,
+ false);
+ size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
+ }
if (alignment == 0)
alignment = flags & PIN_MAPPABLE ? fence_alignment :
unfenced_alignment;
if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
- DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
+ DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
+ ggtt_view ? ggtt_view->type : 0,
+ alignment);
return ERR_PTR(-EINVAL);
}
- size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
-
- /* If the object is bigger than the entire aperture, reject it early
- * before evicting everything in a vain attempt to find space.
+ /* If binding the object/GGTT view requires more space than the entire
+ * aperture has, reject it early before evicting everything in a vain
+ * attempt to find space.
*/
- if (obj->base.size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
- obj->base.size,
+ if (size > end) {
+ DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+ ggtt_view ? ggtt_view->type : 0,
+ size,
flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return ERR_PTR(-E2BIG);
i915_gem_object_pin_pages(obj);
- vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
+ vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+ i915_gem_obj_lookup_or_create_vma(obj, vm);
+
if (IS_ERR(vma))
goto err_unpin;
goto err_remove_node;
trace_i915_vma_bind(vma, flags);
- ret = i915_vma_bind(vma, obj->cache_level,
- flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
+ ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
goto err_finish_gtt;
if (ret)
return ret;
- i915_gem_object_retire(obj);
-
/* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
}
if (i915_gem_obj_bound_any(obj)) {
- ret = i915_gem_object_finish_gpu(obj);
+ ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node)) {
ret = i915_vma_bind(vma, cache_level,
- vma->bound & GLOBAL_BIND);
+ PIN_UPDATE);
if (ret)
return ret;
}
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
- int ret;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
+ if (&obj->base == NULL)
+ return -ENOENT;
switch (obj->cache_level) {
case I915_CACHE_LLC:
break;
}
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return 0;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
return ret;
}
-static bool is_pin_display(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- vma = i915_gem_obj_to_ggtt(obj);
- if (!vma)
- return false;
-
- /* There are 2 sources that pin objects:
- * 1. The display engine (scanouts, sprites, cursors);
- * 2. Reservations for execbuffer;
- *
- * We can ignore reservations as we hold the struct_mutex and
- * are only called outside of the reservation path.
- */
- return vma->pin_count;
-}
-
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_engine_cs *pipelined)
+ struct intel_engine_cs *pipelined,
+ const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
- bool was_pin_display;
int ret;
- if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
- ret = i915_gem_object_sync(obj, pipelined);
- if (ret)
- return ret;
- }
+ ret = i915_gem_object_sync(obj, pipelined);
+ if (ret)
+ return ret;
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
- was_pin_display = obj->pin_display;
- obj->pin_display = true;
+ obj->pin_display++;
/* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
+ ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+ view->type == I915_GGTT_VIEW_NORMAL ?
+ PIN_MAPPABLE : 0);
if (ret)
goto err_unpin_display;
return 0;
err_unpin_display:
- WARN_ON(was_pin_display != is_pin_display(obj));
- obj->pin_display = was_pin_display;
+ obj->pin_display--;
return ret;
}
void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_ggtt_unpin(obj);
- obj->pin_display = is_pin_display(obj);
-}
-
-int
-i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- int ret;
-
- if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
+ if (WARN_ON(obj->pin_display == 0))
+ return;
- ret = i915_gem_object_wait_rendering(obj, false);
- if (ret)
- return ret;
+ i915_gem_object_ggtt_unpin_view(obj, view);
- /* Ensure that we invalidate the GPU's caches and TLBs. */
- obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
- return 0;
+ obj->pin_display--;
}
/**
if (ret)
return ret;
- i915_gem_object_retire(obj);
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
- unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+ unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
unsigned reset_counter;
int ret;
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
- mutex_lock(&dev->struct_mutex);
- i915_gem_request_unreference(target);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_request_unreference__unlocked(target);
return ret;
}
return false;
}
-int
-i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view)
+static int
+i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view,
+ uint32_t alignment,
+ uint64_t flags)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
return -EINVAL;
- vma = i915_gem_obj_to_vma_view(obj, vm, view);
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return -EINVAL;
+
+ vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
+ i915_gem_obj_to_vma(obj, vm);
+
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
if (i915_vma_misplaced(vma, alignment, flags)) {
+ unsigned long offset;
+ offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
+ i915_gem_obj_offset(obj, vm);
WARN(vma->pin_count,
- "bo is already pinned with incorrect alignment:"
+ "bo is already pinned in %s with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_offset_view(obj, vm, view->type),
+ ggtt_view ? "ggtt" : "ppgtt",
+ offset,
alignment,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
- flags, view);
+ vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
+ flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
- }
-
- if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
- ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+ } else {
+ ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
return ret;
}
- if ((bound ^ vma->bound) & GLOBAL_BIND) {
+ if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
+ (bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable;
u32 fence_size, fence_alignment;
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
- mappable = (vma->node.start + obj->base.size <=
+ mappable = (vma->node.start + fence_size <=
dev_priv->gtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
- }
- WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+ WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+ }
vma->pin_count++;
- if (flags & PIN_MAPPABLE)
- obj->pin_mappable |= true;
-
return 0;
}
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags)
+{
+ return i915_gem_object_do_pin(obj, vm,
+ i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
+ alignment, flags);
+}
+
+int
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ uint32_t alignment,
+ uint64_t flags)
+{
+ if (WARN_ONCE(!view, "no view specified"))
+ return -EINVAL;
+
+ return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+ alignment, flags | PIN_GLOBAL);
+}
+
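/*
 * Usage sketch (illustrative): pinning through the default view, as the
 * fault handler above does for the non-partial case:
 *
 *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
 *				       0, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *	...
 *	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 */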
void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+ struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
BUG_ON(!vma);
- BUG_ON(vma->pin_count == 0);
- BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+ WARN_ON(vma->pin_count == 0);
+ WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
- if (--vma->pin_count == 0)
- obj->pin_mappable = false;
+ --vma->pin_count;
}
bool
* necessary flushes here.
*/
ret = i915_gem_object_flush_active(obj);
+ if (ret)
+ goto unref;
- args->busy = obj->active;
- if (obj->last_read_req) {
- struct intel_engine_cs *ring;
- BUILD_BUG_ON(I915_NUM_RINGS > 16);
- ring = i915_gem_request_get_ring(obj->last_read_req);
- args->busy |= intel_ring_flag(ring) << 16;
- }
+ BUILD_BUG_ON(I915_NUM_RINGS > 16);
+ args->busy = obj->active << 16;
+ if (obj->last_write_req)
+ args->busy |= obj->last_write_req->ring->id;
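	/*
	 * Example (illustrative): an object still busy on RCS (bit 0) and BCS
	 * (bit 2), last written via BCS (ring id 2), reports
	 * ((1 << 0 | 1 << 2) << 16) | 2 == 0x00050002.
	 */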
+unref:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
obj->madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
+ if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
i915_gem_object_truncate(obj);
args->retained = obj->madv != __I915_MADV_PURGED;
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
+ int i;
+
INIT_LIST_HEAD(&obj->global_list);
- INIT_LIST_HEAD(&obj->ring_list);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ INIT_LIST_HEAD(&obj->ring_list[i]);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
- INIT_LIST_HEAD(&obj->batch_pool_list);
+ INIT_LIST_HEAD(&obj->batch_pool_link);
obj->ops = ops;
intel_runtime_pm_put(dev_priv);
}
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->ggtt_view.type == view->type)
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm)
return vma;
+ }
+ return NULL;
+}
+
+struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
+{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+ struct i915_vma *vma;
+ if (WARN_ONCE(!view, "no view specified"))
+ return ERR_PTR(-EINVAL);
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
+ return vma;
return NULL;
}
list_del(&vma->vma_link);
- kfree(vma);
+ kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
static void
i915_gem_retire_requests(dev);
- /* Under UMS, be paranoid and evict. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_gem_evict_everything(dev);
-
i915_gem_stop_ringbuffers(dev);
mutex_unlock(&dev->struct_mutex);
}
if (!i915.enable_execlists) {
- dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+ dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
dev_priv->gt.init_rings = i915_gem_init_rings;
dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
dev_priv->gt.stop_ring = intel_stop_ring_buffer;
} else {
- dev_priv->gt.do_execbuf = intel_execlists_submission;
+ dev_priv->gt.execbuf_submit = intel_execlists_submission;
dev_priv->gt.init_rings = intel_logical_rings_init;
dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
dev_priv->gt.stop_ring = intel_logical_ring_stop;
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- dev_priv->slab =
+ dev_priv->objects =
kmem_cache_create("i915_gem_object",
sizeof(struct drm_i915_gem_object), 0,
SLAB_HWCACHE_ALIGN,
NULL);
+ dev_priv->vmas =
+ kmem_cache_create("i915_gem_vma",
+ sizeof(struct i915_vma), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ dev_priv->requests =
+ kmem_cache_create("i915_gem_request",
+ sizeof(struct drm_i915_gem_request), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
INIT_LIST_HEAD(&dev_priv->vm_list);
i915_init_vm(dev_priv, &dev_priv->gtt.base);
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
- I915_WRITE(MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
- }
-
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
- /* Old X drivers will take 0-2 for front, back, depth buffers */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- dev_priv->fence_reg_start = 3;
-
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
dev_priv->num_fence_regs = 32;
else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
else
dev_priv->num_fence_regs = 8;
+ if (intel_vgpu_active(dev))
+ dev_priv->num_fence_regs =
+ I915_READ(vgtif_reg(avail_rs.fence_num));
+
/* Initialize fence registers to zero */
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
i915_gem_restore_fences(dev);
dev_priv->mm.interruptible = true;
- dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
- dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
- dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&dev_priv->mm.shrinker);
-
- dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- register_oom_notifier(&dev_priv->mm.oom_notifier);
-
- i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
+ i915_gem_shrinker_init(dev_priv);
mutex_init(&dev_priv->fb_tracking.lock);
}
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- cancel_delayed_work_sync(&file_priv->mm.idle_work);
-
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
request->file_priv = NULL;
}
spin_unlock(&file_priv->mm.lock);
-}
-static void
-i915_gem_file_idle_work_handler(struct work_struct *work)
-{
- struct drm_i915_file_private *file_priv =
- container_of(work, typeof(*file_priv), mm.idle_work.work);
-
- atomic_set(&file_priv->rps_wait_boost, false);
+ if (!list_empty(&file_priv->rps.link)) {
+ spin_lock(&to_i915(dev)->rps.client_lock);
+ list_del(&file_priv->rps.link);
+ spin_unlock(&to_i915(dev)->rps.client_lock);
+ }
}
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
file_priv->dev_priv = dev->dev_private;
file_priv->file = file;
+ INIT_LIST_HEAD(&file_priv->rps.link);
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
- INIT_DELAYED_WORK(&file_priv->mm.idle_work,
- i915_gem_file_idle_work_handler);
ret = i915_gem_context_open(dev, file);
if (ret)
}
}
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
-{
- if (!mutex_is_locked(mutex))
- return false;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
-}
-
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+/* All the new VM stuff */
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
- if (to_i915(dev)->mm.shrinker_no_lock_stealing)
- return false;
+ WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
- *unlock = false;
- } else
- *unlock = true;
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm)
+ return vma->node.start;
+ }
- return true;
+ WARN(1, "%s vma for this object not found.\n",
+ i915_is_ggtt(vm) ? "global" : "ppgtt");
+ return -1;
}
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
- int count = 0;
-
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (drm_mm_node_allocated(&vma->node))
- count++;
-
- return count;
-}
-
-static unsigned long
-i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
-{
- struct drm_i915_private *dev_priv =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long count;
- bool unlock;
-
- if (!i915_gem_shrinker_lock(dev, &unlock))
- return 0;
-
- count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
- if (obj->pages_pin_count == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!i915_gem_obj_is_pinned(obj) &&
- obj->pages_pin_count == num_vma_bound(obj))
- count += obj->base.size >> PAGE_SHIFT;
- }
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
+ return vma->node.start;
- return count;
+ WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
+ return -1;
}
-/* All the new VM stuff */
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view)
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (vma->vm == vm && vma->ggtt_view.type == view)
- return vma->node.start;
-
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ return true;
}
- WARN(1, "%s vma for this object not found.\n",
- i915_is_ggtt(vm) ? "global" : "ppgtt");
- return -1;
+
+ return false;
}
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view)
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, vma_link)
- if (vma->vm == vm &&
- vma->ggtt_view.type == view &&
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
BUG_ON(list_empty(&o->vma_list));
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
if (vma->vm == vm)
return vma->node.size;
-
- return 0;
-}
-
-static unsigned long
-i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
-{
- struct drm_i915_private *dev_priv =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
- unsigned long freed;
- bool unlock;
-
- if (!i915_gem_shrinker_lock(dev, &unlock))
- return SHRINK_STOP;
-
- freed = i915_gem_shrink(dev_priv,
- sc->nr_to_scan,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE);
- if (freed < sc->nr_to_scan)
- freed += i915_gem_shrink(dev_priv,
- sc->nr_to_scan - freed,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
- return freed;
-}
-
-static int
-i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
-{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, mm.oom_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long timeout = msecs_to_jiffies(5000) + 1;
- unsigned long pinned, bound, unbound, freed_pages;
- bool was_interruptible;
- bool unlock;
-
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
- schedule_timeout_killable(1);
- if (fatal_signal_pending(current))
- return NOTIFY_DONE;
- }
- if (timeout == 0) {
- pr_err("Unable to purge GPU memory due lock contention.\n");
- return NOTIFY_DONE;
- }
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- freed_pages = i915_gem_shrink_all(dev_priv);
-
- dev_priv->mm.interruptible = was_interruptible;
-
- /* Because we may be allocating inside our own driver, we cannot
- * assert that there are no objects with pinned pages that are not
- * being pointed to by hardware.
- */
- unbound = bound = pinned = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
- if (!obj->base.filp) /* not backed by a freeable object */
- continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
- else
- unbound += obj->base.size;
- }
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!obj->base.filp)
- continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
- else
- bound += obj->base.size;
}
-
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
- if (freed_pages || unbound || bound)
- pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
- freed_pages << PAGE_SHIFT, pinned);
- if (unbound || bound)
- pr_err("%lu and %lu bytes still available in the "
- "bound and unbound GPU page lists.\n",
- bound, unbound);
-
- *(unsigned long *)ptr += freed_pages;
- return NOTIFY_DONE;
+ return 0;
}
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
struct i915_vma *vma;
-
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == ggtt &&
- vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- return vma;
+ if (vma->pin_count > 0)
+ return true;
- return NULL;
+ return false;
}
+