Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next-queued
drivers/gpu/drm/i915/intel_guc_loader.c
index 82a3c03fbc0ea2948f6534648f7fc658857076df..f2b88c7209cb6f44f6a1f73c0041bb14603f598c 100644
  *
  */
 
-#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
+#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
 MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 
+#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
+MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
+
 /* User-friendly representation of an enum */
 const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 {
@@ -81,14 +84,14 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 
 static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 {
-       struct intel_engine_cs *ring;
-       int i, irqs;
+       struct intel_engine_cs *engine;
+       int irqs;
 
        /* tell all command streamers NOT to forward interrupts and vblank to GuC */
        irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
        irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
-       for_each_ring(ring, dev_priv, i)
-               I915_WRITE(RING_MODE_GEN7(ring), irqs);
+       for_each_engine(engine, dev_priv)
+               I915_WRITE(RING_MODE_GEN7(engine), irqs);
 
        /* route all GT interrupts to the host */
        I915_WRITE(GUC_BCS_RCS_IER, 0);
@@ -98,14 +101,15 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 
 static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
 {
-       struct intel_engine_cs *ring;
-       int i, irqs;
+       struct intel_engine_cs *engine;
+       int irqs;
+       u32 tmp;
 
        /* tell all command streamers to forward interrupts and vblank to GuC */
        irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
        irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
-       for_each_ring(ring, dev_priv, i)
-               I915_WRITE(RING_MODE_GEN7(ring), irqs);
+       for_each_engine(engine, dev_priv)
+               I915_WRITE(RING_MODE_GEN7(engine), irqs);
 
        /* route USER_INTERRUPT to Host, all others are sent to GuC. */
        irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
@@ -114,6 +118,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
        I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
        I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
        I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+
+       /*
+        * If the GuC has routed PM interrupts to itself, don't keep that
+        * redirection, but do keep the other interrupts it has unmasked.
+        */
+       tmp = I915_READ(GEN6_PMINTRMSK);
+       if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
+               dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+               dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+       }
 }
 
 static u32 get_gttype(struct drm_i915_private *dev_priv)
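The pm_intr_keep bookkeeping added in the hunk above is dense bit arithmetic. Here is a minimal standalone sketch of the same logic, with an invented mask value and with REDIRECT_TO_NON_DISP merely standing in for GEN8_PMINTR_REDIRECT_TO_NON_DISP; it is an illustration, not driver code.

/* Standalone illustration of the pm_intr_keep arithmetic above; the mask
 * value and the redirect bit position are invented for the example. */
#include <stdint.h>
#include <stdio.h>

#define REDIRECT_TO_NON_DISP (1u << 31)	/* stand-in for GEN8_PMINTR_REDIRECT_TO_NON_DISP */

int main(void)
{
	/* pretend PMINTRMSK readback: redirect bit set, bits 0-3 unmasked (0) */
	uint32_t pmintrmsk = 0xfffffff0u;
	uint32_t pm_intr_keep = 0;

	if (pmintrmsk & REDIRECT_TO_NON_DISP) {
		/* keep every interrupt the GuC left unmasked (the 0 bits)... */
		pm_intr_keep |= ~(pmintrmsk & ~REDIRECT_TO_NON_DISP);
		/* ...but never keep the redirect bit itself */
		pm_intr_keep &= ~REDIRECT_TO_NON_DISP;
	}

	printf("pm_intr_keep = 0x%08x\n", pm_intr_keep);	/* prints 0x0000000f */
	return 0;
}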
@@ -281,6 +295,17 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
        return ret;
 }
 
+static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
+{
+       u32 wopcm_size = GUC_WOPCM_TOP;
+
+       /* On BXT, the top of WOPCM is reserved for RC6 context */
+       if (IS_BROXTON(dev_priv))
+               wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+
+       return wopcm_size;
+}
+
 /*
  * Load the GuC firmware blob into the MinuteIA.
  */
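The new guc_wopcm_size() helper above encodes a simple policy: on BXT the top of WOPCM is reserved for the RC6 context, so less room is left for the firmware image. Below is a standalone sketch of that policy and of the size check it feeds later in this diff; the numeric constants are placeholders, not the driver's GUC_WOPCM_TOP / BXT_GUC_WOPCM_RC6_RESERVED values.

/* Illustration only: the sizes below are placeholders, not the i915 values. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_WOPCM_TOP	(512u * 1024)	/* placeholder for GUC_WOPCM_TOP */
#define EXAMPLE_RC6_RESERVED	(64u * 1024)	/* placeholder for BXT_GUC_WOPCM_RC6_RESERVED */

static uint32_t example_wopcm_size(bool is_broxton)
{
	uint32_t size = EXAMPLE_WOPCM_TOP;

	/* on BXT the top of WOPCM is reserved for the RC6 context */
	if (is_broxton)
		size -= EXAMPLE_RC6_RESERVED;
	return size;
}

int main(void)
{
	uint32_t fw_size = 300u * 1024;	/* pretend header_size + ucode_size */

	/* mirrors the "Firmware is too large to fit in WOPCM" check in this diff */
	printf("fits on SKL: %s\n", fw_size > example_wopcm_size(false) ? "no" : "yes");
	printf("fits on BXT: %s\n", fw_size > example_wopcm_size(true) ? "no" : "yes");
	return 0;
}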
@@ -308,7 +333,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        /* init WOPCM */
-       I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
+       I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
        I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
 
        /* Enable MIA caching. GuC clock gating is disabled. */
@@ -353,73 +378,108 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
        return ret;
 }
 
+static int i915_reset_guc(struct drm_i915_private *dev_priv)
+{
+       int ret;
+       u32 guc_status;
+
+       ret = intel_guc_reset(dev_priv);
+       if (ret) {
+               DRM_ERROR("GuC reset failed, ret = %d\n", ret);
+               return ret;
+       }
+
+       guc_status = I915_READ(GUC_STATUS);
+       WARN(!(guc_status & GS_MIA_IN_RESET),
+            "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
+
+       return ret;
+}
+
 /**
- * intel_guc_ucode_load() - load GuC uCode into the device
+ * intel_guc_setup() - finish preparing the GuC for activity
  * @dev:       drm device
  *
  * Called from gem_init_hw() during driver loading and also after a GPU reset.
  *
+ * The main action required here is to load the GuC uCode into the device.
  * The firmware image should have already been fetched into memory by the
- * earlier call to intel_guc_ucode_init(), so here we need only check that
- * is succeeded, and then transfer the image to the h/w.
+ * earlier call to intel_guc_init(), so here we need only check that it worked,
+ * and then transfer the image to the h/w.
  *
  * Return:     non-zero code on error
  */
-int intel_guc_ucode_load(struct drm_device *dev)
+int intel_guc_setup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-       int err = 0;
-
-       if (!i915.enable_guc_submission)
-               return 0;
+       const char *fw_path = guc_fw->guc_fw_path;
+       int retries, ret, err;
 
-       DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+       DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
+               fw_path,
                intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
                intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
 
-       direct_interrupts_to_host(dev_priv);
+       /* Loading forbidden, or no firmware to load? */
+       if (!i915.enable_guc_loading) {
+               err = 0;
+               goto fail;
+       } else if (fw_path == NULL || *fw_path == '\0') {
+               if (fw_path && *fw_path == '\0')
+                       DRM_INFO("No GuC firmware known for this platform\n");
+               err = -ENODEV;
+               goto fail;
+       }
 
-       if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
-               return 0;
+       /* Fetch failed, or already fetched but failed to load? */
+       if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
+               err = -EIO;
+               goto fail;
+       } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
+               err = -ENOEXEC;
+               goto fail;
+       }
 
-       if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
-           guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
-               return -ENOEXEC;
+       direct_interrupts_to_host(dev_priv);
 
        guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
 
-       DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
-               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+       DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+               intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
 
-       switch (guc_fw->guc_fw_fetch_status) {
-       case GUC_FIRMWARE_FAIL:
-               /* something went wrong :( */
-               err = -EIO;
+       err = i915_guc_submission_init(dev);
+       if (err)
                goto fail;
 
-       case GUC_FIRMWARE_NONE:
-       case GUC_FIRMWARE_PENDING:
-       default:
-               /* "can't happen" */
-               WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
-                       guc_fw->guc_fw_path,
-                       intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
-                       guc_fw->guc_fw_fetch_status);
-               err = -ENXIO;
-               goto fail;
+       /*
+        * WaEnableuKernelHeaderValidFix:skl,bxt
+        * For BXT, this is needed only up to stepping B0, but the WA below
+        * is also required for later steppings, so it is applied to all of them.
+        */
+       /* WaEnableGuCBootHashCheckNotSet:skl,bxt */
+       for (retries = 3; ; ) {
+               /*
+                * Always reset the GuC just before (re)loading, so
+                * that the state and timing are fairly predictable
+                */
+               err = i915_reset_guc(dev_priv);
+               if (err) {
+                       DRM_ERROR("GuC reset failed: %d\n", err);
+                       goto fail;
+               }
 
-       case GUC_FIRMWARE_SUCCESS:
-               break;
-       }
+               err = guc_ucode_xfer(dev_priv);
+               if (!err)
+                       break;
 
-       err = i915_guc_submission_init(dev);
-       if (err)
-               goto fail;
+               if (--retries == 0)
+                       goto fail;
 
-       err = guc_ucode_xfer(dev_priv);
-       if (err)
-               goto fail;
+               DRM_INFO("GuC fw load failed: %d; will reset and "
+                        "retry %d more time(s)\n", err, retries);
+       }
 
        guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
 
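The workaround retry loop added above (always reset the GuC, attempt the DMA transfer, give up after a fixed number of tries) is the core control flow of intel_guc_setup(). The user-space analogue below reproduces just that flow, with stub functions standing in for i915_reset_guc() and guc_ucode_xfer() and an arbitrary error code for the demo.

/* Control-flow sketch only; the stub functions are invented for the example. */
#include <stdio.h>

static int fake_reset_guc(void)
{
	return 0;	/* stands in for i915_reset_guc() */
}

static int fake_ucode_xfer(int attempt)
{
	return attempt < 2 ? -5 : 0;	/* fails twice, then succeeds; -5 is arbitrary */
}

int main(void)
{
	int retries, err, attempt = 0;

	for (retries = 3; ; ) {
		/* always reset just before (re)loading, so state/timing are predictable */
		err = fake_reset_guc();
		if (err)
			return err;

		err = fake_ucode_xfer(attempt++);
		if (!err)
			break;

		if (--retries == 0)
			return err;

		printf("load failed: %d; will reset and retry %d more time(s)\n",
		       err, retries);
	}

	printf("loaded after %d attempt(s)\n", attempt);
	return 0;
}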
@@ -447,7 +507,41 @@ fail:
        i915_guc_submission_disable(dev);
        i915_guc_submission_fini(dev);
 
-       return err;
+       /*
+        * We've failed to load the firmware :(
+        *
+        * Decide whether to disable GuC submission and fall back to
+        * execlist mode, and whether to hide the error by returning
+        * zero or to return -EIO, which the caller will treat as a
+        * nonfatal error (i.e. it doesn't prevent driver load, but
+        * marks the GPU as wedged until reset).
+        */
+       if (i915.enable_guc_loading > 1) {
+               ret = -EIO;
+       } else if (i915.enable_guc_submission > 1) {
+               ret = -EIO;
+       } else {
+               ret = 0;
+       }
+
+       if (err == 0)
+               DRM_INFO("GuC firmware load skipped\n");
+       else if (ret == -EIO)
+               DRM_ERROR("GuC firmware load failed: %d\n", err);
+       else
+               DRM_INFO("GuC firmware load failed: %d\n", err);
+
+       if (i915.enable_guc_submission) {
+               if (fw_path == NULL)
+                       DRM_INFO("GuC submission without firmware not supported\n");
+               if (ret == 0)
+                       DRM_INFO("Falling back to execlist mode\n");
+               else
+                       DRM_ERROR("GuC init failed: %d\n", ret);
+       }
+       i915.enable_guc_submission = 0;
+
+       return ret;
 }
 
 static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
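The decision in the fail path above (hide the error and fall back to execlist mode, or return -EIO when the GuC was explicitly required via a parameter value greater than 1) can be restated as a tiny standalone function. This is one reading of the diff, not authoritative documentation of the module parameters.

/* Interpretation of the failure policy visible in the fail: path above:
 * parameter value 1 = best effort (fall back to execlists on failure),
 * value > 1 = GuC mandatory (failure returns -EIO and wedges the GPU).
 * Standalone sketch, not driver code. */
#include <errno.h>
#include <stdio.h>

static int on_fw_load_failure(int enable_guc_loading, int enable_guc_submission)
{
	if (enable_guc_loading > 1 || enable_guc_submission > 1)
		return -EIO;	/* GuC was required: report the failure */
	return 0;		/* hide the error, fall back to execlist mode */
}

int main(void)
{
	printf("best effort -> %d\n", on_fw_load_failure(1, 1));	/* 0 */
	printf("required    -> %d\n", on_fw_load_failure(2, 1));	/* -EIO */
	return 0;
}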
@@ -509,9 +603,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
        /* Header and uCode will be loaded to WOPCM. Size of the two. */
        size = guc_fw->header_size + guc_fw->ucode_size;
-
-       /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
-       if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
+       if (size > guc_wopcm_size(dev->dev_private)) {
                DRM_ERROR("Firmware is too large to fit in WOPCM\n");
                goto fail;
        }
@@ -574,50 +666,52 @@ fail:
 }
 
 /**
- * intel_guc_ucode_init() - define parameters and fetch firmware
+ * intel_guc_init() - define parameters and fetch firmware
  * @dev:       drm device
  *
  * Called early during driver load, but after GEM is initialised.
  *
  * The firmware will be transferred to the GuC's memory later,
- * when intel_guc_ucode_load() is called.
+ * when intel_guc_setup() is called.
  */
-void intel_guc_ucode_init(struct drm_device *dev)
+void intel_guc_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
        const char *fw_path;
 
-       if (!HAS_GUC_SCHED(dev))
-               i915.enable_guc_submission = false;
+       /* A negative value means "use platform default" */
+       if (i915.enable_guc_loading < 0)
+               i915.enable_guc_loading = HAS_GUC_UCODE(dev);
+       if (i915.enable_guc_submission < 0)
+               i915.enable_guc_submission = HAS_GUC_SCHED(dev);
 
        if (!HAS_GUC_UCODE(dev)) {
                fw_path = NULL;
        } else if (IS_SKYLAKE(dev)) {
                fw_path = I915_SKL_GUC_UCODE;
-               guc_fw->guc_fw_major_wanted = 4;
-               guc_fw->guc_fw_minor_wanted = 3;
+               guc_fw->guc_fw_major_wanted = 6;
+               guc_fw->guc_fw_minor_wanted = 1;
+       } else if (IS_BROXTON(dev)) {
+               fw_path = I915_BXT_GUC_UCODE;
+               guc_fw->guc_fw_major_wanted = 8;
+               guc_fw->guc_fw_minor_wanted = 7;
        } else {
-               i915.enable_guc_submission = false;
                fw_path = "";   /* unknown device */
        }
 
-       if (!i915.enable_guc_submission)
-               return;
-
        guc_fw->guc_dev = dev;
        guc_fw->guc_fw_path = fw_path;
        guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
        guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
 
+       /* Early (and silent) return if GuC loading is disabled */
+       if (!i915.enable_guc_loading)
+               return;
        if (fw_path == NULL)
                return;
-
-       if (*fw_path == '\0') {
-               DRM_ERROR("No GuC firmware known for this platform\n");
-               guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+       if (*fw_path == '\0')
                return;
-       }
 
        guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
        DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
@@ -626,10 +720,10 @@ void intel_guc_ucode_init(struct drm_device *dev)
 }
 
 /**
- * intel_guc_ucode_fini() - clean up all allocated resources
+ * intel_guc_fini() - clean up all allocated resources
  * @dev:       drm device
  */
-void intel_guc_ucode_fini(struct drm_device *dev)
+void intel_guc_fini(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;