ath10k: re-add support for early fw indication
[deliverable/linux.git] / drivers / net / wireless / ath / ath10k / pci.c
index e2f9ef50b1bd3999f6b7cc2989c24e407e9bd72a..0a2d1c20e4dfa74574a977305f14315d4e0428a1 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/bitops.h>
 
 #include "core.h"
 #include "debug.h"
@@ -36,11 +37,9 @@ static unsigned int ath10k_target_ps;
 module_param(ath10k_target_ps, uint, 0644);
 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
 
-#define QCA988X_1_0_DEVICE_ID  (0xabcd)
 #define QCA988X_2_0_DEVICE_ID  (0x003c)
 
 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
-       { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
 };
@@ -50,56 +49,245 @@ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
 
 static void ath10k_pci_process_ce(struct ath10k *ar);
 static int ath10k_pci_post_rx(struct ath10k *ar);
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                             int num);
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
 static void ath10k_pci_stop_ce(struct ath10k *ar);
-static void ath10k_pci_device_reset(struct ath10k *ar);
-static int ath10k_pci_reset_target(struct ath10k *ar);
-static int ath10k_pci_start_intr(struct ath10k *ar);
-static void ath10k_pci_stop_intr(struct ath10k *ar);
+static int ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+static int ath10k_pci_init_irq(struct ath10k *ar);
+static int ath10k_pci_deinit_irq(struct ath10k *ar);
+static int ath10k_pci_request_irq(struct ath10k *ar);
+static void ath10k_pci_free_irq(struct ath10k *ar);
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+                              struct ath10k_ce_pipe *rx_pipe,
+                              struct bmi_xfer *xfer);
+static void ath10k_pci_cleanup_ce(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
-       /* host->target HTC control and raw streams */
-       { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
-       /* could be moved to share CE3 */
-       /* target->host HTT + HTC control */
-       { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
-       /* target->host WMI */
-       { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
-       /* host->target WMI */
-       { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
-       /* host->target HTT */
-       { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
-                   CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
-       /* unused */
-       { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
-       /* Target autonomous hif_memcpy */
-       { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
-       /* ce_diag, the Diagnostic Window */
-       { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+       /* CE0: host->target HTC control and raw streams */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 16,
+               .src_sz_max = 256,
+               .dest_nentries = 0,
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 512,
+               .dest_nentries = 512,
+       },
+
+       /* CE2: target->host WMI */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 32,
+       },
+
+       /* CE3: host->target WMI */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 32,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+       },
+
+       /* CE4: host->target HTT */
+       {
+               .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+               .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+               .src_sz_max = 256,
+               .dest_nentries = 0,
+       },
+
+       /* CE5: unused */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE6: target autonomous hif_memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE7: ce_diag, the Diagnostic Window */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 2,
+               .src_sz_max = DIAG_TRANSFER_LIMIT,
+               .dest_nentries = 2,
+       },
 };
 
 /* Target firmware's Copy Engine configuration. */
 static const struct ce_pipe_config target_ce_config_wlan[] = {
-       /* host->target HTC control and raw streams */
-       { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
-       /* target->host HTT + HTC control */
-       { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
-       /* target->host WMI */
-       { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
-       /* host->target WMI */
-       { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
-       /* host->target HTT */
-       { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+       /* CE0: host->target HTC control and raw streams */
+       {
+               .pipenum = 0,
+               .pipedir = PIPEDIR_OUT,
+               .nentries = 32,
+               .nbytes_max = 256,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .pipenum = 1,
+               .pipedir = PIPEDIR_IN,
+               .nentries = 32,
+               .nbytes_max = 512,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
+       /* CE2: target->host WMI */
+       {
+               .pipenum = 2,
+               .pipedir = PIPEDIR_IN,
+               .nentries = 32,
+               .nbytes_max = 2048,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
+       /* CE3: host->target WMI */
+       {
+               .pipenum = 3,
+               .pipedir = PIPEDIR_OUT,
+               .nentries = 32,
+               .nbytes_max = 2048,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
+       /* CE4: host->target HTT */
+       {
+               .pipenum = 4,
+               .pipedir = PIPEDIR_OUT,
+               .nentries = 256,
+               .nbytes_max = 256,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
        /* NB: 50% of src nentries, since tx has 2 frags */
-       /* unused */
-       { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
-       /* Reserved for target autonomous hif_memcpy */
-       { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+
+       /* CE5: unused */
+       {
+               .pipenum = 5,
+               .pipedir = PIPEDIR_OUT,
+               .nentries = 32,
+               .nbytes_max = 2048,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
+       /* CE6: Reserved for target autonomous hif_memcpy */
+       {
+               .pipenum = 6,
+               .pipedir = PIPEDIR_INOUT,
+               .nentries = 32,
+               .nbytes_max = 4096,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
        /* CE7 used only by Host */
 };
 
+static bool ath10k_pci_irq_pending(struct ath10k *ar)
+{
+       u32 cause;
+
+       /* Check if the shared legacy irq is for us */
+       cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                                 PCIE_INTR_CAUSE_ADDRESS);
+       if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
+               return true;
+
+       return false;
+}
+
+static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+{
+       /* IMPORTANT: INTR_CLR register has to be set after
+        * INTR_ENABLE is set to 0, otherwise interrupt can not be
+        * really cleared. */
+       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+                          0);
+       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
+                          PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+       /* IMPORTANT: this extra read transaction is required to
+        * flush the posted write buffer. */
+       (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                                PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+{
+       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+                          PCIE_INTR_ENABLE_ADDRESS,
+                          PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+       /* IMPORTANT: this extra read transaction is required to
+        * flush the posted write buffer. */
+       (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                                PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
+{
+       struct ath10k *ar = arg;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       if (ar_pci->num_msi_intrs == 0) {
+               if (!ath10k_pci_irq_pending(ar))
+                       return IRQ_NONE;
+
+               ath10k_pci_disable_and_clear_legacy_irq(ar);
+       }
+
+       tasklet_schedule(&ar_pci->early_irq_tasklet);
+
+       return IRQ_HANDLED;
+}
+
+static int ath10k_pci_request_early_irq(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
+        * interrupt from irq vector is triggered in all cases for FW
+        * indication/errors */
+       ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
+                         IRQF_SHARED, "ath10k_pci (early)", ar);
+       if (ret) {
+               ath10k_warn("failed to request early irq: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void ath10k_pci_free_early_irq(struct ath10k *ar)
+{
+       free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
+}
+
 /*
  * Diagnostic read/write access is provided for startup/config/debug usage.
  * Caller must guarantee proper alignment, when applicable, and single user
@@ -114,7 +302,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
-       struct ce_state *ce_diag;
+       struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
@@ -278,7 +466,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
-       struct ce_state *ce_diag;
+       struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
@@ -426,18 +614,7 @@ static bool ath10k_pci_target_is_awake(struct ath10k *ar)
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
 }
 
-static void ath10k_pci_wait(struct ath10k *ar)
-{
-       int n = 100;
-
-       while (n-- && !ath10k_pci_target_is_awake(ar))
-               msleep(10);
-
-       if (n < 0)
-               ath10k_warn("Unable to wakeup target\n");
-}
-
-void ath10k_do_pci_wake(struct ath10k *ar)
+int ath10k_do_pci_wake(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
@@ -453,18 +630,19 @@ void ath10k_do_pci_wake(struct ath10k *ar)
        atomic_inc(&ar_pci->keep_awake_count);
 
        if (ar_pci->verified_awake)
-               return;
+               return 0;
 
        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
-                       break;
+                       return 0;
                }
 
                if (tot_delay > PCIE_WAKE_TIMEOUT) {
-                       ath10k_warn("target takes too long to wake up (awake count %d)\n",
+                       ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
+                                   PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
-                       break;
+                       return -ETIMEDOUT;
                }
 
                udelay(curr_delay);
@@ -493,7 +671,7 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
  * FIXME: Handle OOM properly.
  */
 static inline
-struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
 {
        struct ath10k_pci_compl *compl = NULL;
 
@@ -511,39 +689,28 @@ exit:
 }
 
 /* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
-                                   void *transfer_context,
-                                   u32 ce_data,
-                                   unsigned int nbytes,
-                                   unsigned int transfer_id)
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+       struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
-       bool process = false;
-
-       do {
-               /*
-                * For the send completion of an item in sendlist, just
-                * increment num_sends_allowed. The upper layer callback will
-                * be triggered when last fragment is done with send.
-                */
-               if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
-                       spin_lock_bh(&pipe_info->pipe_lock);
-                       pipe_info->num_sends_allowed++;
-                       spin_unlock_bh(&pipe_info->pipe_lock);
-                       continue;
-               }
+       void *transfer_context;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int transfer_id;
 
+       while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
+                                            &ce_data, &nbytes,
+                                            &transfer_id) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;
 
-               compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+               compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
-               compl->transfer_context = transfer_context;
+               compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;
@@ -554,46 +721,36 @@ static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
-
-               process = true;
-       } while (ath10k_ce_completed_send_next(ce_state,
-                                                          &transfer_context,
-                                                          &ce_data, &nbytes,
-                                                          &transfer_id) == 0);
-
-       /*
-        * If only some of the items within a sendlist have completed,
-        * don't invoke completion processing until the entire sendlist
-        * has been sent.
-        */
-       if (!process)
-               return;
+       }
 
        ath10k_pci_process_ce(ar);
 }
 
 /* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
-                                   void *transfer_context, u32 ce_data,
-                                   unsigned int nbytes,
-                                   unsigned int transfer_id,
-                                   unsigned int flags)
+static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+       struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
+       void *transfer_context;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int transfer_id;
+       unsigned int flags;
 
-       do {
+       while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+                                            &ce_data, &nbytes, &transfer_id,
+                                            &flags) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;
 
-               compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+               compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
-               compl->transfer_context = transfer_context;
+               compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;
@@ -608,12 +765,7 @@ static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
-
-       } while (ath10k_ce_completed_recv_next(ce_state,
-                                                          &transfer_context,
-                                                          &ce_data, &nbytes,
-                                                          &transfer_id,
-                                                          &flags) == 0);
+       }
 
        ath10k_pci_process_ce(ar);
 }
@@ -625,15 +777,12 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
 {
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
-       struct ce_state *ce_hdl = pipe_info->ce_hdl;
-       struct ce_sendlist sendlist;
+       struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+       struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        unsigned int len;
        u32 flags = 0;
        int ret;
 
-       memset(&sendlist, 0, sizeof(struct ce_sendlist));
-
        len = min(bytes, nbuf->len);
        bytes -= len;
 
@@ -648,21 +797,10 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);
 
-       ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
-
-       /* Make sure we have resources to handle this request */
-       spin_lock_bh(&pipe_info->pipe_lock);
-       if (!pipe_info->num_sends_allowed) {
-               ath10k_warn("Pipe: %d is full\n", pipe_id);
-               spin_unlock_bh(&pipe_info->pipe_lock);
-               return -ENOSR;
-       }
-       pipe_info->num_sends_allowed--;
-       spin_unlock_bh(&pipe_info->pipe_lock);
-
-       ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+       ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
+                            flags);
        if (ret)
-               ath10k_warn("CE send failed: %p\n", nbuf);
+               ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
 
        return ret;
 }
@@ -670,14 +808,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
/* Number of sends the given pipe can still accept: free entries in
 * its CE source ring. */
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_ce_pipe *ce_hdl;

	ce_hdl = ath10k_pci_priv(ar)->pipe_info[pipe].ce_hdl;

	return ath10k_ce_num_free_src_entries(ce_hdl);
}
 
 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
@@ -696,9 +827,10 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
                   ar->fw_version_build);
 
        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
-       if (ath10k_pci_diag_read_mem(ar, host_addr,
-                                    &reg_dump_area, sizeof(u32)) != 0) {
-               ath10k_warn("could not read hi_failure_state\n");
+       ret = ath10k_pci_diag_read_mem(ar, host_addr,
+                                      &reg_dump_area, sizeof(u32));
+       if (ret) {
+               ath10k_err("failed to read FW dump area address: %d\n", ret);
                return;
        }
 
@@ -708,7 +840,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
-               ath10k_err("could not dump FW Dump Area\n");
+               ath10k_err("failed to read FW dump area: %d\n", ret);
                return;
        }
 
@@ -723,7 +855,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);
 
-       ieee80211_queue_work(ar->hw, &ar->restart_work);
+       queue_work(ar->workqueue, &ar->restart_work);
 }
 
 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
@@ -761,59 +893,45 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
               sizeof(ar_pci->msg_callbacks_current));
 }
 
-static int ath10k_pci_start_ce(struct ath10k *ar)
+static int ath10k_pci_alloc_compl(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ce_state *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
-       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
-       int i, pipe_num, completions, disable_interrupts;
+       int i, pipe_num, completions;
 
        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);
 
-       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
 
                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);
 
                /* Handle Diagnostic CE specially */
-               if (pipe_info->ce_hdl == ce_diag)
+               if (pipe_info->ce_hdl == ar_pci->ce_diag)
                        continue;
 
                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;
 
-               if (attr->src_nentries) {
-                       disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
-                       ath10k_ce_send_cb_register(pipe_info->ce_hdl,
-                                                  ath10k_pci_ce_send_done,
-                                                  disable_interrupts);
+               if (attr->src_nentries)
                        completions += attr->src_nentries;
-                       pipe_info->num_sends_allowed = attr->src_nentries - 1;
-               }
 
-               if (attr->dest_nentries) {
-                       ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
-                                                  ath10k_pci_ce_recv_data);
+               if (attr->dest_nentries)
                        completions += attr->dest_nentries;
-               }
-
-               if (completions == 0)
-                       continue;
 
                for (i = 0; i < completions; i++) {
-                       compl = kmalloc(sizeof(struct ath10k_pci_compl),
-                                       GFP_KERNEL);
+                       compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
-                               ath10k_pci_stop_ce(ar);
+                               ath10k_pci_cleanup_ce(ar);
                                return -ENOMEM;
                        }
 
-                       compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+                       compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }
@@ -821,26 +939,61 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
        return 0;
 }
 
-static void ath10k_pci_stop_ce(struct ath10k *ar)
+static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_pci_compl *compl;
-       struct sk_buff *skb;
-       int i;
+       const struct ce_attr *attr;
+       struct ath10k_pci_pipe *pipe_info;
+       int pipe_num, disable_interrupts;
 
-       ath10k_ce_disable_interrupts(ar);
+       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+
+               /* Handle Diagnostic CE specially */
+               if (pipe_info->ce_hdl == ar_pci->ce_diag)
+                       continue;
+
+               attr = &host_ce_config_wlan[pipe_num];
+
+               if (attr->src_nentries) {
+                       disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
+                       ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+                                                  ath10k_pci_ce_send_done,
+                                                  disable_interrupts);
+               }
+
+               if (attr->dest_nentries)
+                       ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+                                                  ath10k_pci_ce_recv_data);
+       }
+
+       return 0;
+}
+
+static void ath10k_pci_kill_tasklet(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int i;
 
-       /* Cancel the pending tasklet */
        tasklet_kill(&ar_pci->intr_tq);
+       tasklet_kill(&ar_pci->msi_fw_err);
+       tasklet_kill(&ar_pci->early_irq_tasklet);
 
        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);
+}
+
+static void ath10k_pci_stop_ce(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_pci_compl *compl;
+       struct sk_buff *skb;
 
        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
-               skb = (struct sk_buff *)compl->transfer_context;
+               skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
@@ -850,7 +1003,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
-       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;
 
@@ -861,14 +1014,14 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
 
        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
-               netbuf = (struct sk_buff *)compl->transfer_context;
+               netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);
 
        /* Free unused completions for each pipe. */
-       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
 
                spin_lock_bh(&pipe_info->pipe_lock);
@@ -912,20 +1065,22 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);
 
-               if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+               switch (compl->state) {
+               case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
-                                         compl->transfer_context,
+                                         compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
-               } else {
+                       break;
+               case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
-                               ath10k_warn("Unable to post recv buffer for pipe: %d\n",
-                                           compl->pipe_info->pipe_num);
+                               ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
+                                           compl->pipe_info->pipe_num, ret);
                                break;
                        }
 
-                       skb = (struct sk_buff *)compl->transfer_context;
+                       skb = compl->skb;
                        nbytes = compl->nbytes;
 
                        ath10k_dbg(ATH10K_DBG_PCI,
@@ -944,16 +1099,23 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
+                       break;
+               case ATH10K_PCI_COMPL_FREE:
+                       ath10k_warn("free completion cannot be processed\n");
+                       break;
+               default:
+                       ath10k_warn("invalid completion state (%d)\n",
+                                   compl->state);
+                       break;
                }
 
-               compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+               compl->state = ATH10K_PCI_COMPL_FREE;
 
                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
-               compl->pipe_info->num_sends_allowed += send_done;
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }
 
@@ -1037,12 +1199,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                                 &dl_is_polled);
 }
 
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
 {
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ce_state *ce_state = pipe_info->ce_hdl;
+       struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;
@@ -1053,7 +1215,7 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
-                       ath10k_warn("could not allocate skbuff for pipe %d\n",
+                       ath10k_warn("failed to allocate skbuff for pipe %d\n",
                                    num);
                        ret = -ENOMEM;
                        goto err;
@@ -1066,7 +1228,7 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
                                         DMA_FROM_DEVICE);
 
                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
-                       ath10k_warn("could not dma map skbuff\n");
+                       ath10k_warn("failed to DMA map sk_buff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
@@ -1081,7 +1243,7 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
-                       ath10k_warn("could not enqueue to pipe %d (%d)\n",
+                       ath10k_warn("failed to enqueue to pipe %d: %d\n",
                                    num, ret);
                        goto err;
                }
@@ -1097,11 +1259,11 @@ err:
 static int ath10k_pci_post_rx(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;
 
-       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];
 
@@ -1111,8 +1273,8 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
-                       ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
-                                   pipe_num);
+                       ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
+                                   pipe_num, ret);
 
                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
@@ -1128,30 +1290,65 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
 static int ath10k_pci_hif_start(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int ret;
+       int ret, ret_early;
+
+       ath10k_pci_free_early_irq(ar);
+       ath10k_pci_kill_tasklet(ar);
 
-       ret = ath10k_pci_start_ce(ar);
+       ret = ath10k_pci_alloc_compl(ar);
        if (ret) {
-               ath10k_warn("could not start CE (%d)\n", ret);
-               return ret;
+               ath10k_warn("failed to allocate CE completions: %d\n", ret);
+               goto err_early_irq;
+       }
+
+       ret = ath10k_pci_request_irq(ar);
+       if (ret) {
+               ath10k_warn("failed to request irqs: %d\n",
+                           ret);
+               goto err_free_compl;
+       }
+
+       ret = ath10k_pci_setup_ce_irq(ar);
+       if (ret) {
+               ath10k_warn("failed to setup CE interrupts: %d\n", ret);
+               goto err_stop;
        }
 
        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
-               ath10k_warn("could not post rx pipes (%d)\n", ret);
-               return ret;
+               ath10k_warn("failed to post RX buffers for all pipes: %d\n",
+                           ret);
+               goto err_stop;
        }
 
        ar_pci->started = 1;
        return 0;
+
+err_stop:
+       ath10k_ce_disable_interrupts(ar);
+       ath10k_pci_free_irq(ar);
+       ath10k_pci_kill_tasklet(ar);
+       ath10k_pci_stop_ce(ar);
+       ath10k_pci_process_ce(ar);
+err_free_compl:
+       ath10k_pci_cleanup_ce(ar);
+err_early_irq:
+       /* Though there should be no interrupts (device was reset)
+        * power_down() expects the early IRQ to be installed as per the
+        * driver lifecycle. */
+       ret_early = ath10k_pci_request_early_irq(ar);
+       if (ret_early)
+               ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
+
+       return ret;
 }
 
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 {
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
-       struct ce_state *ce_hdl;
+       struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;
@@ -1179,11 +1376,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
        }
 }
 
-static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 {
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
-       struct ce_state *ce_hdl;
+       struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
@@ -1206,15 +1403,21 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
 
        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
-               if (netbuf != CE_SENDLIST_ITEM_CTXT)
-                       /*
-                        * Indicate the completion to higer layer to free
-                        * the buffer
-                        */
-                       ATH10K_SKB_CB(netbuf)->is_aborted = true;
-                       ar_pci->msg_callbacks_current.tx_completion(ar,
-                                                                   netbuf,
-                                                                   id);
+               /*
+                * Indicate the completion to higher layer to free
+                * the buffer
+                */
+
+               if (!netbuf) {
+                       ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
+                                   ce_hdl->id);
+                       continue;
+               }
+
+               ATH10K_SKB_CB(netbuf)->is_aborted = true;
+               ar_pci->msg_callbacks_current.tx_completion(ar,
+                                                           netbuf,
+                                                           id);
        }
 }
 
@@ -1231,8 +1434,8 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;
 
-       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
-               struct hif_ce_pipe_info *pipe_info;
+       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+               struct ath10k_pci_pipe *pipe_info;
 
                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
@@ -1243,10 +1446,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
 static void ath10k_pci_ce_deinit(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_pipe *pipe_info;
        int pipe_num;
 
-       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
@@ -1256,27 +1459,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
        }
 }
 
-static void ath10k_pci_disable_irqs(struct ath10k *ar)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int i;
-
-       for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
-               disable_irq(ar_pci->pdev->irq + i);
-}
-
 static void ath10k_pci_hif_stop(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
 
        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
 
-       /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
-        * by ath10k_pci_start_intr(). */
-       ath10k_pci_disable_irqs(ar);
+       ret = ath10k_ce_disable_interrupts(ar);
+       if (ret)
+               ath10k_warn("failed to disable CE interrupts: %d\n", ret);
 
+       ath10k_pci_free_irq(ar);
+       ath10k_pci_kill_tasklet(ar);
        ath10k_pci_stop_ce(ar);
 
+       ret = ath10k_pci_request_early_irq(ar);
+       if (ret)
+               ath10k_warn("failed to re-enable early irq: %d\n", ret);
+
        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */
@@ -1285,6 +1486,13 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);
 
+       /* Make sure the device won't access any structures on the host by
+        * resetting it. The device was fed with PCI CE ringbuffer
+        * configuration during init. If ringbuffers are freed and the device
+        * were to access them this could lead to memory corruption on the
+        * host. */
+       ath10k_pci_device_reset(ar);
+
        ar_pci->started = 0;
 }
 
@@ -1293,14 +1501,18 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *resp, u32 *resp_len)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
-       struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+       struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+       struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+       struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+       struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;
 
+       might_sleep();
+
        if (resp && !resp_len)
                return -EINVAL;
 
@@ -1341,14 +1553,12 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
        if (ret)
                goto err_resp;
 
-       ret = wait_for_completion_timeout(&xfer.done,
-                                         BMI_COMMUNICATION_TIMEOUT_HZ);
-       if (ret <= 0) {
+       ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
+       if (ret) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;
 
-               ret = -ETIMEDOUT;
                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
@@ -1378,13 +1588,16 @@ err_dma:
        return ret;
 }
 
-static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
-                                    void *transfer_context,
-                                    u32 data,
-                                    unsigned int nbytes,
-                                    unsigned int transfer_id)
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 {
-       struct bmi_xfer *xfer = transfer_context;
+       struct bmi_xfer *xfer;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int transfer_id;
+
+       if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
+                                         &nbytes, &transfer_id))
+               return;
 
        if (xfer->wait_for_resp)
                return;
@@ -1392,14 +1605,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
        complete(&xfer->done);
 }
 
-static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
-                                    void *transfer_context,
-                                    u32 data,
-                                    unsigned int nbytes,
-                                    unsigned int transfer_id,
-                                    unsigned int flags)
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
 {
-       struct bmi_xfer *xfer = transfer_context;
+       struct bmi_xfer *xfer;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int transfer_id;
+       unsigned int flags;
+
+       if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+                                         &nbytes, &transfer_id, &flags))
+               return;
 
        if (!xfer->wait_for_resp) {
                ath10k_warn("unexpected: BMI data received; ignoring\n");
@@ -1410,6 +1626,25 @@ static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
        complete(&xfer->done);
 }
 
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+                              struct ath10k_ce_pipe *rx_pipe,
+                              struct bmi_xfer *xfer)
+{
+       unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+
+       while (time_before_eq(jiffies, timeout)) {
+               ath10k_pci_bmi_send_done(tx_pipe);
+               ath10k_pci_bmi_recv_data(rx_pipe);
+
+               if (completion_done(&xfer->done))
+                       return 0;
+
+               schedule();
+       }
+
+       return -ETIMEDOUT;
+}
+
 /*
  * Map from service/endpoint to Copy Engine.
  * This table is derived from the CE_PCI TABLE, above.
@@ -1519,7 +1754,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
                                              CORE_CTRL_ADDRESS,
                                          &core_ctrl);
        if (ret) {
-               ath10k_warn("Unable to read core ctrl\n");
+               ath10k_warn("failed to read core_ctrl: %d\n", ret);
                return ret;
        }
 
@@ -1529,10 +1764,13 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
        ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
                                               CORE_CTRL_ADDRESS,
                                           core_ctrl);
-       if (ret)
-               ath10k_warn("Unable to set interrupt mask\n");
+       if (ret) {
+               ath10k_warn("failed to set target CPU interrupt mask: %d\n",
+                           ret);
+               return ret;
+       }
 
-       return ret;
+       return 0;
 }
 
 static int ath10k_pci_init_config(struct ath10k *ar)
@@ -1679,11 +1917,11 @@ static int ath10k_pci_init_config(struct ath10k *ar)
 static int ath10k_pci_ce_init(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num;
 
-       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                pipe_info->pipe_num = pipe_num;
                pipe_info->hif_ce_state = ar;
@@ -1691,7 +1929,7 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
 
                pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
                if (pipe_info->ce_hdl == NULL) {
-                       ath10k_err("Unable to initialize CE for pipe: %d\n",
+                       ath10k_err("failed to initialize CE for pipe: %d\n",
                                   pipe_num);
 
                        /* It is safe to call it here. It checks if ce_hdl is
@@ -1700,31 +1938,18 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
                        return -1;
                }
 
-               if (pipe_num == ar_pci->ce_count - 1) {
+               if (pipe_num == CE_COUNT - 1) {
                        /*
                         * Reserve the ultimate CE for
                         * diagnostic Window support
                         */
-                       ar_pci->ce_diag =
-                       ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
+                       ar_pci->ce_diag = pipe_info->ce_hdl;
                        continue;
                }
 
                pipe_info->buf_sz = (size_t) (attr->src_sz_max);
        }
 
-       /*
-        * Initially, establish CE completion handlers for use with BMI.
-        * These are overwritten with generic handlers after we exit BMI phase.
-        */
-       pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
-       ath10k_ce_send_cb_register(pipe_info->ce_hdl,
-                                  ath10k_pci_bmi_send_done, 0);
-
-       pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
-       ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
-                                  ath10k_pci_bmi_recv_data);
-
        return 0;
 }
 
@@ -1760,14 +1985,9 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
 static int ath10k_pci_hif_power_up(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       const char *irq_mode;
        int ret;
 
-       ret = ath10k_pci_start_intr(ar);
-       if (ret) {
-               ath10k_err("could not start interrupt handling (%d)\n", ret);
-               goto err;
-       }
-
        /*
         * Bring the target up cleanly.
         *
@@ -1778,39 +1998,80 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
         * is in an unexpected state. We try to catch that here in order to
         * reset the Target and retry the probe.
         */
-       ath10k_pci_device_reset(ar);
-
-       ret = ath10k_pci_reset_target(ar);
-       if (ret)
-               goto err_irq;
+       ret = ath10k_pci_device_reset(ar);
+       if (ret) {
+               ath10k_err("failed to reset target: %d\n", ret);
+               goto err;
+       }
 
        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                /* Force AWAKE forever */
                ath10k_do_pci_wake(ar);
 
        ret = ath10k_pci_ce_init(ar);
-       if (ret)
+       if (ret) {
+               ath10k_err("failed to initialize CE: %d\n", ret);
                goto err_ps;
+       }
 
-       ret = ath10k_pci_init_config(ar);
-       if (ret)
+       ret = ath10k_ce_disable_interrupts(ar);
+       if (ret) {
+               ath10k_err("failed to disable CE interrupts: %d\n", ret);
                goto err_ce;
+       }
 
-       ret = ath10k_pci_wake_target_cpu(ar);
+       ret = ath10k_pci_init_irq(ar);
        if (ret) {
-               ath10k_err("could not wake up target CPU (%d)\n", ret);
+               ath10k_err("failed to init irqs: %d\n", ret);
                goto err_ce;
        }
 
+       ret = ath10k_pci_request_early_irq(ar);
+       if (ret) {
+               ath10k_err("failed to request early irq: %d\n", ret);
+               goto err_deinit_irq;
+       }
+
+       ret = ath10k_pci_wait_for_target_init(ar);
+       if (ret) {
+               ath10k_err("failed to wait for target to init: %d\n", ret);
+               goto err_free_early_irq;
+       }
+
+       ret = ath10k_pci_init_config(ar);
+       if (ret) {
+               ath10k_err("failed to setup init config: %d\n", ret);
+               goto err_free_early_irq;
+       }
+
+       ret = ath10k_pci_wake_target_cpu(ar);
+       if (ret) {
+               ath10k_err("could not wake up target CPU: %d\n", ret);
+               goto err_free_early_irq;
+       }
+
+       if (ar_pci->num_msi_intrs > 1)
+               irq_mode = "MSI-X";
+       else if (ar_pci->num_msi_intrs == 1)
+               irq_mode = "MSI";
+       else
+               irq_mode = "legacy";
+
+       if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+               ath10k_info("pci irq %s\n", irq_mode);
+
        return 0;
 
+err_free_early_irq:
+       ath10k_pci_free_early_irq(ar);
+err_deinit_irq:
+       ath10k_pci_deinit_irq(ar);
 err_ce:
        ath10k_pci_ce_deinit(ar);
+       ath10k_pci_device_reset(ar);
 err_ps:
        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
-err_irq:
-       ath10k_pci_stop_intr(ar);
 err:
        return ret;
 }
@@ -1819,7 +2080,10 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-       ath10k_pci_stop_intr(ar);
+       ath10k_pci_free_early_irq(ar);
+       ath10k_pci_kill_tasklet(ar);
+       ath10k_pci_deinit_irq(ar);
+       ath10k_pci_device_reset(ar);
 
        ath10k_pci_ce_deinit(ar);
        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
@@ -1895,7 +2159,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
 
 static void ath10k_pci_ce_tasklet(unsigned long ptr)
 {
-       struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+       struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
        struct ath10k_pci *ar_pci = pipe->ar_pci;
 
        ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
@@ -1955,25 +2219,10 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
        if (ar_pci->num_msi_intrs == 0) {
-               /*
-                * IMPORTANT: INTR_CLR regiser has to be set after
-                * INTR_ENABLE is set to 0, otherwise interrupt can not be
-                * really cleared.
-                */
-               iowrite32(0, ar_pci->mem +
-                         (SOC_CORE_BASE_ADDRESS |
-                          PCIE_INTR_ENABLE_ADDRESS));
-               iowrite32(PCIE_INTR_FIRMWARE_MASK |
-                         PCIE_INTR_CE_MASK_ALL,
-                         ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-                                        PCIE_INTR_CLR_ADDRESS));
-               /*
-                * IMPORTANT: this extra read transaction is required to
-                * flush the posted write buffer.
-                */
-               (void) ioread32(ar_pci->mem +
-                               (SOC_CORE_BASE_ADDRESS |
-                                PCIE_INTR_ENABLE_ADDRESS));
+               if (!ath10k_pci_irq_pending(ar))
+                       return IRQ_NONE;
+
+               ath10k_pci_disable_and_clear_legacy_irq(ar);
        }
 
        tasklet_schedule(&ar_pci->intr_tq);
@@ -1981,6 +2230,34 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static void ath10k_pci_early_irq_tasklet(unsigned long data)
+{
+       struct ath10k *ar = (struct ath10k *)data;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       u32 fw_ind;
+       int ret;
+
+       ret = ath10k_pci_wake(ar);
+       if (ret) {
+               ath10k_warn("failed to wake target in early irq tasklet: %d\n",
+                           ret);
+               return;
+       }
+
+       fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
+       if (fw_ind & FW_IND_EVENT_PENDING) {
+               ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
+                                  fw_ind & ~FW_IND_EVENT_PENDING);
+
+               /* Some structures are unavailable during early boot or at
+                * driver teardown so just print that the device has crashed. */
+               ath10k_warn("device crashed - no diagnostics available\n");
+       }
+
+       ath10k_pci_sleep(ar);
+       ath10k_pci_enable_legacy_irq(ar);
+}
+
 static void ath10k_pci_tasklet(unsigned long data)
 {
        struct ath10k *ar = (struct ath10k *)data;
@@ -1989,40 +2266,22 @@ static void ath10k_pci_tasklet(unsigned long data)
        ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
        ath10k_ce_per_engine_service_any(ar);
 
-       if (ar_pci->num_msi_intrs == 0) {
-               /* Enable Legacy PCI line interrupts */
-               iowrite32(PCIE_INTR_FIRMWARE_MASK |
-                         PCIE_INTR_CE_MASK_ALL,
-                         ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-                                        PCIE_INTR_ENABLE_ADDRESS));
-               /*
-                * IMPORTANT: this extra read transaction is required to
-                * flush the posted write buffer
-                */
-               (void) ioread32(ar_pci->mem +
-                               (SOC_CORE_BASE_ADDRESS |
-                                PCIE_INTR_ENABLE_ADDRESS));
-       }
+       /* Re-enable legacy irq that was disabled in the irq handler */
+       if (ar_pci->num_msi_intrs == 0)
+               ath10k_pci_enable_legacy_irq(ar);
 }
 
-static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
+static int ath10k_pci_request_irq_msix(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int ret;
-       int i;
-
-       ret = pci_enable_msi_block(ar_pci->pdev, num);
-       if (ret)
-               return ret;
+       int ret, i;
 
        ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
                          ath10k_pci_msi_fw_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
        if (ret) {
-               ath10k_warn("request_irq(%d) failed %d\n",
+               ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
                            ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
-
-               pci_disable_msi(ar_pci->pdev);
                return ret;
        }
 
@@ -2031,44 +2290,38 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
                                  ath10k_pci_per_engine_handler,
                                  IRQF_SHARED, "ath10k_pci", ar);
                if (ret) {
-                       ath10k_warn("request_irq(%d) failed %d\n",
+                       ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
                                    ar_pci->pdev->irq + i, ret);
 
                        for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
                                free_irq(ar_pci->pdev->irq + i, ar);
 
                        free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
-                       pci_disable_msi(ar_pci->pdev);
                        return ret;
                }
        }
 
-       ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
        return 0;
 }
 
-static int ath10k_pci_start_intr_msi(struct ath10k *ar)
+static int ath10k_pci_request_irq_msi(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;
 
-       ret = pci_enable_msi(ar_pci->pdev);
-       if (ret < 0)
-               return ret;
-
        ret = request_irq(ar_pci->pdev->irq,
                          ath10k_pci_interrupt_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
-       if (ret < 0) {
-               pci_disable_msi(ar_pci->pdev);
+       if (ret) {
+               ath10k_warn("failed to request MSI irq %d: %d\n",
+                           ar_pci->pdev->irq, ret);
                return ret;
        }
 
-       ath10k_info("MSI interrupt handling\n");
        return 0;
 }
 
-static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
+static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;
@@ -2076,112 +2329,155 @@ static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
        ret = request_irq(ar_pci->pdev->irq,
                          ath10k_pci_interrupt_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
-       if (ret < 0)
+       if (ret) {
+               ath10k_warn("failed to request legacy irq %d: %d\n",
+                           ar_pci->pdev->irq, ret);
                return ret;
+       }
 
-       /*
-        * Make sure to wake the Target before enabling Legacy
-        * Interrupt.
-        */
-       iowrite32(PCIE_SOC_WAKE_V_MASK,
-                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-                 PCIE_SOC_WAKE_ADDRESS);
+       return 0;
+}
 
-       ath10k_pci_wait(ar);
+static int ath10k_pci_request_irq(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-       /*
-        * A potential race occurs here: The CORE_BASE write
-        * depends on target correctly decoding AXI address but
-        * host won't know when target writes BAR to CORE_CTRL.
-        * This write might get lost if target has NOT written BAR.
-        * For now, fix the race by repeating the write in below
-        * synchronization checking.
-        */
-       iowrite32(PCIE_INTR_FIRMWARE_MASK |
-                 PCIE_INTR_CE_MASK_ALL,
-                 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-                                PCIE_INTR_ENABLE_ADDRESS));
-       iowrite32(PCIE_SOC_WAKE_RESET,
-                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-                 PCIE_SOC_WAKE_ADDRESS);
-
-       ath10k_info("legacy interrupt handling\n");
-       return 0;
+       switch (ar_pci->num_msi_intrs) {
+       case 0:
+               return ath10k_pci_request_irq_legacy(ar);
+       case 1:
+               return ath10k_pci_request_irq_msi(ar);
+       case MSI_NUM_REQUEST:
+               return ath10k_pci_request_irq_msix(ar);
+       }
+
+       ath10k_warn("unknown irq configuration upon request\n");
+       return -EINVAL;
 }
 
-static int ath10k_pci_start_intr(struct ath10k *ar)
+static void ath10k_pci_free_irq(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int num = MSI_NUM_REQUEST;
-       int ret;
        int i;
 
-       tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
+       /* There's at least one interrupt regardless of whether it's legacy
+        * INTR or MSI or MSI-X */
+       for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+               free_irq(ar_pci->pdev->irq + i, ar);
+}
+
+static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int i;
+
+       tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
        tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
-                    (unsigned long) ar);
+                    (unsigned long)ar);
+       tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
+                    (unsigned long)ar);
 
        for (i = 0; i < CE_COUNT; i++) {
                ar_pci->pipe_info[i].ar_pci = ar_pci;
-               tasklet_init(&ar_pci->pipe_info[i].intr,
-                            ath10k_pci_ce_tasklet,
+               tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
                             (unsigned long)&ar_pci->pipe_info[i]);
        }
+}
+
+static int ath10k_pci_init_irq(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       ath10k_pci_init_irq_tasklets(ar);
 
        if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
-               num = 1;
+               goto msi;
+
+       /* Try MSI-X */
+       ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
+       ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
+       if (ret == 0)
+               return 0;
+       if (ret > 0)
+               pci_disable_msi(ar_pci->pdev);
+
+msi:
+       /* Try MSI */
+       ar_pci->num_msi_intrs = 1;
+       ret = pci_enable_msi(ar_pci->pdev);
+       if (ret == 0)
+               return 0;
 
-       if (num > 1) {
-               ret = ath10k_pci_start_intr_msix(ar, num);
-               if (ret == 0)
-                       goto exit;
+       /* Try legacy irq
+        *
+        * A potential race occurs here: The CORE_BASE write
+        * depends on target correctly decoding AXI address but
+        * host won't know when target writes BAR to CORE_CTRL.
+        * This write might get lost if target has NOT written BAR.
+        * For now, fix the race by repeating the write in below
+        * synchronization checking. */
+       ar_pci->num_msi_intrs = 0;
 
-               ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
-               num = 1;
+       ret = ath10k_pci_wake(ar);
+       if (ret) {
+               ath10k_warn("failed to wake target: %d\n", ret);
+               return ret;
        }
 
-       if (num == 1) {
-               ret = ath10k_pci_start_intr_msi(ar);
-               if (ret == 0)
-                       goto exit;
+       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+                          PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+       ath10k_pci_sleep(ar);
 
-               ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
-                           ret);
-               num = 0;
+       return 0;
+}
+
+static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
+{
+       int ret;
+
+       ret = ath10k_pci_wake(ar);
+       if (ret) {
+               ath10k_warn("failed to wake target: %d\n", ret);
+               return ret;
        }
 
-       ret = ath10k_pci_start_intr_legacy(ar);
+       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+                          0);
+       ath10k_pci_sleep(ar);
 
-exit:
-       ar_pci->num_msi_intrs = num;
-       ar_pci->ce_count = CE_COUNT;
-       return ret;
+       return 0;
 }
 
-static void ath10k_pci_stop_intr(struct ath10k *ar)
+static int ath10k_pci_deinit_irq(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int i;
 
-       /* There's at least one interrupt irregardless whether its legacy INTR
-        * or MSI or MSI-X */
-       for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
-               free_irq(ar_pci->pdev->irq + i, ar);
-
-       if (ar_pci->num_msi_intrs > 0)
+       switch (ar_pci->num_msi_intrs) {
+       case 0:
+               return ath10k_pci_deinit_irq_legacy(ar);
+       case 1:
+               /* fall-through */
+       case MSI_NUM_REQUEST:
                pci_disable_msi(ar_pci->pdev);
+               return 0;
+       }
+
+       ath10k_warn("unknown irq configuration upon deinit\n");
+       return -EINVAL;
 }
 
-static int ath10k_pci_reset_target(struct ath10k *ar)
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int wait_limit = 300; /* 3 sec */
+       int ret;
 
-       /* Wait for Target to finish initialization before we proceed. */
-       iowrite32(PCIE_SOC_WAKE_V_MASK,
-                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-                 PCIE_SOC_WAKE_ADDRESS);
-
-       ath10k_pci_wait(ar);
+       ret = ath10k_pci_wake(ar);
+       if (ret) {
+               ath10k_err("failed to wake up target: %d\n", ret);
+               return ret;
+       }
 
        while (wait_limit-- &&
               !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
@@ -2196,48 +2492,35 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
        }
 
        if (wait_limit < 0) {
-               ath10k_err("Target stalled\n");
-               iowrite32(PCIE_SOC_WAKE_RESET,
-                         ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-                         PCIE_SOC_WAKE_ADDRESS);
-               return -EIO;
+               ath10k_err("target stalled\n");
+               ret = -EIO;
+               goto out;
        }
 
-       iowrite32(PCIE_SOC_WAKE_RESET,
-                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-                 PCIE_SOC_WAKE_ADDRESS);
-
-       return 0;
+out:
+       ath10k_pci_sleep(ar);
+       return ret;
 }
 
-static void ath10k_pci_device_reset(struct ath10k *ar)
+static int ath10k_pci_device_reset(struct ath10k *ar)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       void __iomem *mem = ar_pci->mem;
-       int i;
+       int i, ret;
        u32 val;
 
-       if (!SOC_GLOBAL_RESET_ADDRESS)
-               return;
-
-       if (!mem)
-               return;
-
-       ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
-                              PCIE_SOC_WAKE_V_MASK);
-       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-               if (ath10k_pci_target_is_awake(ar))
-                       break;
-               msleep(1);
+       ret = ath10k_do_pci_wake(ar);
+       if (ret) {
+               ath10k_err("failed to wake up target: %d\n",
+                          ret);
+               return ret;
        }
 
        /* Put Target, including PCIe, into RESET. */
-       val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+       val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
        val |= 1;
-       ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+       ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-               if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+               if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
                                          RTC_STATE_COLD_RESET_MASK)
                        break;
                msleep(1);
@@ -2245,16 +2528,17 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
 
        /* Pull Target, including PCIe, out of RESET. */
        val &= ~1;
-       ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+       ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-               if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+               if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
                                            RTC_STATE_COLD_RESET_MASK))
                        break;
                msleep(1);
        }
 
-       ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+       ath10k_do_pci_sleep(ar);
+       return 0;
 }
 
 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2267,13 +2551,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
 
                switch (i) {
                case ATH10K_PCI_FEATURE_MSI_X:
-                       ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
-                       break;
-               case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
-                       ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+                       ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
                        break;
                case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
-                       ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+                       ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
                        break;
                }
        }
@@ -2286,7 +2567,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        int ret = 0;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
-       u32 lcr_val;
+       u32 lcr_val, chip_id;
 
        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
 
@@ -2298,9 +2579,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        ar_pci->dev = &pdev->dev;
 
        switch (pci_dev->device) {
-       case QCA988X_1_0_DEVICE_ID:
-               set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
-               break;
        case QCA988X_2_0_DEVICE_ID:
                set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
                break;
@@ -2317,15 +2595,11 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
        ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
        if (!ar) {
-               ath10k_err("ath10k_core_create failed!\n");
+               ath10k_err("failed to create driver core\n");
                ret = -EINVAL;
                goto err_ar_pci;
        }
 
-       /* Enable QCA988X_1.0 HW workarounds */
-       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
-               spin_lock_init(&ar_pci->hw_v1_workaround_lock);
-
        ar_pci->ar = ar;
        ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
        atomic_set(&ar_pci->keep_awake_count, 0);
@@ -2340,20 +2614,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
         */
        ret = pci_assign_resource(pdev, BAR_NUM);
        if (ret) {
-               ath10k_err("cannot assign PCI space: %d\n", ret);
+               ath10k_err("failed to assign PCI space: %d\n", ret);
                goto err_ar;
        }
 
        ret = pci_enable_device(pdev);
        if (ret) {
-               ath10k_err("cannot enable PCI device: %d\n", ret);
+               ath10k_err("failed to enable PCI device: %d\n", ret);
                goto err_ar;
        }
 
        /* Request MMIO resources */
        ret = pci_request_region(pdev, BAR_NUM, "ath");
        if (ret) {
-               ath10k_err("PCI MMIO reservation error: %d\n", ret);
+               ath10k_err("failed to request MMIO region: %d\n", ret);
                goto err_device;
        }
 
@@ -2363,13 +2637,13 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
         */
        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
-               ath10k_err("32-bit DMA not available: %d\n", ret);
+               ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
                goto err_region;
        }
 
        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
-               ath10k_err("cannot enable 32-bit consistent DMA\n");
+               ath10k_err("failed to set consistent DMA mask to 32-bit\n");
                goto err_region;
        }
 
@@ -2386,7 +2660,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        /* Arrange for access to Target SoC registers. */
        mem = pci_iomap(pdev, BAR_NUM, 0);
        if (!mem) {
-               ath10k_err("PCI iomap error\n");
+               ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
                ret = -EIO;
                goto err_master;
        }
@@ -2395,11 +2669,21 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
        spin_lock_init(&ar_pci->ce_lock);
 
-       ar_pci->cacheline_sz = dma_get_cache_alignment();
+       ret = ath10k_do_pci_wake(ar);
+       if (ret) {
+               ath10k_err("failed to get chip id: %d\n", ret);
+               goto err_iomap;
+       }
+
+       chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+
+       ath10k_do_pci_sleep(ar);
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
 
-       ret = ath10k_core_register(ar);
+       ret = ath10k_core_register(ar, chip_id);
        if (ret) {
-               ath10k_err("could not register driver core (%d)\n", ret);
+               ath10k_err("failed to register driver core: %d\n", ret);
                goto err_iomap;
        }
 
@@ -2414,7 +2698,6 @@ err_region:
 err_device:
        pci_disable_device(pdev);
 err_ar:
-       pci_set_drvdata(pdev, NULL);
        ath10k_core_destroy(ar);
 err_ar_pci:
        /* call HIF PCI free here */
@@ -2442,7 +2725,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
 
        ath10k_core_unregister(ar);
 
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ar_pci->mem);
        pci_release_region(pdev, BAR_NUM);
        pci_clear_master(pdev);
@@ -2467,7 +2749,7 @@ static int __init ath10k_pci_init(void)
 
        ret = pci_register_driver(&ath10k_pci_driver);
        if (ret)
-               ath10k_err("pci_register_driver failed [%d]\n", ret);
+               ath10k_err("failed to register PCI driver: %d\n", ret);
 
        return ret;
 }
@@ -2483,9 +2765,6 @@ module_exit(ath10k_pci_exit);
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
This page took 0.094311 seconds and 5 git commands to generate.