wl12xx: Switch to a threaded interrupt handler
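
Instead of scheduling wl1271_irq_work from the interrupt, the chip
interrupt is now serviced by a threaded handler: wl1271_irq() has an
irqreturn_t signature, is exported with EXPORT_SYMBOL_GPL, runs with the
mutex held for up to WL1271_IRQ_MAX_LOOPS iterations (raised from 10 to
256), and drops the wl_lock/WL1271_FLAG_IRQ_PENDING bookkeeping. Frames
destined for mac80211 are drained from the new deferred_rx_queue and
deferred_tx_queue by netstack_work, or flushed from the handler itself
once their combined length exceeds WL1271_DEFERRED_QUEUE_LIMIT.
wl1271_ps_elp_wakeup() loses its bool argument at every call site.

The actual request_threaded_irq() call lives in the bus glue (sdio/spi),
which this diff does not touch. Purely as an illustrative sketch of how
a handler of this shape is typically registered -- the irq number, the
trigger flags, the NULL primary handler and the "wl1271" name below are
placeholders, not the driver's real call site:

    /* Hypothetical registration sketch; request_threaded_irq() is
     * declared in <linux/interrupt.h>.  The driver's real primary
     * handler (not shown in this diff) is expected to set
     * WL1271_FLAG_IRQ_RUNNING, per the race comment in wl1271_irq().
     */
    ret = request_threaded_irq(wl->irq, NULL, wl1271_irq,
                               IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                               "wl1271", wl);
    if (ret < 0) {
        wl1271_error("request_irq() failed: %d", ret);
        return ret;
    }
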
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 5772a33d79ecab92e9bfaa6cbe188c442670363b..f408c5a84cc912ff65784e0e6cbb6534499b05ca 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -374,7 +374,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
        if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -537,6 +537,57 @@ static int wl1271_plt_init(struct wl1271 *wl)
        return ret;
 }
 
+static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
+{
+       bool fw_ps;
+
+       /* only regulate station links */
+       if (hlid < WL1271_AP_STA_HLID_START)
+               return;
+
+       fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+
+       /*
+        * Wake up from high-level PS if the STA is asleep with too few
+        * blocks in the FW, or if the STA is awake.
+        */
+       if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS)
+               wl1271_ps_link_end(wl, hlid);
+
+       /* Start high-level PS if the STA is asleep with enough blocks in FW */
+       else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+               wl1271_ps_link_start(wl, hlid, true);
+}
+
+static void wl1271_irq_update_links_status(struct wl1271 *wl,
+                                      struct wl1271_fw_ap_status *status)
+{
+       u32 cur_fw_ps_map;
+       u8 hlid;
+
+       cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+       if (wl->ap_fw_ps_map != cur_fw_ps_map) {
+               wl1271_debug(DEBUG_PSM,
+                            "link ps prev 0x%x cur 0x%x changed 0x%x",
+                            wl->ap_fw_ps_map, cur_fw_ps_map,
+                            wl->ap_fw_ps_map ^ cur_fw_ps_map);
+
+               wl->ap_fw_ps_map = cur_fw_ps_map;
+       }
+
+       for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
+               u8 cnt = status->tx_lnk_free_blks[hlid] -
+                       wl->links[hlid].prev_freed_blks;
+
+               wl->links[hlid].prev_freed_blks =
+                       status->tx_lnk_free_blks[hlid];
+               wl->links[hlid].allocated_blks -= cnt;
+
+               wl1271_irq_ps_regulate_link(wl, hlid,
+                                           wl->links[hlid].allocated_blks);
+       }
+}
+
 static void wl1271_fw_status(struct wl1271 *wl,
                             struct wl1271_fw_full_status *full_status)
 {
@@ -574,16 +625,9 @@ static void wl1271_fw_status(struct wl1271 *wl,
        if (total)
                clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 
-       if (wl->bss_type == BSS_TYPE_AP_BSS) {
-               for (i = 0; i < AP_MAX_LINKS; i++) {
-                       u8 cnt = status->tx_lnk_free_blks[i] -
-                               wl->links[i].prev_freed_blks;
-
-                       wl->links[i].prev_freed_blks =
-                               status->tx_lnk_free_blks[i];
-                       wl->links[i].allocated_blks -= cnt;
-               }
-       }
+       /* For AP, update allocated TX blocks and PS status per link */
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               wl1271_irq_update_links_status(wl, &full_status->ap);
 
        /* update the host-chipset time offset */
        getnstimeofday(&ts);
@@ -591,16 +635,39 @@ static void wl1271_fw_status(struct wl1271 *wl,
                (s64)le32_to_cpu(status->fw_localtime);
 }
 
-#define WL1271_IRQ_MAX_LOOPS 10
+static void wl1271_flush_deferred_work(struct wl1271 *wl)
+{
+       struct sk_buff *skb;
+
+       /* Pass all received frames to the network stack */
+       while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
+               ieee80211_rx_ni(wl->hw, skb);
+
+       /* Return sent skbs to the network stack */
+       while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
+               ieee80211_tx_status(wl->hw, skb);
+}
+
+static void wl1271_netstack_work(struct work_struct *work)
+{
+       struct wl1271 *wl =
+               container_of(work, struct wl1271, netstack_work);
 
-static void wl1271_irq_work(struct work_struct *work)
+       do {
+               wl1271_flush_deferred_work(wl);
+       } while (skb_queue_len(&wl->deferred_rx_queue));
+}
+
+#define WL1271_IRQ_MAX_LOOPS 256
+
+irqreturn_t wl1271_irq(int irq, void *cookie)
 {
        int ret;
        u32 intr;
        int loopcount = WL1271_IRQ_MAX_LOOPS;
-       unsigned long flags;
-       struct wl1271 *wl =
-               container_of(work, struct wl1271, irq_work);
+       struct wl1271 *wl = (struct wl1271 *)cookie;
+       bool done = false;
+       unsigned int defer_count;
 
        mutex_lock(&wl->mutex);
 
@@ -609,26 +676,27 @@ static void wl1271_irq_work(struct work_struct *work)
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, true);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-       spin_lock_irqsave(&wl->wl_lock, flags);
-       while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
-               clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-               loopcount--;
+       while (!done && loopcount--) {
+               /*
+                * In order to avoid a race with the hardirq, clear the flag
+                * before acknowledging the chip. Since the mutex is held,
+                * wl1271_ps_elp_wakeup cannot be called concurrently.
+                */
+               clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+               smp_mb__after_clear_bit();
 
                wl1271_fw_status(wl, wl->fw_status);
                intr = le32_to_cpu(wl->fw_status->common.intr);
+               intr &= WL1271_INTR_MASK;
                if (!intr) {
-                       wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
-                       spin_lock_irqsave(&wl->wl_lock, flags);
+                       done = true;
                        continue;
                }
 
-               intr &= WL1271_INTR_MASK;
-
                if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
                        wl1271_error("watchdog interrupt received! "
                                     "starting recovery.");
@@ -638,13 +706,10 @@ static void wl1271_irq_work(struct work_struct *work)
                        goto out;
                }
 
-               if (intr & WL1271_ACX_INTR_DATA) {
+               if (likely(intr & WL1271_ACX_INTR_DATA)) {
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 
-                       /* check for tx results */
-                       if (wl->fw_status->common.tx_results_counter !=
-                           (wl->tx_results_count & 0xff))
-                               wl1271_tx_complete(wl);
+                       wl1271_rx(wl, &wl->fw_status->common);
 
                        /* Check if any tx blocks were freed */
                        if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
@@ -656,7 +721,16 @@ static void wl1271_irq_work(struct work_struct *work)
                                wl1271_tx_work_locked(wl);
                        }
 
-                       wl1271_rx(wl, &wl->fw_status->common);
+                       /* check for tx results */
+                       if (wl->fw_status->common.tx_results_counter !=
+                           (wl->tx_results_count & 0xff))
+                               wl1271_tx_complete(wl);
+
+                       /* Make sure the deferred queues don't get too long */
+                       defer_count = skb_queue_len(&wl->deferred_tx_queue) +
+                                     skb_queue_len(&wl->deferred_rx_queue);
+                       if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
+                               wl1271_flush_deferred_work(wl);
                }
 
                if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -675,21 +749,16 @@ static void wl1271_irq_work(struct work_struct *work)
 
                if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
-
-               spin_lock_irqsave(&wl->wl_lock, flags);
        }
 
-       if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
-               ieee80211_queue_work(wl->hw, &wl->irq_work);
-       else
-               clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-       spin_unlock_irqrestore(&wl->wl_lock, flags);
-
        wl1271_ps_elp_sleep(wl);
 
 out:
        mutex_unlock(&wl->mutex);
+
+       return IRQ_HANDLED;
 }
+EXPORT_SYMBOL_GPL(wl1271_irq);
 
 static int wl1271_fetch_firmware(struct wl1271 *wl)
 {
@@ -930,7 +999,6 @@ int wl1271_plt_start(struct wl1271 *wl)
                goto out;
 
 irq_disable:
-               wl1271_disable_interrupts(wl);
                mutex_unlock(&wl->mutex);
                /* Unlocking the mutex in the middle of handling is
                   inherently unsafe. In this case we deem it safe to do,
@@ -939,7 +1007,9 @@ irq_disable:
                   work function will not do anything.) Also, any other
                   possible concurrent operations will fail due to the
                   current state, hence the wl1271 struct should be safe. */
-               cancel_work_sync(&wl->irq_work);
+               wl1271_disable_interrupts(wl);
+               wl1271_flush_deferred_work(wl);
+               cancel_work_sync(&wl->netstack_work);
                mutex_lock(&wl->mutex);
 power_off:
                wl1271_power_off(wl);
@@ -966,14 +1036,15 @@ int __wl1271_plt_stop(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_disable_interrupts(wl);
        wl1271_power_off(wl);
 
        wl->state = WL1271_STATE_OFF;
        wl->rx_counter = 0;
 
        mutex_unlock(&wl->mutex);
-       cancel_work_sync(&wl->irq_work);
+       wl1271_disable_interrupts(wl);
+       wl1271_flush_deferred_work(wl);
+       cancel_work_sync(&wl->netstack_work);
        cancel_work_sync(&wl->recovery_work);
        mutex_lock(&wl->mutex);
 out:
@@ -990,7 +1061,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
        return ret;
 }
 
-static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct wl1271 *wl = hw->priv;
        unsigned long flags;
@@ -1029,8 +1100,6 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
                ieee80211_queue_work(wl->hw, &wl->tx_work);
-
-       return NETDEV_TX_OK;
 }
 
 static struct notifier_block wl1271_dev_notifier = {
@@ -1127,7 +1196,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                break;
 
 irq_disable:
-               wl1271_disable_interrupts(wl);
                mutex_unlock(&wl->mutex);
                /* Unlocking the mutex in the middle of handling is
                   inherently unsafe. In this case we deem it safe to do,
@@ -1136,7 +1204,9 @@ irq_disable:
                   work function will not do anything.) Also, any other
                   possible concurrent operations will fail due to the
                   current state, hence the wl1271 struct should be safe. */
-               cancel_work_sync(&wl->irq_work);
+               wl1271_disable_interrupts(wl);
+               wl1271_flush_deferred_work(wl);
+               cancel_work_sync(&wl->netstack_work);
                mutex_lock(&wl->mutex);
 power_off:
                wl1271_power_off(wl);
@@ -1202,12 +1272,12 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
 
        wl->state = WL1271_STATE_OFF;
 
-       wl1271_disable_interrupts(wl);
-
        mutex_unlock(&wl->mutex);
 
+       wl1271_disable_interrupts(wl);
+       wl1271_flush_deferred_work(wl);
        cancel_delayed_work_sync(&wl->scan_complete_work);
-       cancel_work_sync(&wl->irq_work);
+       cancel_work_sync(&wl->netstack_work);
        cancel_work_sync(&wl->tx_work);
        cancel_delayed_work_sync(&wl->pspoll_work);
        cancel_delayed_work_sync(&wl->elp_work);
@@ -1241,6 +1311,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
        wl->filters = 0;
        wl1271_free_ap_keys(wl);
        memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
+       wl->ap_fw_ps_map = 0;
+       wl->ap_ps_map = 0;
 
        for (i = 0; i < NUM_TX_QUEUES; i++)
                wl->tx_blocks_freed[i] = 0;
@@ -1481,7 +1553,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
 
        is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -1637,7 +1709,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -1866,7 +1938,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                goto out_unlock;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out_unlock;
 
@@ -1969,7 +2041,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
                goto out;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -1995,7 +2067,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
                goto out;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -2023,7 +2095,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
                goto out;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -2502,7 +2574,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -2557,7 +2629,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
                conf_tid->apsd_conf[0] = 0;
                conf_tid->apsd_conf[1] = 0;
        } else {
-               ret = wl1271_ps_elp_wakeup(wl, false);
+               ret = wl1271_ps_elp_wakeup(wl);
                if (ret < 0)
                        goto out;
 
@@ -2603,7 +2675,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -2649,10 +2721,10 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
        }
 
        wl_sta = (struct wl1271_station *)sta->drv_priv;
-
        __set_bit(id, wl->ap_hlid_map);
        wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
        *hlid = wl_sta->hlid;
+       memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
        return 0;
 }
 
@@ -2664,7 +2736,10 @@ static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
                return;
 
        __clear_bit(id, wl->ap_hlid_map);
+       memset(wl->links[hlid].addr, 0, ETH_ALEN);
        wl1271_tx_reset_link_queues(wl, hlid);
+       __clear_bit(hlid, &wl->ap_ps_map);
+       __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
 }
 
 static int wl1271_op_sta_add(struct ieee80211_hw *hw,
@@ -2689,7 +2764,7 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out_free_sta;
 
@@ -2732,7 +2807,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
        if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -2765,7 +2840,7 @@ int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                goto out;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -3129,7 +3204,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
        if (wl->state == WL1271_STATE_OFF)
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -3329,9 +3404,12 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
                for (j = 0; j < AP_MAX_LINKS; j++)
                        skb_queue_head_init(&wl->links[j].tx_queue[i]);
 
+       skb_queue_head_init(&wl->deferred_rx_queue);
+       skb_queue_head_init(&wl->deferred_tx_queue);
+
        INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
        INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
-       INIT_WORK(&wl->irq_work, wl1271_irq_work);
+       INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
        INIT_WORK(&wl->tx_work, wl1271_tx_work);
        INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
        INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
@@ -3355,6 +3433,9 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
        wl->set_bss_type = MAX_BSS_TYPE;
        wl->fw_bss_type = MAX_BSS_TYPE;
        wl->last_tx_hlid = 0;
+       wl->ap_ps_map = 0;
+       wl->ap_fw_ps_map = 0;
+       wl->quirks = 0;
 
        memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
        for (i = 0; i < ACX_TX_DESCRIPTORS; i++)