To achieve maximal throughput, it is very important to react to
interrupts as soon as possible. Currently the interrupt handler wakes up
a worker that handles interrupts in process context. A cleaner and more
efficient design is to request a threaded interrupt handler: it runs in
a dedicated high-priority thread and may perform blocking operations
such as SDIO/SPI transactions.
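
For reference, a minimal sketch of the threaded-IRQ registration
pattern (my_hardirq, my_thread_fn and "my-dev" are illustrative names,
not part of this driver):

#include <linux/interrupt.h>

/* Hard IRQ handler: runs in interrupt context and must not block. */
static irqreturn_t my_hardirq(int irq, void *cookie)
{
	/* Note the interrupt, then wake the handler thread. */
	return IRQ_WAKE_THREAD;
}

/* Threaded handler: runs in a high-priority kernel thread and may
 * sleep, e.g. for SDIO/SPI transactions. */
static irqreturn_t my_thread_fn(int irq, void *cookie)
{
	return IRQ_HANDLED;
}

static int my_request_irq(unsigned int irq, void *cookie)
{
	return request_threaded_irq(irq, my_hardirq, my_thread_fn,
				    IRQF_TRIGGER_RISING, "my-dev", cookie);
}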
Some of the work can be deferred, mostly the calls into mac80211 APIs
(ieee80211_rx_ni and ieee80211_tx_status). By deferring such work to a
separate worker, the IRQ handler thread stays responsive to I/O. In
addition, on multi-core systems the two threads can be scheduled on
different cores, improving overall performance.
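
In sketch form (the queue and work names mirror the ones introduced in
the patch below; locking and error handling are omitted):

/* IRQ thread: queue the frame and kick the worker instead of calling
 * into mac80211 directly. */
skb_queue_tail(&wl->deferred_rx_queue, skb);
ieee80211_queue_work(wl->hw, &wl->netstack_work);

/* Worker: drain the queues and hand the frames to mac80211. */
while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
	ieee80211_rx_ni(wl->hw, skb);
while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
	ieee80211_tx_status(wl->hw, skb);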
The use of WL1271_FLAG_IRQ_PENDING and WL1271_FLAG_IRQ_RUNNING has
changed. For simplicity, the firmware is now always queried for more
pending interrupts. Since interrupts arrive in relatively long bursts,
the overhead of the extra FW status read is negligible. This also makes
it possible to register the IRQ handler with the ONESHOT option.
Signed-off-by: Ido Yariv <ido@wizery.com>
Reviewed-by: Luciano Coelho <coelho@ti.com>
Signed-off-by: Luciano Coelho <coelho@ti.com>
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
int wl1271_init_ieee80211(struct wl1271 *wl);
struct ieee80211_hw *wl1271_alloc_hw(void);
int wl1271_free_hw(struct wl1271 *wl);
+irqreturn_t wl1271_irq(int irq, void *data);
if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
(s64)le32_to_cpu(status->fw_localtime);
}
-#define WL1271_IRQ_MAX_LOOPS 10
+static void wl1271_flush_deferred_work(struct wl1271 *wl)
+{
+ struct sk_buff *skb;
+
+ /* Pass all received frames to the network stack */
+ while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
+ ieee80211_rx_ni(wl->hw, skb);
+
+ /* Return sent skbs to the network stack */
+ while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
+ ieee80211_tx_status(wl->hw, skb);
+}
+
+static void wl1271_netstack_work(struct work_struct *work)
+{
+ struct wl1271 *wl =
+ container_of(work, struct wl1271, netstack_work);
+
+ do {
+ wl1271_flush_deferred_work(wl);
+ } while (skb_queue_len(&wl->deferred_rx_queue));
+}
-static void wl1271_irq_work(struct work_struct *work)
+#define WL1271_IRQ_MAX_LOOPS 256
+
+irqreturn_t wl1271_irq(int irq, void *cookie)
{
int ret;
u32 intr;
int loopcount = WL1271_IRQ_MAX_LOOPS;
- unsigned long flags;
- struct wl1271 *wl =
- container_of(work, struct wl1271, irq_work);
+ struct wl1271 *wl = (struct wl1271 *)cookie;
+ bool done = false;
+ unsigned int defer_count;
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, true);
+ ret = wl1271_ps_elp_wakeup(wl);
- spin_lock_irqsave(&wl->wl_lock, flags);
- while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
- clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- loopcount--;
+ while (!done && loopcount--) {
+ /*
+ * In order to avoid a race with the hardirq, clear the flag
+ * before acknowledging the chip. Since the mutex is held,
+ * wl1271_ps_elp_wakeup cannot be called concurrently.
+ */
+ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ smp_mb__after_clear_bit();
wl1271_fw_status(wl, wl->fw_status);
intr = le32_to_cpu(wl->fw_status->common.intr);
+ intr &= WL1271_INTR_MASK;
- wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
- spin_lock_irqsave(&wl->wl_lock, flags);
- intr &= WL1271_INTR_MASK;
-
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
wl1271_error("watchdog interrupt received! "
"starting recovery.");
- if (intr & WL1271_ACX_INTR_DATA) {
+ if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
wl1271_rx(wl, &wl->fw_status->common);
if (wl->fw_status->common.tx_results_counter !=
(wl->tx_results_count & 0xff))
wl1271_tx_complete(wl);
+
+ /* Make sure the deferred queues don't get too long */
+ defer_count = skb_queue_len(&wl->deferred_tx_queue) +
+ skb_queue_len(&wl->deferred_rx_queue);
+ if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
+ wl1271_flush_deferred_work(wl);
}
if (intr & WL1271_ACX_INTR_EVENT_A) {
if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
-
- spin_lock_irqsave(&wl->wl_lock, flags);
- if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- else
- clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
+EXPORT_SYMBOL_GPL(wl1271_irq);
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
- wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
- wl1271_disable_interrupts(wl);
wl1271_power_off(wl);
wl->state = WL1271_STATE_OFF;
wl->rx_counter = 0;
mutex_unlock(&wl->mutex);
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
mutex_lock(&wl->mutex);
out:
- wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
wl->state = WL1271_STATE_OFF;
- wl1271_disable_interrupts(wl);
-
mutex_unlock(&wl->mutex);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
- cancel_work_sync(&wl->irq_work);
+ cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->pspoll_work);
cancel_delayed_work_sync(&wl->elp_work);
is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
conf_tid->apsd_conf[0] = 0;
conf_tid->apsd_conf[1] = 0;
} else {
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_free_sta;
if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (wl->state == WL1271_STATE_OFF)
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
for (j = 0; j < AP_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
+ skb_queue_head_init(&wl->deferred_rx_queue);
+ skb_queue_head_init(&wl->deferred_tx_queue);
+
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
- INIT_WORK(&wl->irq_work, wl1271_irq_work);
+ INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
-int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
+int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
DECLARE_COMPLETION_ONSTACK(compl);
unsigned long flags;
* the completion variable in one entity.
*/
spin_lock_irqsave(&wl->wl_lock, flags);
- if (work_pending(&wl->irq_work) || chip_awake)
+ if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
pending = true;
else
wl->elp_compl = &compl;
case STATION_ACTIVE_MODE:
default:
wl1271_debug(DEBUG_PSM, "leaving psm");
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
u32 rates, bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
-int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
+int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
skb_trim(skb, skb->len - desc->pad_len);
- ieee80211_rx_ni(wl->hw, skb);
+ skb_queue_tail(&wl->deferred_rx_queue, skb);
+ ieee80211_queue_work(wl->hw, &wl->netstack_work);
return &(wl_to_func(wl)->dev);
}
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_hardirq(int irq, void *cookie)
{
struct wl1271 *wl = cookie;
unsigned long flags;
/* complete the ELP completion */
spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
if (wl->elp_compl) {
complete(wl->elp_compl);
wl->elp_compl = NULL;
}
-
- if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
+ return IRQ_WAKE_THREAD;
}
static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
wl->irq = wlan_data->irq;
wl->ref_clock = wlan_data->board_ref_clock;
- ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
+ IRQF_TRIGGER_RISING,
+ DRIVER_NAME, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
goto out_free;
}
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
disable_irq(wl->irq);
ret = wl1271_init_ieee80211(wl);
out_irq:
free_irq(wl->irq, wl);
out_free:
wl1271_free_hw(wl);
spi_sync(wl_to_spi(wl), &m);
}
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_hardirq(int irq, void *cookie)
+ struct wl1271 *wl = cookie;
unsigned long flags;
wl1271_debug(DEBUG_IRQ, "IRQ");
/* complete the ELP completion */
spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
if (wl->elp_compl) {
complete(wl->elp_compl);
wl->elp_compl = NULL;
}
-
- if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
+ return IRQ_WAKE_THREAD;
}
static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
- ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
+ IRQF_TRIGGER_RISING,
+ DRIVER_NAME, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
goto out_free;
}
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
disable_irq(wl->irq);
ret = wl1271_init_ieee80211(wl);
while ((skb = wl1271_skb_dequeue(wl))) {
if (!woken_up) {
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_ack;
woken_up = true;
result->rate_class_index, result->status);
/* return the packet to the stack */
- ieee80211_tx_status(wl->hw, skb);
+ skb_queue_tail(&wl->deferred_tx_queue, skb);
+ ieee80211_queue_work(wl->hw, &wl->netstack_work);
wl1271_free_tx_id(wl, result->id);
}
WL1271_FLAG_IN_ELP,
WL1271_FLAG_PSM,
WL1271_FLAG_PSM_REQUESTED,
- WL1271_FLAG_IRQ_PENDING,
WL1271_FLAG_IRQ_RUNNING,
WL1271_FLAG_IDLE,
WL1271_FLAG_IDLE_REQUESTED,
struct sk_buff_head tx_queue[NUM_TX_QUEUES];
int tx_queue_count;
+ /* Frames received, not handled yet by mac80211 */
+ struct sk_buff_head deferred_rx_queue;
+
+ /* Frames sent, not returned yet to mac80211 */
+ struct sk_buff_head deferred_tx_queue;
+
struct work_struct tx_work;
/* Pending TX frames */
/* Intermediate buffer, used for packet aggregation */
u8 *aggr_buf;
- /* The target interrupt mask */
- struct work_struct irq_work;
+ /* Network stack work */
+ struct work_struct netstack_work;
/* Hardware recovery work */
struct work_struct recovery_work;
#define WL1271_TX_QUEUE_LOW_WATERMARK 10
#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
+#define WL1271_DEFERRED_QUEUE_LIMIT 64
+
/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
on in case is has been shut down shortly before */
#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */