iwlwifi: mvm: let any command flag be passed to iwl_mvm_flush_tx_path()
author    Luca Coelho <luciano.coelho@intel.com>
          Tue, 6 Oct 2015 06:54:57 +0000 (09:54 +0300)
committer Emmanuel Grumbach <emmanuel.grumbach@intel.com>
          Sun, 25 Oct 2015 11:45:04 +0000 (13:45 +0200)
Instead of only allowing the caller to decide whether the CMD_ASYNC
flag is set, let it pass the entire flags bitmask.  This allows more
flexibility and will be needed when we call this function in the
suspend flow (where other flags are needed).
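
For illustration only (not part of this patch), a caller can now
combine other CMD_* flags from iwl-trans.h instead of being limited
to 0 or CMD_ASYNC.  A minimal sketch, assuming CMD_SEND_IN_RFKILL as
the extra flag; the helper name is hypothetical:

	/* hypothetical suspend-path caller; the flag choice is an
	 * assumption, not taken from this patch */
	static int iwl_mvm_flush_on_suspend(struct iwl_mvm *mvm, u32 tfd_msk)
	{
		/* synchronous flush that may also be sent in RF-kill */
		return iwl_mvm_flush_tx_path(mvm, tfd_msk, CMD_SEND_IN_RFKILL);
	}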

Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tx.c

diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 9b4fbb8b483a7e1d253269da93ee4158dce558c6..05928fb4021d5c23dd3801289094d4ad205d6bd2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -85,7 +85,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
        IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
 
        mutex_lock(&mvm->mutex);
-       ret =  iwl_mvm_flush_tx_path(mvm, scd_q_msk, true) ? : count;
+       ret =  iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
        mutex_unlock(&mvm->mutex);
 
        return ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index aaffb54e8f78ce43cbd3d6d45426b4d38826266f..adfcce48d863c9a9fe02886ad2cbda3da60d8d36 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1781,7 +1781,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
                 * Flush them here.
                 */
                mutex_lock(&mvm->mutex);
-               iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
+               iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
                mutex_unlock(&mvm->mutex);
 
                /*
@@ -3924,7 +3924,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
        }
 
        if (drop) {
-               if (iwl_mvm_flush_tx_path(mvm, msk, true))
+               if (iwl_mvm_flush_tx_path(mvm, msk, 0))
                        IWL_ERR(mvm, "flush request fail\n");
                mutex_unlock(&mvm->mutex);
        } else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 0d3aff1b4bad3ed5c6307f68b42c977ca6f52683..4485bdb56b34ff8c9feadc7a9273dd2cb713047c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1031,7 +1031,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
 #else
 static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 #endif
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
 static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index a9a3eb6a1f8a80a87166edc74de265d090c55ac8..04d0cb3b69a19259421b124b475750bc65c15452 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -501,7 +501,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                if (ret)
                        return ret;
                /* flush its queues here since we are freeing mvm_sta */
-               ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+               ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
                if (ret)
                        return ret;
                ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
@@ -1155,7 +1155,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        if (old_state >= IWL_AGG_ON) {
                iwl_mvm_drain_sta(mvm, mvmsta, true);
-               if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
+               if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
                        IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
                iwl_trans_wait_tx_queue_empty(mvm->trans,
                                              mvmsta->tfd_queue_msk);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index dbd7d544575de68a3972588bb117a8fa5b560ef3..7530eb23035d3ea315c8d6671eebce16d0ec44c6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -129,7 +129,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
         * issue as it will have to complete before the next command is
         * executed, and a new time event means a new command.
         */
-       iwl_mvm_flush_tx_path(mvm, queues, false);
+       iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
 }
 
 static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index ff8b9bdef7e8533d60fc0a2960ac07de6b4679db..c652a66be80353532a215f6b728d6d847d50825d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -1099,7 +1099,7 @@ out:
  * 2) flush the Tx path
  * 3) wait for the transport queues to be empty
  */
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
 {
        int ret;
        struct iwl_tx_path_flush_cmd flush_cmd = {
@@ -1107,8 +1107,6 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
                .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
        };
 
-       u32 flags = sync ? 0 : CMD_ASYNC;
-
        ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
                                   sizeof(flush_cmd), &flush_cmd);
        if (ret)