*/
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
- struct iwl_priv *priv = priv(trans);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
if (!q->n_bd)
*/
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
- struct iwl_priv *priv = priv(trans);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
struct device *dev = bus(trans)->dev;
int i;
if (WARN_ON(!txq))
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
int txq_id;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_priv *priv = priv(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* Tx queues */
- if (priv->txq) {
+ if (trans_pcie->txq) {
for (txq_id = 0;
txq_id < hw_params(trans).max_txq_num; txq_id++)
iwl_tx_queue_free(trans, txq_id);
}
- kfree(priv->txq);
- priv->txq = NULL;
+ kfree(trans_pcie->txq);
+ trans_pcie->txq = NULL;
iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
{
int ret;
int txq_id, slots_num;
- struct iwl_priv *priv = priv(trans);
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
sizeof(struct iwlagn_scd_bc_tbl);
/* It is not allowed to alloc twice, so warn when this happens.
 * We cannot rely on the previous allocation, so free and fail */
- if (WARN_ON(priv->txq)) {
+ if (WARN_ON(trans_pcie->txq)) {
ret = -EINVAL;
goto error;
}
goto error;
}
- priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
+ trans_pcie->txq = kzalloc(sizeof(struct iwl_tx_queue) *
hw_params(trans).max_txq_num, GFP_KERNEL);
- if (!priv->txq) {
+ if (!trans_pcie->txq) {
IWL_ERR(trans, "Not enough memory for txq\n");
ret = -ENOMEM;
goto error;
for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
slots_num = (txq_id == trans->shrd->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_trans_txq_alloc(trans, &priv->txq[txq_id], slots_num,
- txq_id);
+ ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
int txq_id, slots_num;
unsigned long flags;
bool alloc = false;
- struct iwl_priv *priv = priv(trans);
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!priv->txq) {
+ if (!trans_pcie->txq) {
ret = iwl_trans_tx_alloc(trans);
if (ret)
goto error;
for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
slots_num = (txq_id == trans->shrd->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_trans_txq_init(trans, &priv->txq[txq_id], slots_num,
- txq_id);
+ ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- priv->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
+ trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
- if ((hw_params(priv).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
+ if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
iwl_trans_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
return -EIO;
a += 4)
iwl_write_targ_mem(bus(trans), a, 0);
for (; a < trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
+ SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
a += 4)
iwl_write_targ_mem(bus(trans), a, 0);
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
- SCD_QUEUECHAIN_SEL_ALL(priv));
+ SCD_QUEUECHAIN_SEL_ALL(trans));
iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);
/* initialize the queues */
- for (i = 0; i < hw_params(priv).max_txq_num; i++) {
+ for (i = 0; i < hw_params(trans).max_txq_num; i++) {
iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
/* make sure all queues are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ memset(&trans_pcie->queue_stopped[0], 0,
+ sizeof(trans_pcie->queue_stopped));
for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
+ atomic_set(&trans_pcie->queue_stop_count[i], 0);
for_each_context(priv, ctx)
ctx->last_tx_rejected = false;
/* reset to 0 to enable all the queue first */
- priv->txq_ctx_active_msk = 0;
+ trans_pcie->txq_ctx_active_msk = 0;
BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
IWLAGN_FIRST_AMPDU_QUEUE);
int fifo = queue_to_fifo[i].fifo;
int ac = queue_to_fifo[i].ac;
- iwl_txq_ctx_activate(priv, i);
+ iwl_txq_ctx_activate(trans_pcie, i);
if (fifo == IWL_TX_FIFO_UNUSED)
continue;
if (ac != IWL_AC_UNSET)
- iwl_set_swq_id(&priv->txq[i], ac, i);
- iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
+ iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
+ fifo, 0);
}
spin_unlock_irqrestore(&trans->shrd->lock, flags);
{
int ch, txq_id;
unsigned long flags;
- struct iwl_priv *priv = priv(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* Turn off all Tx DMA fifos */
spin_lock_irqsave(&trans->shrd->lock, flags);
}
spin_unlock_irqrestore(&trans->shrd->lock, flags);
- if (!priv->txq) {
+ if (!trans_pcie->txq) {
IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
return 0;
}
}
}
- txq = &priv(trans)->txq[txq_id];
+ txq = &trans_pcie->txq[txq_id];
q = &txq->q;
/* Set up driver data for this TFD */
txq->need_update = 1;
iwl_txq_update_write_ptr(trans, txq);
} else {
- iwl_stop_queue(priv(trans), txq);
+ iwl_stop_queue(trans, txq);
}
}
return 0;
return 0;
}
-static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id,
- int ssn, u32 status, struct sk_buff_head *skbs)
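+/*
+ * iwlagn_txq_check_empty - once the aggregation queue has drained, advance
+ * the pending ADDBA/DELBA flow for this station/TID
+ */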
+static int iwlagn_txq_check_empty(struct iwl_trans *trans,
+ int sta_id, u8 tid, int txq_id)
{
- struct iwl_priv *priv = priv(trans);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
+ struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];
+
+ lockdep_assert_held(&trans->shrd->sta_lock);
+
+ switch (tid_data->agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ /* We are reclaiming the last packet of the
+ * aggregated HW queue */
+ if ((txq_id == tid_data->agg.txq_id) &&
+ (q->read_ptr == q->write_ptr)) {
+ IWL_DEBUG_HT(trans,
+ "HW queue empty: continue DELBA flow\n");
+ iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+ tid_data->agg.state = IWL_AGG_OFF;
+ iwl_stop_tx_ba_trans_ready(priv(trans),
+ NUM_IWL_RXON_CTX,
+ sta_id, tid);
+ iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(trans,
+ "HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ iwl_start_tx_ba_trans_ready(priv(trans),
+ NUM_IWL_RXON_CTX,
+ sta_id, tid);
+ }
+ break;
+ }
+
+ return 0;
+}
+
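+/*
+ * iwl_free_tfds_in_queue - bookkeeping after reclaim: decrement the per-TID
+ * count of TFDs still in the HW queue, clamping at zero if we reclaimed
+ * more frames than were thought to be in flight
+ */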
+static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
+ int sta_id, int tid, int freed)
+{
+ lockdep_assert_held(&trans->shrd->sta_lock);
+
+ if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed) {
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
+ } else {
+ IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
+ freed);
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
+ }
+}
+
+static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
+ int txq_id, int ssn, u32 status,
+ struct sk_buff_head *skbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
/* n_bd is usually 256 => n_bd - 1 = 0xff */
int tfd_num = ssn & (txq->q.n_bd - 1);
+ int freed = 0;
u8 agg_state;
bool cond;
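+ /* refresh the timestamp used by the stuck-queue watchdog */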
+ txq->time_stamp = jiffies;
+
if (txq->sched_retry) {
agg_state =
- priv->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
+ trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
} else {
cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
"scd_ssn=%d idx=%d txq=%d swq=%d\n",
ssn, tfd_num, txq_id, txq->swq_id);
- iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
+ freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
- iwl_wake_queue(priv, txq);
+ iwl_wake_queue(trans, txq);
}
+
+ iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
+ iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
}
static void iwl_trans_pcie_free(struct iwl_trans *trans)
txq_id = trans_pcie->ac_to_queue[ctx][ac];
IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
ac,
- (atomic_read(&priv(trans)->queue_stop_count[ac]) > 0)
+ (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
? "stopped" : "awake");
- iwl_wake_queue(priv(trans), &priv(trans)->txq[txq_id]);
+ iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
}
}
return iwl_trans;
}
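+/* transport op: stop the mac80211 queue backed by this HW Tx queue */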
+static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
+}
+
+#define IWL_FLUSH_WAIT_MS 2000
+
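+/*
+ * Wait for every Tx queue except the command queue to drain; give up once
+ * IWL_FLUSH_WAIT_MS has elapsed since entry.
+ */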
+static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ int cnt;
+ unsigned long now = jiffies;
+ int ret = 0;
+
+ /* waiting for all the tx frames to complete might take a while */
+ for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
+ if (cnt == trans->shrd->cmd_queue)
+ continue;
+ txq = &trans_pcie->txq[cnt];
+ q = &txq->q;
+ while (q->read_ptr != q->write_ptr && !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ msleep(1);
+
+ if (q->read_ptr != q->write_ptr) {
+ IWL_ERR(trans, "fail to flush all tx fifo queues\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * On every watchdog tick we check the (latest) timestamp. If it has not
+ * changed during the timeout period and the queue is not empty, we reset
+ * the firmware.
+ */
+static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
+ struct iwl_queue *q = &txq->q;
+ unsigned long timeout;
+
+ if (q->read_ptr == q->write_ptr) {
+ txq->time_stamp = jiffies;
+ return 0;
+ }
+
+ timeout = txq->time_stamp +
+ msecs_to_jiffies(hw_params(trans).wd_timeout);
+
+ if (time_after(jiffies, timeout)) {
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
+ hw_params(trans).wd_timeout);
+ return 1;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
const u8 *ptr;
ssize_t ret;
- if (!priv->txq) {
+ if (!trans_pcie->txq) {
IWL_ERR(trans, "txq not ready\n");
return -EAGAIN;
}
}
pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
+ txq = &trans_pcie->txq[cnt];
q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
"q[%d]: read_ptr: %u, write_ptr: %u\n",
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
char __user *user_buf,
- size_t count, loff_t *ppos) {
-
+ size_t count, loff_t *ppos)
+{
struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_priv *priv = priv(trans);
struct iwl_tx_queue *txq;
struct iwl_queue *q;
int ret;
const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
- if (!priv->txq) {
+ if (!trans_pcie->txq) {
IWL_ERR(priv, "txq not ready\n");
return -EAGAIN;
}
return -ENOMEM;
for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
+ txq = &trans_pcie->txq[cnt];
q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u stop=%d"
" swq_id=%#.2x (ac %d/hwq %d)\n",
cnt, q->read_ptr, q->write_ptr,
- !!test_bit(cnt, priv->queue_stopped),
+ !!test_bit(cnt, trans_pcie->queue_stopped),
txq->swq_id, txq->swq_id & 3,
(txq->swq_id >> 2) & 0x1f);
if (cnt >= 4)
continue;
/* for the ACs, display the stop count too */
pos += scnprintf(buf + pos, bufsz - pos,
- " stop-count: %d\n",
- atomic_read(&priv->queue_stop_count[cnt]));
+ " stop-count: %d\n",
+ atomic_read(&trans_pcie->queue_stop_count[cnt]));
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
.tx = iwl_trans_pcie_tx,
.reclaim = iwl_trans_pcie_reclaim,
- .txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
+ .tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
- .txq_agg_setup = iwl_trans_pcie_txq_agg_setup,
+ .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
.kick_nic = iwl_trans_pcie_kick_nic,
.free = iwl_trans_pcie_free,
+ .stop_queue = iwl_trans_pcie_stop_queue,
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
+
+ .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
+ .check_stuck_queue = iwl_trans_pcie_check_stuck_queue,
+
.suspend = iwl_trans_pcie_suspend,
.resume = iwl_trans_pcie_resume,
};