iwlagn: remove priv dereferences from the transport layer
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

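/*
 * Worked example (illustrative only, not part of the original file): with
 * byte_cnt = 100, and assuming IWL_TX_CRC_SIZE and IWL_TX_DELIMITER_SIZE
 * are 4 bytes each, len = 108 = 0x06C. For sta_id = 3 the packed entry is
 * (0x06C & 0xFFF) | (3 << 12) = 0x306C, i.e. the station id occupies the
 * top 4 bits of the entry and the 12-bit byte count the rest.
 */
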
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
					txq->q.write_ptr | (txq_id << 8));

			/*
			 * else not in power-save mode,
			 * uCode will never sleep when we're
			 * trying to tx (during RFKILL, we're not trying to tx).
			 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
					txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

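/*
 * Illustrative note (not in the original source): HBUS_TARG_WRPTR takes
 * the write index in the low byte and the queue id shifted into the next
 * byte, so e.g. queue 4 with write_ptr 7 is written as 0x0407.
 */
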
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

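/*
 * Worked example (illustrative only): for a 36-bit DMA address
 * 0x9_1234_5678 with len = 24, tb->lo holds 0x12345678 and hi_n_len
 * packs the upper 4 address bits into its low nibble and the length
 * into the upper 12 bits: (24 << 4) | 0x9 = 0x0189.
 */
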
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* TODO: issue fatal error, this is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			int index)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
			 DMA_TO_DEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

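/*
 * Illustrative sketch (not part of the driver): mapping a hypothetical
 * payload buffer "buf" of "len" bytes and chaining it onto the current
 * TFD as an additional TB, mirroring what iwl_enqueue_hcmd() does below.
 */
#if 0
	dma_addr_t phys = dma_map_single(bus(trans)->dev, buf, len,
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys)))
		return -ENOMEM;
	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys, len, 0);
#endif
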
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if free space
 * becomes > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

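/*
 * Worked example (illustrative only): with n_window = 64, read_ptr = 8
 * and write_ptr = 10, s starts at -2, gains n_window to become 62, and
 * the 2-entry reserve leaves 60 reported free slots.
 */
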
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

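/*
 * Illustrative usage (not part of this file): the command queue is
 * typically initialized with the full TFD ring and a smaller window,
 * e.g. something like
 *
 *	iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, TFD_CMD_SLOTS, txq_id);
 *
 * assuming TFD_QUEUE_SIZE_MAX and TFD_CMD_SLOTS are the constants this
 * driver uses elsewhere for the ring and window sizes.
 */
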
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}

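/*
 * Illustrative note (not in the original source): the translation table
 * packs two queues per 32-bit word, so odd queue ids land in the upper
 * halfword and even ids in the lower one. Assuming BUILD_RAxTID() packs
 * the station id above a 4-bit TID, as its use below suggests, sta_id = 5
 * and tid = 2 would give a RA/TID value of (5 << 4) | 2 = 0x52 before
 * masking.
 */
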
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

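/*
 * Illustrative note (not in the original source): this is a two-step
 * lookup, TID -> access category -> FIFO. Assuming the standard 802.1D
 * mapping used by mac80211's tid_to_ac table, TID 0 resolves to the
 * best-effort AC, and the context's ac_to_fifo[] then selects the HW
 * FIFO wired to that AC.
 */
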
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				enum iwl_rxon_context_id ctx, int sta_id,
				int tid, u16 *ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int txq_id;

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(trans, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
	} else {
		IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
			     " queue\n", tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	return 0;
}

void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int read_ptr, write_ptr;
	struct iwl_tid_data *tid_data;
	int txq_id;

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues <= txq_id)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return -EINVAL;
	}

	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(trans, "Stopping AGG while state not ON "
			 "or starting\n");
	}

	write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
	read_ptr = trans_pcie->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
		trans->shrd->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(trans, "HW queue is empty\n");
turn_off:
	trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&trans->shrd->sta_lock);
	spin_lock(&trans->shrd->lock);

	iwl_trans_pcie_txq_agg_disable(trans, txq_id);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);

	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport context
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
743 IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}

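/*
 * Illustrative sketch (not part of the driver): building a host command
 * with one copied chunk and one NOCOPY chunk, the two layouts the loops
 * above distinguish. "scan_cmd", "scan_data" and "scan_len" are
 * hypothetical names.
 */
#if 0
	struct iwl_host_cmd hcmd = {
		.id = REPLY_SCAN_CMD,
		.len = { sizeof(*scan_cmd), scan_len, },
		.data = { scan_cmd, scan_data, },
		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
		.flags = CMD_SYNC,
	};

	ret = iwl_enqueue_hcmd(trans, &hcmd);
#endif
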
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv(trans), false);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(trans->shrd, cmd, pkt);

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&trans->shrd->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}

const char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_TXFIFO_FLUSH);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
		IWL_CMD(COEX_EVENT_CMD);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
		IWL_CMD(CALIBRATION_CFG_CMD);
		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
		IWL_CMD(TEMPERATURE_NOTIFICATION);
		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
		IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
		IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
		IWL_CMD(REPLY_BT_COEX_PROT_ENV);
		IWL_CMD(REPLY_WIPAN_PARAMS);
		IWL_CMD(REPLY_WIPAN_RXON);
		IWL_CMD(REPLY_WIPAN_RXON_TIMING);
		IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
		IWL_CMD(REPLY_WIPAN_QOS_PARAM);
		IWL_CMD(REPLY_WIPAN_WEPKEY);
		IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
		IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
		IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
		IWL_CMD(REPLY_WOWLAN_PATTERNS);
		IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
		IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
		IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
		IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
		IWL_CMD(REPLY_WOWLAN_GET_STATUS);
	default:
		return "UNKNOWN";

	}
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static void iwl_generic_cmd_callback(struct iwl_shared *shrd,
				     struct iwl_device_cmd *cmd,
				     struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(shrd->trans, "Bad return from %s (0x%08X)\n",
			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(shrd->trans, "back from %s (0x%08X)\n",
				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(shrd->trans, "back from %s (0x%08X)\n",
				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
	}
#endif
}

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = iwl_generic_cmd_callback;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	/* A synchronous command can not have a callback set. */
	if (WARN_ON(cmd->callback))
		return -EINVAL;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_interruptible_timeout(trans->shrd->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for "
				       "command %s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
				u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_trans_pcie_send_cmd(trans, &cmd);
}

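/*
 * Illustrative usage (not part of this file): sending a fixed-size
 * command synchronously through the PDU helper. "bt_cmd" and its type
 * are assumptions taken from iwl-commands.h; CMD_SYNC is assumed to be
 * the default (zero) flag value as elsewhere in this driver.
 */
#if 0
	struct iwl_bt_cmd bt_cmd = { .flags = 3 };

	ret = iwl_trans_pcie_send_cmd_pdu(trans, REPLY_BT_CONFIG,
					  CMD_SYNC, sizeof(bt_cmd), &bt_cmd);
#endif
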
/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
			   q->read_ptr, index);

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
		freed++;
	}
	return freed;
}