iwlagn: move wait_for_tx_queue_empty to transport layer
drivers/net/wireless/iwlwifi/iwl-trans.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes once the transport layer tx_free lives here */
#include "iwl-agn.h"
#include "iwl-shared.h"

static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = bus(trans)->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
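
/*
 * Illustrative sketch (not part of the driver): the goto-unwind shape
 * used by iwl_trans_rx_alloc() above. Each later allocation jumps to a
 * label that frees everything allocated before it, so every failure
 * path releases exactly what already exists:
 *
 *	a = dma_alloc_coherent(dev, A_SZ, &a_dma, GFP_KERNEL);
 *	if (!a)
 *		goto err_a;
 *	b = dma_alloc_coherent(dev, B_SZ, &b_dma, GFP_KERNEL);
 *	if (!b)
 *		goto err_b;
 *	return 0;
 * err_b:
 *	dma_free_coherent(dev, A_SZ, a, a_dma);
 * err_a:
 *	return -ENOMEM;
 *
 * (a/b/A_SZ/B_SZ are hypothetical names, for illustration only.)
 */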

static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     hw_params(trans).rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
	 * the credit mechanism of the 5000 HW RX FIFO
	 * Direct Rx interrupts to the host
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}

static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(bus(trans)->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

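/*
 * Illustrative sketch: the stop-then-poll idiom used above. Assuming
 * iwl_poll_direct_bit() returns a non-negative value once the bit is
 * observed and a negative errno on timeout, a caller could report
 * failure like this:
 *
 *	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 *	if (iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
 *				FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000) < 0)
 *		IWL_ERR(trans, "Rx DMA did not go idle in time\n");
 *
 * (The exact return convention and the timeout unit are assumptions.)
 */
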
static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

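/*
 * Illustrative sketch: how the iwl_dma_ptr helpers above pair up. The
 * struct remembers size, CPU address and DMA handle, so teardown needs
 * no arguments beyond the pointer itself:
 *
 *	struct iwl_dma_ptr kw;
 *
 *	if (iwlagn_alloc_dma_ptr(trans, &kw, IWL_KW_SIZE))
 *		return -ENOMEM;
 *	...
 *	iwlagn_free_dma_ptr(trans, &kw);	// safe even after a failed
 *						// alloc: !ptr->addr bails out
 */
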
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
					      GFP_KERNEL);
			if (!txq->cmd[i])
				goto error;
		}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != trans->shrd->cmd_queue) {
		txq->skbs = kzalloc(sizeof(txq->skbs[0]) *
				    TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->skbs) {
			IWL_ERR(trans, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->skbs = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->skbs);
	txq->skbs = NULL;
	/* since txq->cmd has been zeroed,
	 * all non-allocated cmd[i] will be NULL */
	if (txq->cmd && txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be a power of two; otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

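/*
 * Illustrative sketch: why TFD_QUEUE_SIZE_MAX must be a power of two.
 * With n_bd == 256 the wrap in the queue helpers reduces to a mask
 * (cf. the "n_bd - 1 = 0xff" trick used in the reclaim path below):
 *
 *	next = (idx + 1) & (n_bd - 1);	// 255 -> 0, everything else +1
 *
 * For a non-power-of-two size the mask would corrupt indexes, which is
 * exactly what the BUILD_BUG_ON above guards against.
 */
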
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_priv *priv = priv(trans);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_priv *priv = priv(trans);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = bus(trans)->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_priv *priv = priv(trans);

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(trans).max_txq_num; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(priv->txq);
	priv->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param trans
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_priv *priv = priv(trans);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
			       sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(priv->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			    hw_params(trans).max_txq_num, GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &priv->txq[txq_id], slots_num,
					  txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}

static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_priv *priv = priv(trans);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!priv->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(bus(trans), SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &priv->txq[txq_id], slots_num,
					 txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
	struct iwl_trans *trans = trans(priv);
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

	if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
		iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

static int iwl_nic_init(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_priv *priv = priv(trans);

	/* nic_init */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_apm_init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(bus(trans), CSR_INT_COALESCING,
		   IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_set_pwr_vmain(priv);

	priv->cfg->lib->nic_config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (hw_params(trans).shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
			    0x800FFFFF);
	}

	set_bit(STATUS_INIT, &trans->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 on success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}

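/*
 * Illustrative sketch (hypothetical caller): the prepare/ready handshake
 * above is intended to gate any further NIC setup, the same way
 * iwl_trans_pcie_start_device() uses it below:
 *
 *	if (iwl_trans_pcie_prepare_card_hw(trans)) {
 *		IWL_WARN(trans, "Exit HW not ready\n");
 *		return -EIO;
 *	}
 *	// hardware acknowledged NIC_READY; continue with nic_init
 */
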
#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};
static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};

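/*
 * Illustrative sketch: how the tables above are consumed (the real
 * lookup happens in iwl_trans_pcie_tx() below). A BSS-context frame
 * whose skb maps to IEEE80211_AC_VI (index 1) resolves as:
 *
 *	txq_id = iwlagn_bss_ac_to_queue[1];	// -> queue 1
 *	fifo   = iwlagn_bss_ac_to_fifo[1];	// -> IWL_TX_FIFO_VI
 *
 * while the same AC in the PAN context lands on queue 6 and
 * IWL_TX_FIFO_VI_IPAN.
 */
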
static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
{
	int ret;
	struct iwl_priv *priv = priv(trans);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;

	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;

	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;

	if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	if (iwl_is_rfkill(trans->shrd)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
		iwl_enable_interrupts(trans);
		return -ERFKILL;
	}

	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
 * must be called under priv->shrd->lock and with MAC access held
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	iwl_write_prph(bus(trans), SCD_TXFACT, mask);
}

static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	struct iwl_priv *priv = priv(trans);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans->shrd->lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);

	iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans));
	iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(trans).max_txq_num; i++) {
		iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
		       IWL_MASK(0, hw_params(trans).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(trans, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id;
	unsigned long flags;
	struct iwl_priv *priv = priv(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans->shrd->lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(bus(trans),
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(trans, "Failed on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(bus(trans),
					      FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	if (!priv->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}

static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* wait to make sure we flush pending tasklet */
	synchronize_irq(bus(trans)->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);
}

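/*
 * Illustrative sketch of the quiesce ordering used above; any other
 * order leaves a window where a still-running handler re-arms the
 * tasklet after it has been killed:
 *
 *	iwl_disable_interrupts(trans);		// 1. mask the source
 *	synchronize_irq(bus(trans)->irq);	// 2. wait out in-flight handlers
 *	tasklet_kill(&trans_pcie->irq_tasklet);	// 3. tasklet can no longer re-arm
 */
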
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	/* stop and reset the on-board processor */
	iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	iwl_trans_pcie_disable_sync_irq(trans);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
		iwl_trans_tx_stop(trans);
		iwl_trans_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv(trans));
}

static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		struct iwl_device_cmd *dev_cmd, u8 ctx, u8 sta_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_cmd *tx_cmd = &dev_cmd->cmd.tx;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	u8 wait_write_ptr = 0;
	u8 txq_id;
	u8 tid = 0;
	bool is_agg = false;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = trans_pcie->mcast_queue[ctx];

		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id =
			trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		tid_data = &trans->shrd->tid_data[sta_id][tid];

		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			return -1;

		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state == IWL_AGG_ON) {
			txq_id = tid_data->agg.txq_id;
			is_agg = true;
		}
	}

	txq = &priv(trans)->txq[txq_id];
	q = &txq->q;

	/* Set up driver data for this TFD */
	txq->skbs[q->write_ptr] = skb;
	txq->cmd[q->write_ptr] = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(bus(trans)->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
			dma_unmap_single(bus(trans)->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (is_agg)
		iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
						  le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv(trans),
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	if (ieee80211_is_data_qos(fc)) {
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			trans->shrd->tid_data[sta_id][tid].seq_number =
				seq_number;
	}

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(priv(trans), txq);
		}
	}
	return 0;
}

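/*
 * Illustrative sketch: the dword-alignment arithmetic used above.
 * Rounding with
 *
 *	firstlen = (len + 3) & ~3;
 *
 * pads e.g. len == 58 up to 60 while leaving len == 60 unchanged; when
 * padding was added, TX_CMD_FLG_MH_PAD_MSK tells the device to skip the
 * 2 extra bytes after the MAC header.
 */
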
static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(bus(trans), CSR_RESET, 0);
}

static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)trans);

	iwl_alloc_isr_ict(trans);

	err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
			  DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
		iwl_free_isr_ict(trans);
		return err;
	}

	INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
	return 0;
}

static int iwlagn_txq_check_empty(struct iwl_trans *trans,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv(trans)->txq[txq_id].q;
	struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];

	lockdep_assert_held(&trans->shrd->sta_lock);

	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			IWL_DEBUG_HT(trans,
				"HW queue empty: continue DELBA flow\n");
			iwl_trans_pcie_txq_agg_disable(trans, txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			iwl_stop_tx_ba_trans_ready(priv(trans),
						   NUM_IWL_RXON_CTX,
						   sta_id, tid);
			iwl_wake_queue(priv(trans), &priv(trans)->txq[txq_id]);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(trans,
				"HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			iwl_start_tx_ba_trans_ready(priv(trans),
						    NUM_IWL_RXON_CTX,
						    sta_id, tid);
		}
		break;
	}

	return 0;
}

static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
			   int sta_id, int tid, int freed)
{
	lockdep_assert_held(&trans->shrd->sta_lock);

	if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
	else {
		IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
			trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
			freed);
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
	}
}

static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
		      int txq_id, int ssn, u32 status,
		      struct sk_buff_head *skbs)
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;
	u8 agg_state;
	bool cond;

	if (txq->sched_retry) {
		agg_state =
			trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
		cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
	} else {
		cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
	}

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
				"scd_ssn=%d idx=%d txq=%d swq=%d\n",
				ssn, tfd_num, txq_id, txq->swq_id);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
			iwl_wake_queue(priv(trans), txq);
	}

	iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
	iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
}

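/*
 * Illustrative sketch: mapping a scheduler SSN onto a ring index in the
 * reclaim path above. With n_bd == 256 the index is just the low byte:
 *
 *	tfd_num = 0x523 & (256 - 1);	// -> 0x23
 *
 * so reclaim walks read_ptr forward until it reaches entry 0x23.
 */
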
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	iwl_trans_pcie_tx_free(trans);
	iwl_trans_pcie_rx_free(trans);
	free_irq(bus(trans)->irq, trans);
	iwl_free_isr_ict(trans);
	trans->shrd->trans = NULL;
	kfree(trans);
}

#ifdef CONFIG_PM

static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	/*
	 * This function is called when the system goes into suspend.
	 * mac80211 will call iwl_mac_stop() from its suspend handler
	 * first, but since iwl_mac_stop() has no knowledge of who the
	 * caller is, it will not call apm_ops.stop() to stop the DMA
	 * operation. Call apm_ops.stop() here to make sure the DMA is
	 * stopped.
	 *
	 * But of course ... if we have configured WoWLAN then we did other
	 * things already :-)
	 */
	if (!trans->shrd->wowlan)
		iwl_apm_stop(priv(trans));

	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill = false;

	iwl_enable_interrupts(trans);

	if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	wiphy_rfkill_set_hw_state(priv(trans)->hw->wiphy, hw_rfkill);

	return 0;
}
#else /* CONFIG_PM */
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{ return 0; }

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{ return 0; }

#endif /* CONFIG_PM */

static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
					  u8 ctx)
{
	u8 ac, txq_id;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	for (ac = 0; ac < AC_NUM; ac++) {
		txq_id = trans_pcie->ac_to_queue[ctx][ac];
		IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
			ac,
			(atomic_read(&priv(trans)->queue_stop_count[ac]) > 0)
			      ? "stopped" : "awake");
		iwl_wake_queue(priv(trans), &priv(trans)->txq[txq_id]);
	}
}

const struct iwl_trans_ops trans_ops_pcie;

static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
{
	struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
					      sizeof(struct iwl_trans_pcie),
					      GFP_KERNEL);
	if (iwl_trans) {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
		iwl_trans->ops = &trans_ops_pcie;
		iwl_trans->shrd = shrd;
		trans_pcie->trans = iwl_trans;
		spin_lock_init(&iwl_trans->hcmd_lock);
	}

	return iwl_trans;
}

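/*
 * Illustrative sketch: the single-allocation layout created above,
 * assuming IWL_TRANS_GET_PCIE_TRANS() points just past the common
 * struct:
 *
 *	+------------------+-----------------------+
 *	| struct iwl_trans | struct iwl_trans_pcie |
 *	+------------------+-----------------------+
 *	^ kzalloc() result ^ roughly trans + 1
 *
 * so one kzalloc()/kfree() pair covers both the generic transport state
 * and the PCIe-private area (see iwl_trans_pcie_free() above).
 */
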
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		if (cnt == trans->shrd->cmd_queue)
			continue;
		txq = &priv(trans)->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "failed to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

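/*
 * Illustrative sketch (hypothetical caller): with this hook in the
 * transport layer, iwlagn flushes through the transport API rather than
 * touching priv->txq directly, e.g.:
 *
 *	if (iwl_trans_wait_tx_queue_empty(trans(priv)))
 *		IWL_ERR(priv, "timed out draining Tx queues\n");
 *
 * (iwl_trans_wait_tx_queue_empty() is assumed to be the iwl-trans.h
 * wrapper dispatching through trans->ops to the function above.)
 */
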
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)
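
/*
 * Illustrative sketch (hypothetical call site): the macro above is meant
 * to be used from a debugfs-registration helper, e.g.
 *
 *	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
 *
 * which expands to debugfs_create_file("rx_queue", ...) wired to the
 * iwl_dbgfs_rx_queue_ops built by the OPS macros below.
 */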

/* file operation */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
					char __user *user_buf,		\
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);


static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
					  char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_priv *priv = priv(trans);
	int pos = 0, ofs = 0;
	int cnt = 0, entry;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char *buf;
	int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
		(hw_params(trans).max_txq_num * 32 * 8) + 400;
	const u8 *ptr;
	ssize_t ret;

	if (!priv->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Can not allocate buffer\n");
		return -ENOMEM;
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		txq = &priv->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"q[%d]: read_ptr: %u, write_ptr: %u\n",
				cnt, q->read_ptr, q->write_ptr);
	}
	if (priv->tx_traffic &&
	    (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
		ptr = priv->tx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Tx Traffic idx: %u\n", priv->tx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++, ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
	pos += scnprintf(buf + pos, bufsz - pos,
			"read: %u, write: %u\n",
			 rxq->read, rxq->write);

	if (priv->rx_traffic &&
	    (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
		ptr = priv->rx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Rx Traffic idx: %u\n", priv->rx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++, ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int traffic_log;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &traffic_log) != 1)
		return -EFAULT;
	if (traffic_log == 0)
		iwl_reset_traffic_log(priv(trans));

	return count;
}

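/*
 * Editor's note: illustrative usage, assuming the transport's debugfs
 * directory has been registered (the path below is hypothetical):
 *
 *   cat /sys/kernel/debug/.../traffic_log       # dump queue pointers and,
 *                                               # with IWL_DL_TX/IWL_DL_RX
 *                                               # set, the raw traffic log
 *   echo 0 > /sys/kernel/debug/.../traffic_log  # reset the traffic log
 *
 * Any written value other than 0 is parsed but has no effect.
 */
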
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_priv *priv = priv(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;

	if (!priv->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		txq = &priv->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u stop=%d"
				" swq_id=%#.2x (ac %d/hwq %d)\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, priv->queue_stopped),
				txq->swq_id, txq->swq_id & 3,
				(txq->swq_id >> 2) & 0x1f);
		if (cnt >= 4)
			continue;
		/* for the ACs, display the stop count too */
		pos += scnprintf(buf + pos, bufsz - pos,
				" stop-count: %d\n",
				atomic_read(&priv->queue_stop_count[cnt]));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

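/*
 * Editor's note: the (ac %d/hwq %d) decoding above assumes the swq_id
 * packing used by this driver, swq_id = (hwq << 2) | ac: the two low
 * bits carry the access category and the next five bits the hardware
 * queue, so e.g. swq_id 0x0a reads back as ac 2 on hwq 2.
 */
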
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
			"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

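/*
 * Editor's note: closed_rb_num is the device-updated count of closed
 * receive buffers in the rb_stts status area; only its low 12 bits are
 * meaningful, hence the 0x0FFF mask above.
 */
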
static ssize_t iwl_dbgfs_log_event_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret;

	ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
		kfree(buf);
	}
	return ret;
}

static ssize_t iwl_dbgfs_log_event_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	u32 event_log_flag;
	char buf[8];
	int buf_size;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%u", &event_log_flag) != 1)
		return -EFAULT;
	if (event_log_flag == 1)
		iwl_dump_nic_event_log(trans, true, NULL, false);

	return count;
}

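/*
 * Editor's note: illustrative usage (hypothetical path):
 *
 *   cat .../log_event        # read the uCode event log into user space
 *   echo 1 > .../log_event   # dump the full event log to the kernel log
 *
 * Writes of any value other than 1 are accepted but do nothing.
 */
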
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Can not allocate buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code: 0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

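/*
 * Editor's note: illustrative usage (hypothetical path):
 *
 *   cat .../interrupt        # print the interrupt statistics report
 *   echo 0 > .../interrupt   # clear all counters
 *
 * The written value is parsed as hex; only 0 triggers the reset.
 */
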
static const char *get_csr_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
}

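/*
 * Editor's note: IWL_CMD() is defined elsewhere in the driver; the
 * stringification idiom this switch relies on is, roughly:
 *
 *   #define IWL_CMD(x) case x: return #x
 *
 * so each entry maps a CSR register constant to its own name.
 */
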
void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(bus(trans), csr_tbl[i]));
	}
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}

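/*
 * Editor's note: the integer written to the csr file is validated but
 * otherwise ignored; any successfully parsed write, e.g.
 *
 *   echo 1 > .../csr   (hypothetical path)
 *
 * dumps the entire CSR table above to the kernel log via iwl_dump_csr().
 */
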
static const char *get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}

int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				" %34s: 0X%08x\n",
				get_fh_string(fh_tbl[i]),
				iwl_read_direct32(bus(trans), fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(bus(trans), fh_tbl[i]));
	}
	return 0;
}

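/*
 * Editor's note: iwl_dump_fh() has two modes.  With display set and
 * CONFIG_IWLWIFI_DEBUG enabled it allocates a buffer, formats the FH
 * registers into it, stores it through *buf and returns the length;
 * otherwise it logs the registers and returns 0 without touching *buf.
 * Callers that inspect *buf afterwards should therefore initialize it
 * to NULL, as the reader below does.
 */
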
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret;

	ret = pos = iwl_dump_fh(trans, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					struct dentry *dir)
{
	DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;
}
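/*
 * Editor's note: DEBUGFS_ADD_FILE() is defined earlier in this file; it
 * presumably boils down to a debugfs_create_file(#name, mode, dir, trans,
 * &iwl_dbgfs_<name>_ops) call, so each file lands in the directory the
 * caller passes in and hands the iwl_trans back to the handlers through
 * file->private_data, as seen in the read/write routines above.
 */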
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					struct dentry *dir)
{ return 0; }

#endif /* CONFIG_IWLWIFI_DEBUGFS */

const struct iwl_trans_ops trans_ops_pcie = {
	.alloc = iwl_trans_pcie_alloc,
	.request_irq = iwl_trans_pcie_request_irq,
	.start_device = iwl_trans_pcie_start_device,
	.prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
	.stop_device = iwl_trans_pcie_stop_device,

	.tx_start = iwl_trans_pcie_tx_start,
	.wake_any_queue = iwl_trans_pcie_wake_any_queue,

	.send_cmd = iwl_trans_pcie_send_cmd,
	.send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
	.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.kick_nic = iwl_trans_pcie_kick_nic,

	.free = iwl_trans_pcie_free,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,

	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
};
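
/*
 * Editor's note: a minimal sketch of how this ops table is consumed.
 * The upper layer (iwlagn) is assumed to reach the transport only
 * through thin inline wrappers in iwl-trans.h, along the lines of:
 *
 *   static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
 *   {
 *           return trans->ops->wait_tx_queue_empty(trans);
 *   }
 *
 * which is what allows wait_for_tx_queue_empty to move behind the
 * transport abstraction in this patch.
 */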