iwlwifi: Move HBUS address to iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-4965.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>

#include "iwl-4965.h"
#include "iwl-helpers.h"

static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv);

#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)          \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,            \
				    IWL_RATE_SISO_##s##M_PLCP,       \
				    IWL_RATE_MIMO_##s##M_PLCP,       \
				    IWL_RATE_##r##M_IEEE,            \
				    IWL_RATE_##ip##M_INDEX,          \
				    IWL_RATE_##in##M_INDEX,          \
				    IWL_RATE_##rp##M_INDEX,          \
				    IWL_RATE_##rn##M_INDEX,          \
				    IWL_RATE_##pp##M_INDEX,          \
				    IWL_RATE_##np##M_INDEX }

/*
 * Parameter order:
 *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),          /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),         /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),    /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),    /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),    /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),    /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),    /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV), /* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV), /* 60mbps */
};
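
/*
 * Reading aid (purely mechanical macro expansion, added for clarity): the
 * 6 Mbps entry IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11) expands to
 *
 *	[IWL_RATE_6M_INDEX] = { IWL_RATE_6M_PLCP,
 *				IWL_RATE_SISO_6M_PLCP,
 *				IWL_RATE_MIMO_6M_PLCP,
 *				IWL_RATE_6M_IEEE,
 *				IWL_RATE_5M_INDEX,
 *				IWL_RATE_9M_INDEX,
 *				IWL_RATE_5M_INDEX,
 *				IWL_RATE_11M_INDEX,
 *				IWL_RATE_5M_INDEX,
 *				IWL_RATE_11M_INDEX }
 *
 * i.e. the PLCP codes for legacy/SISO/MIMO, the IEEE rate value, and the
 * previous/next rate indices described in the comment above.
 */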

#ifdef CONFIG_IWL4965_HT

static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};

#endif /*CONFIG_IWL4965_HT */

static int is_fat_channel(__le32 rxon_flags)
{
	return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
	       (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
}

static u8 is_single_stream(struct iwl4965_priv *priv)
{
#ifdef CONFIG_IWL4965_HT
	if (!priv->current_ht_config.is_ht ||
	    (priv->current_ht_config.supp_mcs_set[1] == 0) ||
	    (priv->ps_mode == IWL_MIMO_PS_STATIC))
		return 1;
#else
	return 1;
#endif /*CONFIG_IWL4965_HT */
	return 0;
}

int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* 4965 HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);

		if (idx >= IWL_RATE_MIMO_6M_PLCP)
			idx = idx - IWL_RATE_MIMO_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* 4965 legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++)
			if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	return -1;
}

/**
 * translate ucode response to mac80211 tx status control values
 */
void iwl4965_hwrate_to_tx_control(struct iwl4965_priv *priv, u32 rate_n_flags,
				  struct ieee80211_tx_control *control)
{
	int rate_index;

	control->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_A_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		control->flags |= IEEE80211_TXCTL_OFDM_HT;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_FAT_MSK)
		control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		control->flags |= IEEE80211_TXCTL_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		control->flags |= IEEE80211_TXCTL_SHORT_GI;
	/* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
	 * IEEE80211_BAND_2GHZ band as it contains all the rates */
	rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
	if (rate_index == -1)
		control->tx_rate = NULL;
	else
		control->tx_rate =
			&priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
}

/*
 * Determine how many receiver/antenna chains to use.
 * More provides better reception via diversity. Fewer saves power.
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl4965_get_rx_chain_counter(struct iwl4965_priv *priv,
					u8 *idle_state, u8 *rx_state)
{
	u8 is_single = is_single_stream(priv);
	u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;

	/* # of Rx chains to use when expecting MIMO. */
	if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
		*rx_state = 2;
	else
		*rx_state = 3;

	/* # Rx chains when idling and maybe trying to save power */
	switch (priv->ps_mode) {
	case IWL_MIMO_PS_STATIC:
	case IWL_MIMO_PS_DYNAMIC:
		*idle_state = (is_cam) ? 2 : 1;
		break;
	case IWL_MIMO_PS_NONE:
		*idle_state = (is_cam) ? *rx_state : 1;
		break;
	default:
		*idle_state = 1;
		break;
	}

	return 0;
}
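
/*
 * A few concrete cases of the logic above (derived directly from the code,
 * shown only as a reading aid):
 *   - MIMO capable, CAM (no power save), ps_mode == IWL_MIMO_PS_NONE:
 *	rx_state = 3, idle_state = 3
 *   - MIMO capable, power-saving, ps_mode == IWL_MIMO_PS_DYNAMIC:
 *	rx_state = 3, idle_state = 1
 *   - single stream (or static MIMO power save while not in CAM):
 *	rx_state = 2, idle_state per the switch above
 */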

int iwl4965_hw_rxq_stop(struct iwl4965_priv *priv)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl4965_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* stop Rx DMA */
	iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	rc = iwl4965_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				     (1 << 24), 1000);
	if (rc < 0)
		IWL_ERROR("Can't stop Rx DMA.\n");

	iwl4965_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

u8 iwl4965_hw_find_station(struct iwl4965_priv *priv, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IWL_INVALID_STATION;
	unsigned long flags;
	DECLARE_MAC_BUF(mac);

	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
	    (priv->iw_mode == IEEE80211_IF_TYPE_AP))
		start = IWL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return IWL4965_BROADCAST_ID;

	spin_lock_irqsave(&priv->sta_lock, flags);
	for (i = start; i < priv->hw_setting.max_stations; i++)
		if ((priv->stations[i].used) &&
		    (!compare_ether_addr
		     (priv->stations[i].sta.sta.addr, addr))) {
			ret = i;
			goto out;
		}

	IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
			      print_mac(mac, addr), priv->num_stations);

 out:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}

static int iwl4965_nic_set_pwr_src(struct iwl4965_priv *priv, int pwr_max)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl4965_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	if (!pwr_max) {
		u32 val;

		ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
					    &val);

		if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
			iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);
	} else
		iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				~APMG_PS_CTRL_MSK_PWR_SRC);

	iwl4965_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}

static int iwl4965_rx_init(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
{
	int rc;
	unsigned long flags;
	unsigned int rb_size;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl4965_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	if (iwl4965_param_amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			       rxq->dma_addr >> 8);

	/* Tell device where in DRAM to update its Rx status */
	iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			       (priv->hw_setting.shared_phys +
				offsetof(struct iwl4965_shared, val0)) >> 4);

	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k or 8k, 256 RBDs */
	iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			       FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			       FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			       rb_size |
			     /*0x10 << 4 | */
			       (RX_QUEUE_SIZE_LOG <<
				FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));

	/*
	 * iwl4965_write32(priv,CSR_INT_COAL_REG,0);
	 */

	iwl4965_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
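
/*
 * Reading aid (inferred from the shifts in iwl4965_rx_init() above, not from
 * documentation): writing dma_addr >> 8 and the status address >> 4 means
 * the low address bits are dropped, so the RBD circular buffer effectively
 * needs 256-byte alignment and the shared Rx status area 16-byte alignment.
 */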

/* Tell 4965 where to find the "keep warm" buffer */
static int iwl4965_kw_init(struct iwl4965_priv *priv)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl4965_grab_nic_access(priv);
	if (rc)
		goto out;

	iwl4965_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
			     priv->kw.dma_addr >> 4);
	iwl4965_release_nic_access(priv);
out:
	spin_unlock_irqrestore(&priv->lock, flags);
	return rc;
}

static int iwl4965_kw_alloc(struct iwl4965_priv *priv)
{
	struct pci_dev *dev = priv->pci_dev;
	struct iwl4965_kw *kw = &priv->kw;

	kw->size = IWL4965_KW_SIZE;	/* TBW need set somewhere else */
	kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
	if (!kw->v_addr)
		return -ENOMEM;

	return 0;
}

#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")

/**
 * iwl4965_set_fat_chan_info - Copy fat channel info into driver's priv.
 *
 * Does not set up a command, or touch hardware.
 */
int iwl4965_set_fat_chan_info(struct iwl4965_priv *priv,
			      enum ieee80211_band band, u16 channel,
			      const struct iwl4965_eeprom_channel *eeprom_ch,
			      u8 fat_extension_channel)
{
	struct iwl4965_channel_info *ch_info;

	ch_info = (struct iwl4965_channel_info *)
			iwl4965_get_channel_info(priv, band, channel);

	if (!is_channel_valid(ch_info))
		return -1;

	IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
			" %ddBm): Ad-Hoc %ssupported\n",
			ch_info->channel,
			is_channel_a_band(ch_info) ?
			"5.2" : "2.4",
			CHECK_AND_PRINT(IBSS),
			CHECK_AND_PRINT(ACTIVE),
			CHECK_AND_PRINT(RADAR),
			CHECK_AND_PRINT(WIDE),
			CHECK_AND_PRINT(NARROW),
			CHECK_AND_PRINT(DFS),
			eeprom_ch->flags,
			eeprom_ch->max_power_avg,
			((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
			 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
			"" : "not ");

	ch_info->fat_eeprom = *eeprom_ch;
	ch_info->fat_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->fat_curr_txpow = eeprom_ch->max_power_avg;
	ch_info->fat_min_power = 0;
	ch_info->fat_scan_power = eeprom_ch->max_power_avg;
	ch_info->fat_flags = eeprom_ch->flags;
	ch_info->fat_extension_channel = fat_extension_channel;

	return 0;
}

/**
 * iwl4965_kw_free - Free the "keep warm" buffer
 */
static void iwl4965_kw_free(struct iwl4965_priv *priv)
{
	struct pci_dev *dev = priv->pci_dev;
	struct iwl4965_kw *kw = &priv->kw;

	if (kw->v_addr) {
		pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
		memset(kw, 0, sizeof(*kw));
	}
}

/**
 * iwl4965_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initialises them again
 *
 * @param priv
 * @return error code
 */
static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv)
{
	int rc = 0;
	int txq_id, slots_num;
	unsigned long flags;

	iwl4965_kw_free(priv);

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl4965_hw_txq_ctx_free(priv);

	/* Alloc keep-warm buffer */
	rc = iwl4965_kw_alloc(priv);
	if (rc) {
		IWL_ERROR("Keep Warm allocation failed");
		goto error_kw;
	}

	spin_lock_irqsave(&priv->lock, flags);

	rc = iwl4965_grab_nic_access(priv);
	if (unlikely(rc)) {
		IWL_ERROR("TX reset failed");
		spin_unlock_irqrestore(&priv->lock, flags);
		goto error_reset;
	}

	/* Turn off all Tx DMA channels */
	iwl4965_write_prph(priv, KDR_SCD_TXFACT, 0);
	iwl4965_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Tell 4965 where to find the keep-warm buffer */
	rc = iwl4965_kw_init(priv);
	if (rc) {
		IWL_ERROR("kw_init failed\n");
		goto error_reset;
	}

	/* Alloc and init all (default 16) Tx queues,
	 * including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					   txq_id);
		if (rc) {
			IWL_ERROR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return rc;

 error:
	iwl4965_hw_txq_ctx_free(priv);
 error_reset:
	iwl4965_kw_free(priv);
 error_kw:
	return rc;
}

int iwl4965_hw_nic_init(struct iwl4965_priv *priv)
{
	int rc;
	unsigned long flags;
	struct iwl4965_rx_queue *rxq = &priv->rxq;
	u8 rev_id;
	u32 val;
	u8 val_link;

	iwl4965_power_init_handle(priv);

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);

	iwl4965_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	rc = iwl4965_poll_bit(priv, CSR_GP_CNTRL,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (rc < 0) {
		spin_unlock_irqrestore(&priv->lock, flags);
		IWL_DEBUG_INFO("Failed to init the card\n");
		return rc;
	}

	rc = iwl4965_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);

	iwl4965_write_prph(priv, APMG_CLK_CTRL_REG,
			 APMG_CLK_VAL_DMA_CLK_RQT |
			 APMG_CLK_VAL_BSM_CLK_RQT);
	iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);

	udelay(20);

	iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl4965_release_nic_access(priv);
	iwl4965_write32(priv, CSR_INT_COALESCING, 512 / 32);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Determine HW type */
	rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
	if (rc)
		return rc;

	IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id);

	iwl4965_nic_set_pwr_src(priv, 1);
	spin_lock_irqsave(&priv->lock, flags);

	if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) {
		pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
		/* Enable No Snoop field */
		pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
				       val & ~(1 << 11));
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
		IWL_ERROR("Older EEPROM detected! Aborting.\n");
		return -EINVAL;
	}

	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);

	/* disable L1 entry -- workaround for pre-B1 */
	pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02);

	spin_lock_irqsave(&priv->lock, flags);

	/* set CSR_HW_CONFIG_REG for uCode use */

	iwl4965_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR49_HW_IF_CONFIG_REG_BIT_4965_R |
			CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI |
			CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);

	rc = iwl4965_grab_nic_access(priv);
	if (rc < 0) {
		spin_unlock_irqrestore(&priv->lock, flags);
		IWL_DEBUG_INFO("Failed to init the card\n");
		return rc;
	}

	iwl4965_read_prph(priv, APMG_PS_CTRL_REG);
	iwl4965_set_bits_prph(priv, APMG_PS_CTRL_REG,
			    APMG_PS_CTRL_VAL_RESET_REQ);
	udelay(5);
	iwl4965_clear_bits_prph(priv, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_RESET_REQ);

	iwl4965_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_hw_card_show_info(priv);

	/* end nic_init */

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		rc = iwl4965_rx_queue_alloc(priv);
		if (rc) {
			IWL_ERROR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl4965_rx_queue_reset(priv, rxq);

	iwl4965_rx_replenish(priv);

	iwl4965_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl4965_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate and init all Tx and Command queues */
	rc = iwl4965_txq_ctx_reset(priv);
	if (rc)
		return rc;

	if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");

	if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}

int iwl4965_hw_nic_stop_master(struct iwl4965_priv *priv)
{
	int rc = 0;
	u32 reg_val;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* set stop master bit */
	iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	reg_val = iwl4965_read32(priv, CSR_GP_CNTRL);

	if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
	    (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
		IWL_DEBUG_INFO("Card in power save, master is already "
			       "stopped\n");
	else {
		rc = iwl4965_poll_bit(priv, CSR_RESET,
				  CSR_RESET_REG_FLAG_MASTER_DISABLED,
				  CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
		if (rc < 0) {
			spin_unlock_irqrestore(&priv->lock, flags);
			return rc;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_INFO("stop master\n");

	return rc;
}

/**
 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl4965_hw_txq_ctx_stop(struct iwl4965_priv *priv)
{
	int txq_id;
	unsigned long flags;

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
		spin_lock_irqsave(&priv->lock, flags);
		if (iwl4965_grab_nic_access(priv)) {
			spin_unlock_irqrestore(&priv->lock, flags);
			continue;
		}

		iwl4965_write_direct32(priv,
				     IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
				     0x0);
		iwl4965_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
				      IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
				      (txq_id), 200);
		iwl4965_release_nic_access(priv);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	/* Deallocate memory for all Tx queues */
	iwl4965_hw_txq_ctx_free(priv);
}

int iwl4965_hw_nic_reset(struct iwl4965_priv *priv)
{
	int rc = 0;
	unsigned long flags;

	iwl4965_hw_nic_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	rc = iwl4965_poll_bit(priv, CSR_RESET,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);

	udelay(10);

	rc = iwl4965_grab_nic_access(priv);
	if (!rc) {
		iwl4965_write_prph(priv, APMG_CLK_EN_REG,
				 APMG_CLK_VAL_DMA_CLK_RQT |
				 APMG_CLK_VAL_BSM_CLK_RQT);

		udelay(10);

		iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		iwl4965_release_nic_access(priv);
	}

	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

	spin_unlock_irqrestore(&priv->lock, flags);

	return rc;
}

#define REG_RECALIB_PERIOD (60)

/**
 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
 *
 * This callback is provided in order to queue the statistics_work
 * in work_queue context (v. softirq)
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received. We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER. However,
 * we can't send the statistics command from softirq context (which
 * is the context which timers run at) so we have to queue off the
 * statistics_work to actually send the command to the hardware.
 */
static void iwl4965_bg_statistics_periodic(unsigned long data)
{
	struct iwl4965_priv *priv = (struct iwl4965_priv *)data;

	queue_work(priv->workqueue, &priv->statistics_work);
}
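
/*
 * Reading aid: the timer that fires iwl4965_bg_statistics_periodic() is set
 * up and re-armed outside this file.  A typical (illustrative only) arming
 * would look like
 *
 *	mod_timer(&priv->statistics_periodic,
 *		  jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
 *
 * The timer field name is an assumption here; only REG_RECALIB_PERIOD comes
 * from this file.
 */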

/**
 * iwl4965_bg_statistics_work - Send the statistics request to the hardware.
 *
 * This is queued by iwl4965_bg_statistics_periodic.
 */
static void iwl4965_bg_statistics_work(struct work_struct *work)
{
	struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
						 statistics_work);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	iwl4965_send_statistics_request(priv);
	mutex_unlock(&priv->mutex);
}

#define CT_LIMIT_CONST 259
#define TM_CT_KILL_THRESHOLD 110

void iwl4965_rf_kill_ct_config(struct iwl4965_priv *priv)
{
	struct iwl4965_ct_kill_config cmd;
	u32 R1, R2, R3;
	u32 temp_th;
	u32 crit_temperature;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&priv->lock, flags);
	iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) {
		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
	} else {
		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
	}

	temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);

	crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
	cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
	rc = iwl4965_send_cmd_pdu(priv,
				  REPLY_CT_KILL_CONFIG_CMD, sizeof(cmd), &cmd);
	if (rc)
		IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
	else
		IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n");
}
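
/*
 * Worked example of the threshold calculation in iwl4965_rf_kill_ct_config()
 * (the R1/R2/R3 values below are purely illustrative, not real calibration
 * data, and CELSIUS_TO_KELVIN() is assumed to add 273):
 *
 *	temp_th = 110 + 273 = 383
 *	with R1 = 100, R2 = 200, R3 = 359:
 *	crit_temperature = ((383 * (359 - 100)) / 259) + 200
 *	                 = (99197 / 259) + 200 = 383 + 200 = 583
 *
 * i.e. the Celsius CT-kill threshold is translated into the uCode's raw
 * temperature units using the per-device thermal calibration triplet.
 */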

#ifdef CONFIG_IWL4965_SENSITIVITY

/* "false alarms" are signals that our DSP tries to lock onto,
 * but then determines that they are either noise, or transmissions
 * from a distant wireless network (also "noise", really) that get
 * "stepped on" by stronger transmissions within our own network.
 * This algorithm attempts to set a sensitivity level that is high
 * enough to receive all of our own network traffic, but not so
 * high that our DSP gets too busy trying to lock onto non-network
 * activity/noise. */
static int iwl4965_sens_energy_cck(struct iwl4965_priv *priv,
				   u32 norm_fa,
				   u32 rx_enable_time,
				   struct statistics_general_data *rx_info)
{
	u32 max_nrg_cck = 0;
	int i = 0;
	u8 max_silence_rssi = 0;
	u32 silence_ref = 0;
	u8 silence_rssi_a = 0;
	u8 silence_rssi_b = 0;
	u8 silence_rssi_c = 0;
	u32 val;

	/* "false_alarms" values below are cross-multiplications to assess the
	 * numbers of false alarms within the measured period of actual Rx
	 * (Rx is off when we're txing), vs the min/max expected false alarms
	 * (some should be expected if rx is sensitive enough) in a
	 * hypothetical listening period of 200 time units (TU), 204.8 msec:
	 *
	 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
	 *
	 * */
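
	/* Reading aid: the comparisons below are the inequality above with
	 * both sides cross-multiplied so no division is needed.  Testing
	 *	norm_fa * 200 * 1024 < MIN_FA_CCK * rx_enable_time
	 * is equivalent to testing
	 *	norm_fa / rx_enable_time < MIN_FA_CCK / (200 * 1024 usec),
	 * i.e. the measured false-alarm rate against the allowed rate for a
	 * 200 TU (204.8 msec) listening window. */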
	u32 false_alarms = norm_fa * 200 * 1024;
	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
	struct iwl4965_sensitivity_data *data = NULL;

	data = &(priv->sensitivity_data);

	data->nrg_auto_corr_silence_diff = 0;

	/* Find max silence rssi among all 3 receivers.
	 * This is background noise, which may include transmissions from other
	 * networks, measured during silence before our network's beacon */
	silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
			    ALL_BAND_FILTER) >> 8);
	silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
			    ALL_BAND_FILTER) >> 8);
	silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
			    ALL_BAND_FILTER) >> 8);

	val = max(silence_rssi_b, silence_rssi_c);
	max_silence_rssi = max(silence_rssi_a, (u8) val);

	/* Store silence rssi in 20-beacon history table */
	data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
	data->nrg_silence_idx++;
	if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
		data->nrg_silence_idx = 0;

	/* Find max silence rssi across 20 beacon history */
	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
		val = data->nrg_silence_rssi[i];
		silence_ref = max(silence_ref, val);
	}
	IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
			silence_rssi_a, silence_rssi_b, silence_rssi_c,
			silence_ref);

	/* Find max rx energy (min value!) among all 3 receivers,
	 * measured during beacon frame.
	 * Save it in 10-beacon history table. */
	i = data->nrg_energy_idx;
	val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
	data->nrg_value[i] = min(rx_info->beacon_energy_a, val);

	data->nrg_energy_idx++;
	if (data->nrg_energy_idx >= 10)
		data->nrg_energy_idx = 0;

	/* Find min rx energy (max value) across 10 beacon history.
	 * This is the minimum signal level that we want to receive well.
	 * Add backoff (margin so we don't miss slightly lower energy frames).
	 * This establishes an upper bound (min value) for energy threshold. */
	max_nrg_cck = data->nrg_value[0];
	for (i = 1; i < 10; i++)
		max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
	max_nrg_cck += 6;

	IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
			rx_info->beacon_energy_a, rx_info->beacon_energy_b,
			rx_info->beacon_energy_c, max_nrg_cck - 6);

	/* Count number of consecutive beacons with fewer-than-desired
	 * false alarms. */
	if (false_alarms < min_false_alarms)
		data->num_in_cck_no_fa++;
	else
		data->num_in_cck_no_fa = 0;
	IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
			data->num_in_cck_no_fa);

	/* If we got too many false alarms this time, reduce sensitivity */
	if (false_alarms > max_false_alarms) {
		IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
			false_alarms, max_false_alarms);
		IWL_DEBUG_CALIB("... reducing sensitivity\n");
		data->nrg_curr_state = IWL_FA_TOO_MANY;

		if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
			/* Store for "fewer than desired" on later beacon */
			data->nrg_silence_ref = silence_ref;

			/* increase energy threshold (reduce nrg value)
			 * to decrease sensitivity */
			if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
				data->nrg_th_cck = data->nrg_th_cck
							- NRG_STEP_CCK;
		}

		/* increase auto_corr values to decrease sensitivity */
		if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
			data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
		else {
			val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
			data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
		}
		val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
		data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);

	/* Else if we got fewer than desired, increase sensitivity */
	} else if (false_alarms < min_false_alarms) {
		data->nrg_curr_state = IWL_FA_TOO_FEW;

		/* Compare silence level with silence level for most recent
		 * healthy number or too many false alarms */
		data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
						   (s32)silence_ref;

		IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
			false_alarms, min_false_alarms,
			data->nrg_auto_corr_silence_diff);

		/* Increase value to increase sensitivity, but only if:
		 * 1a) previous beacon did *not* have *too many* false alarms
		 * 1b) AND there's a significant difference in Rx levels
		 *     from a previous beacon with too many, or healthy # FAs
		 * OR 2) We've seen a lot of beacons (100) with too few
		 *       false alarms */
		if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
		    ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
		     (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {

			IWL_DEBUG_CALIB("... increasing sensitivity\n");
			/* Increase nrg value to increase sensitivity */
			val = data->nrg_th_cck + NRG_STEP_CCK;
			data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);

			/* Decrease auto_corr values to increase sensitivity */
			val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
			data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);

			val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
			data->auto_corr_cck_mrc =
					max((u32)AUTO_CORR_MIN_CCK_MRC, val);

		} else
			IWL_DEBUG_CALIB("... but not changing sensitivity\n");

	/* Else we got a healthy number of false alarms, keep status quo */
	} else {
		IWL_DEBUG_CALIB(" FA in safe zone\n");
		data->nrg_curr_state = IWL_FA_GOOD_RANGE;

		/* Store for use in "fewer than desired" with later beacon */
		data->nrg_silence_ref = silence_ref;

		/* If previous beacon had too many false alarms,
		 * give it some extra margin by reducing sensitivity again
		 * (but don't go below measured energy of desired Rx) */
		if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
			IWL_DEBUG_CALIB("... increasing margin\n");
			data->nrg_th_cck -= NRG_MARGIN;
		}
	}

	/* Make sure the energy threshold does not go above the measured
	 * energy of the desired Rx signals (reduced by backoff margin),
	 * or else we might start missing Rx frames.
	 * Lower value is higher energy, so we use max()!
	 */
	data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
	IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);

	data->nrg_prev_state = data->nrg_curr_state;

	return 0;
}


static int iwl4965_sens_auto_corr_ofdm(struct iwl4965_priv *priv,
				       u32 norm_fa,
				       u32 rx_enable_time)
{
	u32 val;
	u32 false_alarms = norm_fa * 200 * 1024;
	u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
	u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
	struct iwl4965_sensitivity_data *data = NULL;

	data = &(priv->sensitivity_data);

	/* If we got too many false alarms this time, reduce sensitivity */
	if (false_alarms > max_false_alarms) {

		IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
			false_alarms, max_false_alarms);

		val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm =
			min((u32)AUTO_CORR_MAX_OFDM, val);

		val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_mrc =
			min((u32)AUTO_CORR_MAX_OFDM_MRC, val);

		val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_x1 =
			min((u32)AUTO_CORR_MAX_OFDM_X1, val);

		val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_mrc_x1 =
			min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
	}

	/* Else if we got fewer than desired, increase sensitivity */
	else if (false_alarms < min_false_alarms) {

		IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
			false_alarms, min_false_alarms);

		val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm =
			max((u32)AUTO_CORR_MIN_OFDM, val);

		val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_mrc =
			max((u32)AUTO_CORR_MIN_OFDM_MRC, val);

		val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_x1 =
			max((u32)AUTO_CORR_MIN_OFDM_X1, val);

		val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_mrc_x1 =
			max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
	}

	else
		IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
			min_false_alarms, false_alarms, max_false_alarms);

	return 0;
}

static int iwl4965_sensitivity_callback(struct iwl4965_priv *priv,
				struct iwl4965_cmd *cmd, struct sk_buff *skb)
{
	/* We didn't cache the SKB; let the caller free it */
	return 1;
}

/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
static int iwl4965_sensitivity_write(struct iwl4965_priv *priv, u8 flags)
{
	int rc = 0;
	struct iwl4965_sensitivity_cmd cmd;
	struct iwl4965_sensitivity_data *data = NULL;
	struct iwl4965_host_cmd cmd_out = {
		.id = SENSITIVITY_CMD,
		.len = sizeof(struct iwl4965_sensitivity_cmd),
		.meta.flags = flags,
		.data = &cmd,
	};

	data = &(priv->sensitivity_data);

	memset(&cmd, 0, sizeof(cmd));

	cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm);
	cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
	cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm_x1);
	cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);

	cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
				cpu_to_le16((u16)data->auto_corr_cck);
	cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16((u16)data->auto_corr_cck_mrc);

	cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
				cpu_to_le16((u16)data->nrg_th_cck);
	cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
				cpu_to_le16((u16)data->nrg_th_ofdm);

	cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
				__constant_cpu_to_le16(190);
	cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
				__constant_cpu_to_le16(390);
	cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
				__constant_cpu_to_le16(62);

	IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
			data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
			data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
			data->nrg_th_ofdm);

	IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
			data->auto_corr_cck, data->auto_corr_cck_mrc,
			data->nrg_th_cck);

	/* Update uCode's "work" table, and copy it to DSP */
	cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;

	if (flags & CMD_ASYNC)
		cmd_out.meta.u.callback = iwl4965_sensitivity_callback;

	/* Don't send command to uCode if nothing has changed */
	if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
		    sizeof(u16)*HD_TABLE_SIZE)) {
		IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
		return 0;
	}

	/* Copy table for comparison next time */
	memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
	       sizeof(u16)*HD_TABLE_SIZE);

	rc = iwl4965_send_cmd(priv, &cmd_out);
	if (!rc) {
		IWL_DEBUG_CALIB("SENSITIVITY_CMD succeeded\n");
		return rc;
	}

	return 0;
}

void iwl4965_init_sensitivity(struct iwl4965_priv *priv, u8 flags, u8 force)
{
	int rc = 0;
	int i;
	struct iwl4965_sensitivity_data *data = NULL;

	IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");

	if (force)
		memset(&(priv->sensitivity_tbl[0]), 0,
			sizeof(u16)*HD_TABLE_SIZE);

	/* Clear driver's sensitivity algo data */
	data = &(priv->sensitivity_data);
	memset(data, 0, sizeof(struct iwl4965_sensitivity_data));

	data->num_in_cck_no_fa = 0;
	data->nrg_curr_state = IWL_FA_TOO_MANY;
	data->nrg_prev_state = IWL_FA_TOO_MANY;
	data->nrg_silence_ref = 0;
	data->nrg_silence_idx = 0;
	data->nrg_energy_idx = 0;

	for (i = 0; i < 10; i++)
		data->nrg_value[i] = 0;

	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
		data->nrg_silence_rssi[i] = 0;

	data->auto_corr_ofdm = 90;
	data->auto_corr_ofdm_mrc = 170;
	data->auto_corr_ofdm_x1 = 105;
	data->auto_corr_ofdm_mrc_x1 = 220;
	data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
	data->auto_corr_cck_mrc = 200;
	data->nrg_th_cck = 100;
	data->nrg_th_ofdm = 100;

	data->last_bad_plcp_cnt_ofdm = 0;
	data->last_fa_cnt_ofdm = 0;
	data->last_bad_plcp_cnt_cck = 0;
	data->last_fa_cnt_cck = 0;

	/* Clear prior Sensitivity command data to force send to uCode */
	if (force)
		memset(&(priv->sensitivity_tbl[0]), 0,
			sizeof(u16)*HD_TABLE_SIZE);

	rc |= iwl4965_sensitivity_write(priv, flags);
	IWL_DEBUG_CALIB("<<return 0x%X\n", rc);

	return;
}


/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
 * Called after every association, but this runs only once!
 * ... once chain noise is calibrated the first time, it's good forever. */
void iwl4965_chain_noise_reset(struct iwl4965_priv *priv)
{
	struct iwl4965_chain_noise_data *data = NULL;
	int rc = 0;

	data = &(priv->chain_noise_data);
	if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl4965_is_associated(priv)) {
		struct iwl4965_calibration_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = 0;
		cmd.diff_gain_b = 0;
		cmd.diff_gain_c = 0;
		rc = iwl4965_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				 sizeof(cmd), &cmd);
		msleep(4);
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
	}
	return;
}

/*
 * Accumulate 20 beacons of signal and noise statistics for each of
 * 3 receivers/antennas/rx-chains, then figure out:
 * 1) Which antennas are connected.
 * 2) Differential rx gain settings to balance the 3 receivers.
 */
static void iwl4965_noise_calibration(struct iwl4965_priv *priv,
				      struct iwl4965_notif_statistics *stat_resp)
{
	struct iwl4965_chain_noise_data *data = NULL;
	int rc = 0;

	u32 chain_noise_a;
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_sig_a;
	u32 chain_sig_b;
	u32 chain_sig_c;
	u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
	u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
	u32 max_average_sig;
	u16 max_average_sig_antenna_i;
	u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
	u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
	u16 i = 0;
	u16 chan_num = INITIALIZATION_VALUE;
	u32 band = INITIALIZATION_VALUE;
	u32 active_chains = 0;
	unsigned long flags;
	struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);

	data = &(priv->chain_noise_data);

	/* Accumulate just the first 20 beacons after the first association,
	 * then we're done forever. */
	if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
		if (data->state == IWL_CHAIN_NOISE_ALIVE)
			IWL_DEBUG_CALIB("Wait for noise calib reset\n");
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);
	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
		IWL_DEBUG_CALIB(" << Interference data unavailable\n");
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
	chan_num = le16_to_cpu(priv->staging_rxon.channel);

	/* Make sure we accumulate data for just the associated channel
	 * (even if scanning). */
	if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
	    ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
	      (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
		IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
				chan_num, band);
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	/* Accumulate beacon statistics values across 20 beacons */
	chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
				IN_BAND_FILTER;
	chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
				IN_BAND_FILTER;
	chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
				IN_BAND_FILTER;

	chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
	chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
	chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;

	spin_unlock_irqrestore(&priv->lock, flags);

	data->beacon_count++;

	data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
	data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
	data->chain_noise_c = (chain_noise_c + data->chain_noise_c);

	data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
	data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
	data->chain_signal_c = (chain_sig_c + data->chain_signal_c);

	IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
			data->beacon_count);
	IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
			chain_sig_a, chain_sig_b, chain_sig_c);
	IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
			chain_noise_a, chain_noise_b, chain_noise_c);

	/* If this is the 20th beacon, determine:
	 * 1) Disconnected antennas (using signal strengths)
	 * 2) Differential gain (using silence noise) to balance receivers */
	if (data->beacon_count == CAL_NUM_OF_BEACONS) {

		/* Analyze signal for disconnected antenna */
		average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
		average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
		average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;

		if (average_sig[0] >= average_sig[1]) {
			max_average_sig = average_sig[0];
			max_average_sig_antenna_i = 0;
			active_chains = (1 << max_average_sig_antenna_i);
		} else {
			max_average_sig = average_sig[1];
			max_average_sig_antenna_i = 1;
			active_chains = (1 << max_average_sig_antenna_i);
		}

		if (average_sig[2] >= max_average_sig) {
			max_average_sig = average_sig[2];
			max_average_sig_antenna_i = 2;
			active_chains = (1 << max_average_sig_antenna_i);
		}

		IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
				average_sig[0], average_sig[1], average_sig[2]);
		IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
				max_average_sig, max_average_sig_antenna_i);

		/* Compare signal strengths for all 3 receivers. */
		for (i = 0; i < NUM_RX_CHAINS; i++) {
			if (i != max_average_sig_antenna_i) {
				s32 rssi_delta = (max_average_sig -
						  average_sig[i]);

				/* If signal is very weak, compared with
				 * strongest, mark it as disconnected. */
				if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
					data->disconn_array[i] = 1;
				else
					active_chains |= (1 << i);
				IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
					"disconn_array[i] = %d\n",
					i, rssi_delta, data->disconn_array[i]);
			}
		}

		/*If both chains A & B are disconnected -
		 * connect B and leave A as is */
		if (data->disconn_array[CHAIN_A] &&
		    data->disconn_array[CHAIN_B]) {
			data->disconn_array[CHAIN_B] = 0;
			active_chains |= (1 << CHAIN_B);
			IWL_DEBUG_CALIB("both A & B chains are disconnected! "
				"W/A - declare B as connected\n");
		}

		IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
				active_chains);

		/* Save for use within RXON, TX, SCAN commands, etc. */
		priv->valid_antenna = active_chains;

		/* Analyze noise for rx balance */
		average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
		average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
		average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);

		for (i = 0; i < NUM_RX_CHAINS; i++) {
			if (!(data->disconn_array[i]) &&
			    (average_noise[i] <= min_average_noise)) {
				/* This means that chain i is active and has
				 * lower noise values so far: */
				min_average_noise = average_noise[i];
				min_average_noise_antenna_i = i;
			}
		}

		data->delta_gain_code[min_average_noise_antenna_i] = 0;

		IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
				average_noise[0], average_noise[1],
				average_noise[2]);

		IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
				min_average_noise, min_average_noise_antenna_i);

		for (i = 0; i < NUM_RX_CHAINS; i++) {
			s32 delta_g = 0;

			if (!(data->disconn_array[i]) &&
			    (data->delta_gain_code[i] ==
			     CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
				delta_g = average_noise[i] - min_average_noise;
				data->delta_gain_code[i] = (u8)((delta_g *
						10) / 15);
				if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE <
				    data->delta_gain_code[i])
					data->delta_gain_code[i] =
						CHAIN_NOISE_MAX_DELTA_GAIN_CODE;

				data->delta_gain_code[i] =
					(data->delta_gain_code[i] | (1 << 2));
			} else
				data->delta_gain_code[i] = 0;
		}
		IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
				data->delta_gain_code[0],
				data->delta_gain_code[1],
				data->delta_gain_code[2]);

		/* Differential gain gets sent to uCode only once */
		if (!data->radio_write) {
			struct iwl4965_calibration_cmd cmd;
			data->radio_write = 1;

			memset(&cmd, 0, sizeof(cmd));
			cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
			cmd.diff_gain_a = data->delta_gain_code[0];
			cmd.diff_gain_b = data->delta_gain_code[1];
			cmd.diff_gain_c = data->delta_gain_code[2];
			rc = iwl4965_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
					      sizeof(cmd), &cmd);
			if (rc)
				IWL_DEBUG_CALIB("fail sending cmd "
					"REPLY_PHY_CALIBRATION_CMD \n");

			/* TODO we might want recalculate
			 * rx_chain in rxon cmd */

			/* Mark so we run this algo only once! */
			data->state = IWL_CHAIN_NOISE_CALIBRATED;
		}
		data->chain_noise_a = 0;
		data->chain_noise_b = 0;
		data->chain_noise_c = 0;
		data->chain_signal_a = 0;
		data->chain_signal_b = 0;
		data->chain_signal_c = 0;
		data->beacon_count = 0;
	}
	return;
}
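
/*
 * Worked example of the delta gain encoding in iwl4965_noise_calibration()
 * (the noise figures are illustrative only): if a connected chain's
 * average_noise exceeds min_average_noise by 30, then
 *
 *	delta_gain_code = (30 * 10) / 15 = 20,
 *
 * clamped to CHAIN_NOISE_MAX_DELTA_GAIN_CODE if larger, and then OR'd with
 * (1 << 2) exactly as the code above does before the values are sent in
 * REPLY_PHY_CALIBRATION_CMD.  The quietest chain keeps a code of 0.
 */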

static void iwl4965_sensitivity_calibration(struct iwl4965_priv *priv,
					    struct iwl4965_notif_statistics *resp)
{
	int rc = 0;
	u32 rx_enable_time;
	u32 fa_cck;
	u32 fa_ofdm;
	u32 bad_plcp_cck;
	u32 bad_plcp_ofdm;
	u32 norm_fa_ofdm;
	u32 norm_fa_cck;
	struct iwl4965_sensitivity_data *data = NULL;
	struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
	struct statistics_rx *statistics = &(resp->rx);
	unsigned long flags;
	struct statistics_general_data statis;

	data = &(priv->sensitivity_data);

	if (!iwl4965_is_associated(priv)) {
		IWL_DEBUG_CALIB("<< - not associated\n");
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);
	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
		IWL_DEBUG_CALIB("<< invalid data.\n");
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	/* Extract Statistics: */
	rx_enable_time = le32_to_cpu(rx_info->channel_load);
	fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
	fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
	bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
	bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);

	statis.beacon_silence_rssi_a =
			le32_to_cpu(statistics->general.beacon_silence_rssi_a);
	statis.beacon_silence_rssi_b =
			le32_to_cpu(statistics->general.beacon_silence_rssi_b);
	statis.beacon_silence_rssi_c =
			le32_to_cpu(statistics->general.beacon_silence_rssi_c);
	statis.beacon_energy_a =
			le32_to_cpu(statistics->general.beacon_energy_a);
	statis.beacon_energy_b =
			le32_to_cpu(statistics->general.beacon_energy_b);
	statis.beacon_energy_c =
			le32_to_cpu(statistics->general.beacon_energy_c);

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);

	if (!rx_enable_time) {
		IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
		return;
	}

	/* These statistics increase monotonically, and do not reset
	 * at each beacon. Calculate difference from last value, or just
	 * use the new statistics value if it has reset or wrapped around. */
	if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
		data->last_bad_plcp_cnt_cck = bad_plcp_cck;
	else {
		bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
		data->last_bad_plcp_cnt_cck += bad_plcp_cck;
	}

	if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
		data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
	else {
		bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
		data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
	}

	if (data->last_fa_cnt_ofdm > fa_ofdm)
		data->last_fa_cnt_ofdm = fa_ofdm;
	else {
		fa_ofdm -= data->last_fa_cnt_ofdm;
		data->last_fa_cnt_ofdm += fa_ofdm;
	}

	if (data->last_fa_cnt_cck > fa_cck)
		data->last_fa_cnt_cck = fa_cck;
	else {
		fa_cck -= data->last_fa_cnt_cck;
		data->last_fa_cnt_cck += fa_cck;
	}
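
	/* Reading aid for the delta logic above (numbers illustrative): if
	 * last_fa_cnt_cck was 100 and the new fa_cck is 130, fa_cck becomes
	 * the per-period delta 30 and last_fa_cnt_cck advances to 130.  If
	 * the new value is smaller than the stored one (uCode restart or
	 * wrap), the new value is used as-is and becomes the new baseline. */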
1617
1618 /* Total aborted signal locks */
1619 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
1620 norm_fa_cck = fa_cck + bad_plcp_cck;
1621
1622 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
1623 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
1624
1625 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1626 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
1627 rc |= iwl4965_sensitivity_write(priv, CMD_ASYNC);
1628
1629 return;
1630}
1631
1632static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1633{
bb8c093b 1634 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
b481de9c
ZY
1635 sensitivity_work);
1636
1637 mutex_lock(&priv->mutex);
1638
1639 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1640 test_bit(STATUS_SCANNING, &priv->status)) {
1641 mutex_unlock(&priv->mutex);
1642 return;
1643 }
1644
1645 if (priv->start_calib) {
1646 iwl4965_noise_calibration(priv, &priv->statistics);
1647
1648 if (priv->sensitivity_data.state ==
1649 IWL_SENS_CALIB_NEED_REINIT) {
1650 iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1651 priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1652 } else
1653 iwl4965_sensitivity_calibration(priv,
1654 &priv->statistics);
1655 }
1656
1657 mutex_unlock(&priv->mutex);
1658 return;
1659}
c8b0e6e1 1660#endif /*CONFIG_IWL4965_SENSITIVITY*/
b481de9c
ZY
1661
1662static void iwl4965_bg_txpower_work(struct work_struct *work)
1663{
bb8c093b 1664 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
1665 txpower_work);
1666
1667 /* If a scan happened to start before we got here
1668 * then just return; the statistics notification will
1669 * kick off another scheduled work to compensate for
1670 * any temperature delta we missed here. */
1671 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1672 test_bit(STATUS_SCANNING, &priv->status))
1673 return;
1674
1675 mutex_lock(&priv->mutex);
1676
1677 /* Regardless of whether we are associated, we must reconfigure the
1678 * TX power since frames can be sent on non-radar channels while
1679 * not associated */
bb8c093b 1680 iwl4965_hw_reg_send_txpower(priv);
1681
1682 /* Update last_temperature to keep is_calib_needed from running
1683 * when it isn't needed... */
1684 priv->last_temperature = priv->temperature;
1685
1686 mutex_unlock(&priv->mutex);
1687}
1688
1689/*
1690 * Acquire priv->lock before calling this function !
1691 */
bb8c093b 1692static void iwl4965_set_wr_ptrs(struct iwl4965_priv *priv, int txq_id, u32 index)
b481de9c 1693{
bb8c093b 1694 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
b481de9c 1695 (index & 0xff) | (txq_id << 8));
bb8c093b 1696 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(txq_id), index);
1697}
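/* As used above, the HBUS_TARG_WRPTR write appears to pack the queue id
 * into bits 8 and up and the byte-masked index into bits 0-7 (e.g. txq_id
 * 4, index 0x20 -> 0x0420), while KDR_SCD_QUEUE_RDPTR(txq_id) keeps the
 * scheduler's view of the same queue in step.  (Bit layout inferred from
 * this call site, not from a register spec.) */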
1698
1699/**
1700 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
1701 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
1702 * @scd_retry: (1) Indicates queue will be used in aggregation mode
1703 *
1704 * NOTE: Acquire priv->lock before calling this function !
b481de9c 1705 */
1706static void iwl4965_tx_queue_set_status(struct iwl4965_priv *priv,
1707 struct iwl4965_tx_queue *txq,
1708 int tx_fifo_id, int scd_retry)
1709{
1710 int txq_id = txq->q.id;
1711
1712 /* Find out whether to activate Tx queue */
1713 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
1714
8b6eaea8 1715 /* Set up and activate */
bb8c093b 1716 iwl4965_write_prph(priv, KDR_SCD_QUEUE_STATUS_BITS(txq_id),
1717 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1718 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
1719 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) |
1720 (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1721 SCD_QUEUE_STTS_REG_MSK);
1722
1723 txq->sched_retry = scd_retry;
1724
1725 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
8b6eaea8 1726 active ? "Activate" : "Deactivate",
1727 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
1728}
1729
1730static const u16 default_queue_to_tx_fifo[] = {
1731 IWL_TX_FIFO_AC3,
1732 IWL_TX_FIFO_AC2,
1733 IWL_TX_FIFO_AC1,
1734 IWL_TX_FIFO_AC0,
1735 IWL_CMD_FIFO_NUM,
1736 IWL_TX_FIFO_HCCA_1,
1737 IWL_TX_FIFO_HCCA_2
1738};
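/* Reading of the table above (inferred from the ordering, not a spec):
 * Tx queues 0-3 feed the AC3..AC0 FIFOs, queue 4 carries host commands
 * via IWL_CMD_FIFO_NUM, and the last two slots are reserved for HCCA.
 * iwl4965_alive_notify() below walks this array to bind each queue to
 * its FIFO. */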
1739
bb8c093b 1740static inline void iwl4965_txq_ctx_activate(struct iwl4965_priv *priv, int txq_id)
1741{
1742 set_bit(txq_id, &priv->txq_ctx_active_msk);
1743}
1744
bb8c093b 1745static inline void iwl4965_txq_ctx_deactivate(struct iwl4965_priv *priv, int txq_id)
1746{
1747 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1748}
1749
bb8c093b 1750int iwl4965_alive_notify(struct iwl4965_priv *priv)
1751{
1752 u32 a;
1753 int i = 0;
1754 unsigned long flags;
1755 int rc;
1756
1757 spin_lock_irqsave(&priv->lock, flags);
1758
c8b0e6e1 1759#ifdef CONFIG_IWL4965_SENSITIVITY
b481de9c 1760 memset(&(priv->sensitivity_data), 0,
bb8c093b 1761 sizeof(struct iwl4965_sensitivity_data));
b481de9c 1762 memset(&(priv->chain_noise_data), 0,
bb8c093b 1763 sizeof(struct iwl4965_chain_noise_data));
1764 for (i = 0; i < NUM_RX_CHAINS; i++)
1765 priv->chain_noise_data.delta_gain_code[i] =
1766 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
c8b0e6e1 1767#endif /* CONFIG_IWL4965_SENSITIVITY*/
bb8c093b 1768 rc = iwl4965_grab_nic_access(priv);
1769 if (rc) {
1770 spin_unlock_irqrestore(&priv->lock, flags);
1771 return rc;
1772 }
1773
8b6eaea8 1774 /* Clear 4965's internal Tx Scheduler data base */
bb8c093b 1775 priv->scd_base_addr = iwl4965_read_prph(priv, KDR_SCD_SRAM_BASE_ADDR);
1776 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET;
1777 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4)
bb8c093b 1778 iwl4965_write_targ_mem(priv, a, 0);
b481de9c 1779 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4)
bb8c093b 1780 iwl4965_write_targ_mem(priv, a, 0);
b481de9c 1781 for (; a < sizeof(u16) * priv->hw_setting.max_txq_num; a += 4)
bb8c093b 1782 iwl4965_write_targ_mem(priv, a, 0);
b481de9c 1783
8b6eaea8 1784 /* Tell 4965 where to find Tx byte count tables */
bb8c093b 1785 iwl4965_write_prph(priv, KDR_SCD_DRAM_BASE_ADDR,
b481de9c 1786 (priv->hw_setting.shared_phys +
bb8c093b 1787 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
1788
1789 /* Disable chain mode for all queues */
bb8c093b 1790 iwl4965_write_prph(priv, KDR_SCD_QUEUECHAIN_SEL, 0);
b481de9c 1791
8b6eaea8 1792 /* Initialize each Tx queue (including the command queue) */
b481de9c 1793 for (i = 0; i < priv->hw_setting.max_txq_num; i++) {
1794
1795 /* TFD circular buffer read/write indexes */
1796 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(i), 0);
1797 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1798
1799 /* Max Tx Window size for Scheduler-ACK mode */
bb8c093b 1800 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
1801 SCD_CONTEXT_QUEUE_OFFSET(i),
1802 (SCD_WIN_SIZE <<
1803 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1804 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1805
1806 /* Frame limit */
bb8c093b 1807 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
1808 SCD_CONTEXT_QUEUE_OFFSET(i) +
1809 sizeof(u32),
1810 (SCD_FRAME_LIMIT <<
1811 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1812 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1813
1814 }
bb8c093b 1815 iwl4965_write_prph(priv, KDR_SCD_INTERRUPT_MASK,
1816 (1 << priv->hw_setting.max_txq_num) - 1);
1817
8b6eaea8 1818 /* Activate all Tx DMA/FIFO channels */
bb8c093b 1819 iwl4965_write_prph(priv, KDR_SCD_TXFACT,
1820 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1821
1822 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1823
1824 /* Map each Tx/cmd queue to its corresponding fifo */
1825 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
1826 int ac = default_queue_to_tx_fifo[i];
1827 iwl4965_txq_ctx_activate(priv, i);
1828 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
1829 }
1830
bb8c093b 1831 iwl4965_release_nic_access(priv);
1832 spin_unlock_irqrestore(&priv->lock, flags);
1833
1834 return 0;
1835}
1836
1837/**
1838 * iwl4965_hw_set_hw_setting
1839 *
1840 * Called when initializing driver
1841 */
bb8c093b 1842int iwl4965_hw_set_hw_setting(struct iwl4965_priv *priv)
b481de9c 1843{
8b6eaea8 1844 /* Allocate area for Tx byte count tables and Rx queue status */
1845 priv->hw_setting.shared_virt =
1846 pci_alloc_consistent(priv->pci_dev,
bb8c093b 1847 sizeof(struct iwl4965_shared),
1848 &priv->hw_setting.shared_phys);
1849
1850 if (!priv->hw_setting.shared_virt)
1851 return -1;
1852
bb8c093b 1853 memset(priv->hw_setting.shared_virt, 0, sizeof(struct iwl4965_shared));
b481de9c 1854
bb8c093b 1855 priv->hw_setting.max_txq_num = iwl4965_param_queues_num;
bb8c093b 1856 priv->hw_setting.tx_cmd_len = sizeof(struct iwl4965_tx_cmd);
1857 priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE;
1858 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
1859 if (iwl4965_param_amsdu_size_8K)
1860 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_8K;
1861 else
1862 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_4K;
1863 priv->hw_setting.max_pkt_size = priv->hw_setting.rx_buf_size - 256;
1864 priv->hw_setting.max_stations = IWL4965_STATION_COUNT;
1865 priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID;
1866
1867 priv->hw_setting.tx_ant_num = 2;
1868
1869 return 0;
1870}
1871
1872/**
bb8c093b 1873 * iwl4965_hw_txq_ctx_free - Free TXQ Context
1874 *
1875 * Destroy all TX DMA queues and structures
1876 */
bb8c093b 1877void iwl4965_hw_txq_ctx_free(struct iwl4965_priv *priv)
1878{
1879 int txq_id;
1880
1881 /* Tx queues */
1882 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
bb8c093b 1883 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
b481de9c 1884
8b6eaea8 1885 /* Keep-warm buffer */
1886 iwl4965_kw_free(priv);
1887}
1888
1889/**
8b6eaea8 1890 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
b481de9c 1891 *
1892 * Does NOT advance any TFD circular buffer read/write indexes
1893 * Does NOT free the TFD itself (which is within circular buffer)
b481de9c 1894 */
bb8c093b 1895int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
b481de9c 1896{
1897 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
1898 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
1899 struct pci_dev *dev = priv->pci_dev;
1900 int i;
1901 int counter = 0;
1902 int index, is_odd;
1903
8b6eaea8 1904 /* Host command buffers stay mapped in memory, nothing to clean */
b481de9c 1905 if (txq->q.id == IWL_CMD_QUEUE_NUM)
1906 return 0;
1907
8b6eaea8 1908 /* Sanity check on number of chunks */
1909 counter = IWL_GET_BITS(*bd, num_tbs);
1910 if (counter > MAX_NUM_OF_TBS) {
1911 IWL_ERROR("Too many chunks: %i\n", counter);
1912 /* @todo issue fatal error, it is quite a serious situation */
1913 return 0;
1914 }
1915
1916 /* Unmap chunks, if any.
1917 * TFD info for odd chunks is in a different format than for even chunks. */
1918 for (i = 0; i < counter; i++) {
1919 index = i / 2;
1920 is_odd = i & 0x1;
1921
1922 if (is_odd)
1923 pci_unmap_single(
1924 dev,
1925 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
1926 (IWL_GET_BITS(bd->pa[index],
1927 tb2_addr_hi20) << 16),
1928 IWL_GET_BITS(bd->pa[index], tb2_len),
1929 PCI_DMA_TODEVICE);
1930
1931 else if (i > 0)
1932 pci_unmap_single(dev,
1933 le32_to_cpu(bd->pa[index].tb1_addr),
1934 IWL_GET_BITS(bd->pa[index], tb1_len),
1935 PCI_DMA_TODEVICE);
1936
8b6eaea8 1937 /* Free SKB, if any, for this chunk */
1938 if (txq->txb[txq->q.read_ptr].skb[i]) {
1939 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
1940
1941 dev_kfree_skb(skb);
fc4b6853 1942 txq->txb[txq->q.read_ptr].skb[i] = NULL;
1943 }
1944 }
1945 return 0;
1946}
1947
bb8c093b 1948int iwl4965_hw_reg_set_txpower(struct iwl4965_priv *priv, s8 power)
b481de9c 1949{
bb8c093b 1950 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
1951 return -EINVAL;
1952}
1953
1954static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
1955{
1956 s32 sign = 1;
1957
1958 if (num < 0) {
1959 sign = -sign;
1960 num = -num;
1961 }
1962 if (denom < 0) {
1963 sign = -sign;
1964 denom = -denom;
1965 }
1966 *res = 1;
1967 *res = ((num * 2 + denom) / (denom * 2)) * sign;
1968
1969 return 1;
1970}
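/* Worked example of the rounding above: num = 7, denom = 2 stores
 * ((7*2 + 2) / (2*2)) = 4 in *res (3.5 rounded away from zero), and
 * num = -7 restores the sign to give -4.  The return value is always 1. */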
1971
1972/**
1973 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
1974 *
1975 * Determines power supply voltage compensation for txpower calculations.
1976 * Returns number of 1/2-dB steps to subtract from gain table index,
1977 * to compensate for difference between power supply voltage during
1978 * factory measurements, vs. current power supply voltage.
1979 *
1980 * Voltage indication is higher for lower voltage.
1981 * Lower voltage requires more gain (lower gain table index).
1982 */
1983static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
1984 s32 current_voltage)
1985{
1986 s32 comp = 0;
1987
1988 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
1989 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
1990 return 0;
1991
1992 iwl4965_math_div_round(current_voltage - eeprom_voltage,
1993 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
1994
1995 if (current_voltage > eeprom_voltage)
1996 comp *= 2;
1997 if ((comp < -2) || (comp > 2))
1998 comp = 0;
1999
2000 return comp;
2001}
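/* Worked example: a current_voltage reading one
 * TX_POWER_IWL_VOLTAGE_CODES_PER_03V step above eeprom_voltage rounds to
 * 1, is doubled (current > eeprom) to comp = 2, and survives the [-2, 2]
 * clamp; a two-step difference would double to 4 and be discarded as 0. */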
2002
bb8c093b 2003static const struct iwl4965_channel_info *
2004iwl4965_get_channel_txpower_info(struct iwl4965_priv *priv,
2005 enum ieee80211_band band, u16 channel)
b481de9c 2006{
bb8c093b 2007 const struct iwl4965_channel_info *ch_info;
b481de9c 2008
8318d78a 2009 ch_info = iwl4965_get_channel_info(priv, band, channel);
2010
2011 if (!is_channel_valid(ch_info))
2012 return NULL;
2013
2014 return ch_info;
2015}
2016
2017static s32 iwl4965_get_tx_atten_grp(u16 channel)
2018{
2019 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
2020 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
2021 return CALIB_CH_GROUP_5;
2022
2023 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
2024 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
2025 return CALIB_CH_GROUP_1;
2026
2027 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
2028 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
2029 return CALIB_CH_GROUP_2;
2030
2031 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
2032 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
2033 return CALIB_CH_GROUP_3;
2034
2035 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
2036 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
2037 return CALIB_CH_GROUP_4;
2038
2039 IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
2040 return -1;
2041}
2042
bb8c093b 2043static u32 iwl4965_get_sub_band(const struct iwl4965_priv *priv, u32 channel)
2044{
2045 s32 b = -1;
2046
2047 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
2048 if (priv->eeprom.calib_info.band_info[b].ch_from == 0)
2049 continue;
2050
2051 if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from)
2052 && (channel <= priv->eeprom.calib_info.band_info[b].ch_to))
2053 break;
2054 }
2055
2056 return b;
2057}
2058
2059static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
2060{
2061 s32 val;
2062
2063 if (x2 == x1)
2064 return y1;
2065 else {
2066 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
2067 return val + y2;
2068 }
2069}
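/* Worked example: iwl4965_interpolate_value(40, 36, 10, 48, 16) returns
 * round((48-40)*(10-16)/(48-36)) + 16 = -4 + 16 = 12, i.e. the straight
 * line through (36,10) and (48,16) evaluated at x = 40. */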
2070
2071/**
2072 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
2073 *
2074 * Interpolates factory measurements from the two sample channels within a
2075 * sub-band, to apply to channel of interest. Interpolation is proportional to
2076 * differences in channel frequencies, which is proportional to differences
2077 * in channel number.
2078 */
2079static int iwl4965_interpolate_chan(struct iwl4965_priv *priv, u32 channel,
2080 struct iwl4965_eeprom_calib_ch_info *chan_info)
2081{
2082 s32 s = -1;
2083 u32 c;
2084 u32 m;
2085 const struct iwl4965_eeprom_calib_measure *m1;
2086 const struct iwl4965_eeprom_calib_measure *m2;
2087 struct iwl4965_eeprom_calib_measure *omeas;
2088 u32 ch_i1;
2089 u32 ch_i2;
2090
2091 s = iwl4965_get_sub_band(priv, channel);
2092 if (s >= EEPROM_TX_POWER_BANDS) {
2093 IWL_ERROR("Tx Power cannot find channel %d\n", channel);
2094 return -1;
2095 }
2096
2097 ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num;
2098 ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num;
2099 chan_info->ch_num = (u8) channel;
2100
2101 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
2102 channel, s, ch_i1, ch_i2);
2103
2104 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
2105 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
2106 m1 = &(priv->eeprom.calib_info.band_info[s].ch1.
2107 measurements[c][m]);
2108 m2 = &(priv->eeprom.calib_info.band_info[s].ch2.
2109 measurements[c][m]);
2110 omeas = &(chan_info->measurements[c][m]);
2111
2112 omeas->actual_pow =
2113 (u8) iwl4965_interpolate_value(channel, ch_i1,
2114 m1->actual_pow,
2115 ch_i2,
2116 m2->actual_pow);
2117 omeas->gain_idx =
2118 (u8) iwl4965_interpolate_value(channel, ch_i1,
2119 m1->gain_idx, ch_i2,
2120 m2->gain_idx);
2121 omeas->temperature =
2122 (u8) iwl4965_interpolate_value(channel, ch_i1,
2123 m1->temperature,
2124 ch_i2,
2125 m2->temperature);
2126 omeas->pa_det =
2127 (s8) iwl4965_interpolate_value(channel, ch_i1,
2128 m1->pa_det, ch_i2,
2129 m2->pa_det);
2130
2131 IWL_DEBUG_TXPOWER
2132 ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
2133 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
2134 IWL_DEBUG_TXPOWER
2135 ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
2136 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
2137 IWL_DEBUG_TXPOWER
2138 ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
2139 m1->pa_det, m2->pa_det, omeas->pa_det);
2140 IWL_DEBUG_TXPOWER
2141 ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
2142 m1->temperature, m2->temperature,
2143 omeas->temperature);
2144 }
2145 }
2146
2147 return 0;
2148}
2149
2150/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
2151 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
2152static s32 back_off_table[] = {
2153 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
2154 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
2155 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
2156 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
2157 10 /* CCK */
2158};
2159
2160/* Thermal compensation values for txpower for various frequency ranges ...
2161 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
bb8c093b 2162static struct iwl4965_txpower_comp_entry {
2163 s32 degrees_per_05db_a;
2164 s32 degrees_per_05db_a_denom;
2165} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
2166 {9, 2}, /* group 0 5.2, ch 34-43 */
2167 {4, 1}, /* group 1 5.2, ch 44-70 */
2168 {4, 1}, /* group 2 5.2, ch 71-124 */
2169 {4, 1}, /* group 3 5.2, ch 125-200 */
2170 {3, 1} /* group 4 2.4, ch all */
2171};
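/* Worked example of how iwl4965_fill_txpower_tbl() below consumes this
 * table: for group 1 (4 degrees per half-dB), a temperature 12 degrees C
 * above the factory measurement yields
 * iwl4965_math_div_round(12 * 1, 4, ...) = 3 half-dB steps of gain
 * compensation for that chain. */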
2172
2173static s32 get_min_power_index(s32 rate_power_index, u32 band)
2174{
2175 if (!band) {
2176 if ((rate_power_index & 7) <= 4)
2177 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
2178 }
2179 return MIN_TX_GAIN_INDEX;
2180}
2181
2182struct gain_entry {
2183 u8 dsp;
2184 u8 radio;
2185};
2186
2187static const struct gain_entry gain_table[2][108] = {
2188 /* 5.2GHz power gain index table */
2189 {
2190 {123, 0x3F}, /* highest txpower */
2191 {117, 0x3F},
2192 {110, 0x3F},
2193 {104, 0x3F},
2194 {98, 0x3F},
2195 {110, 0x3E},
2196 {104, 0x3E},
2197 {98, 0x3E},
2198 {110, 0x3D},
2199 {104, 0x3D},
2200 {98, 0x3D},
2201 {110, 0x3C},
2202 {104, 0x3C},
2203 {98, 0x3C},
2204 {110, 0x3B},
2205 {104, 0x3B},
2206 {98, 0x3B},
2207 {110, 0x3A},
2208 {104, 0x3A},
2209 {98, 0x3A},
2210 {110, 0x39},
2211 {104, 0x39},
2212 {98, 0x39},
2213 {110, 0x38},
2214 {104, 0x38},
2215 {98, 0x38},
2216 {110, 0x37},
2217 {104, 0x37},
2218 {98, 0x37},
2219 {110, 0x36},
2220 {104, 0x36},
2221 {98, 0x36},
2222 {110, 0x35},
2223 {104, 0x35},
2224 {98, 0x35},
2225 {110, 0x34},
2226 {104, 0x34},
2227 {98, 0x34},
2228 {110, 0x33},
2229 {104, 0x33},
2230 {98, 0x33},
2231 {110, 0x32},
2232 {104, 0x32},
2233 {98, 0x32},
2234 {110, 0x31},
2235 {104, 0x31},
2236 {98, 0x31},
2237 {110, 0x30},
2238 {104, 0x30},
2239 {98, 0x30},
2240 {110, 0x25},
2241 {104, 0x25},
2242 {98, 0x25},
2243 {110, 0x24},
2244 {104, 0x24},
2245 {98, 0x24},
2246 {110, 0x23},
2247 {104, 0x23},
2248 {98, 0x23},
2249 {110, 0x22},
2250 {104, 0x18},
2251 {98, 0x18},
2252 {110, 0x17},
2253 {104, 0x17},
2254 {98, 0x17},
2255 {110, 0x16},
2256 {104, 0x16},
2257 {98, 0x16},
2258 {110, 0x15},
2259 {104, 0x15},
2260 {98, 0x15},
2261 {110, 0x14},
2262 {104, 0x14},
2263 {98, 0x14},
2264 {110, 0x13},
2265 {104, 0x13},
2266 {98, 0x13},
2267 {110, 0x12},
2268 {104, 0x08},
2269 {98, 0x08},
2270 {110, 0x07},
2271 {104, 0x07},
2272 {98, 0x07},
2273 {110, 0x06},
2274 {104, 0x06},
2275 {98, 0x06},
2276 {110, 0x05},
2277 {104, 0x05},
2278 {98, 0x05},
2279 {110, 0x04},
2280 {104, 0x04},
2281 {98, 0x04},
2282 {110, 0x03},
2283 {104, 0x03},
2284 {98, 0x03},
2285 {110, 0x02},
2286 {104, 0x02},
2287 {98, 0x02},
2288 {110, 0x01},
2289 {104, 0x01},
2290 {98, 0x01},
2291 {110, 0x00},
2292 {104, 0x00},
2293 {98, 0x00},
2294 {93, 0x00},
2295 {88, 0x00},
2296 {83, 0x00},
2297 {78, 0x00},
2298 },
2299 /* 2.4GHz power gain index table */
2300 {
2301 {110, 0x3f}, /* highest txpower */
2302 {104, 0x3f},
2303 {98, 0x3f},
2304 {110, 0x3e},
2305 {104, 0x3e},
2306 {98, 0x3e},
2307 {110, 0x3d},
2308 {104, 0x3d},
2309 {98, 0x3d},
2310 {110, 0x3c},
2311 {104, 0x3c},
2312 {98, 0x3c},
2313 {110, 0x3b},
2314 {104, 0x3b},
2315 {98, 0x3b},
2316 {110, 0x3a},
2317 {104, 0x3a},
2318 {98, 0x3a},
2319 {110, 0x39},
2320 {104, 0x39},
2321 {98, 0x39},
2322 {110, 0x38},
2323 {104, 0x38},
2324 {98, 0x38},
2325 {110, 0x37},
2326 {104, 0x37},
2327 {98, 0x37},
2328 {110, 0x36},
2329 {104, 0x36},
2330 {98, 0x36},
2331 {110, 0x35},
2332 {104, 0x35},
2333 {98, 0x35},
2334 {110, 0x34},
2335 {104, 0x34},
2336 {98, 0x34},
2337 {110, 0x33},
2338 {104, 0x33},
2339 {98, 0x33},
2340 {110, 0x32},
2341 {104, 0x32},
2342 {98, 0x32},
2343 {110, 0x31},
2344 {104, 0x31},
2345 {98, 0x31},
2346 {110, 0x30},
2347 {104, 0x30},
2348 {98, 0x30},
2349 {110, 0x6},
2350 {104, 0x6},
2351 {98, 0x6},
2352 {110, 0x5},
2353 {104, 0x5},
2354 {98, 0x5},
2355 {110, 0x4},
2356 {104, 0x4},
2357 {98, 0x4},
2358 {110, 0x3},
2359 {104, 0x3},
2360 {98, 0x3},
2361 {110, 0x2},
2362 {104, 0x2},
2363 {98, 0x2},
2364 {110, 0x1},
2365 {104, 0x1},
2366 {98, 0x1},
2367 {110, 0x0},
2368 {104, 0x0},
2369 {98, 0x0},
2370 {97, 0},
2371 {96, 0},
2372 {95, 0},
2373 {94, 0},
2374 {93, 0},
2375 {92, 0},
2376 {91, 0},
2377 {90, 0},
2378 {89, 0},
2379 {88, 0},
2380 {87, 0},
2381 {86, 0},
2382 {85, 0},
2383 {84, 0},
2384 {83, 0},
2385 {82, 0},
2386 {81, 0},
2387 {80, 0},
2388 {79, 0},
2389 {78, 0},
2390 {77, 0},
2391 {76, 0},
2392 {75, 0},
2393 {74, 0},
2394 {73, 0},
2395 {72, 0},
2396 {71, 0},
2397 {70, 0},
2398 {69, 0},
2399 {68, 0},
2400 {67, 0},
2401 {66, 0},
2402 {65, 0},
2403 {64, 0},
2404 {63, 0},
2405 {62, 0},
2406 {61, 0},
2407 {60, 0},
2408 {59, 0},
2409 }
2410};
2411
bb8c093b 2412static int iwl4965_fill_txpower_tbl(struct iwl4965_priv *priv, u8 band, u16 channel,
b481de9c 2413 u8 is_fat, u8 ctrl_chan_high,
bb8c093b 2414 struct iwl4965_tx_power_db *tx_power_tbl)
2415{
2416 u8 saturation_power;
2417 s32 target_power;
2418 s32 user_target_power;
2419 s32 power_limit;
2420 s32 current_temp;
2421 s32 reg_limit;
2422 s32 current_regulatory;
2423 s32 txatten_grp = CALIB_CH_GROUP_MAX;
2424 int i;
2425 int c;
2426 const struct iwl4965_channel_info *ch_info = NULL;
2427 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info;
2428 const struct iwl4965_eeprom_calib_measure *measurement;
2429 s16 voltage;
2430 s32 init_voltage;
2431 s32 voltage_compensation;
2432 s32 degrees_per_05db_num;
2433 s32 degrees_per_05db_denom;
2434 s32 factory_temp;
2435 s32 temperature_comp[2];
2436 s32 factory_gain_index[2];
2437 s32 factory_actual_pwr[2];
2438 s32 power_index;
2439
2440 /* Sanity check requested level (dBm) */
2441 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
2442 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
2443 priv->user_txpower_limit);
2444 return -EINVAL;
2445 }
2446 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
2447 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
2448 priv->user_txpower_limit);
2449 return -EINVAL;
2450 }
2451
2452 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
2453 * are used for indexing into txpower table) */
2454 user_target_power = 2 * priv->user_txpower_limit;
2455
2456 /* Get current (RXON) channel, band, width */
2457 ch_info =
8318d78a 2458 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
2459
2460 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
2461 is_fat);
2462
2463 if (!ch_info)
2464 return -EINVAL;
2465
2466 /* get txatten group, used to select 1) thermal txpower adjustment
2467 * and 2) mimo txpower balance between Tx chains. */
2468 txatten_grp = iwl4965_get_tx_atten_grp(channel);
2469 if (txatten_grp < 0)
2470 return -EINVAL;
2471
2472 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
2473 channel, txatten_grp);
2474
2475 if (is_fat) {
2476 if (ctrl_chan_high)
2477 channel -= 2;
2478 else
2479 channel += 2;
2480 }
2481
2482 /* hardware txpower limits ...
2483 * saturation (clipping distortion) txpowers are in half-dBm */
2484 if (band)
2485 saturation_power = priv->eeprom.calib_info.saturation_power24;
2486 else
2487 saturation_power = priv->eeprom.calib_info.saturation_power52;
2488
2489 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2490 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
2491 if (band)
2492 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
2493 else
2494 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
2495 }
2496
2497 /* regulatory txpower limits ... reg_limit values are in half-dBm,
2498 * max_power_avg values are in dBm, convert * 2 */
2499 if (is_fat)
2500 reg_limit = ch_info->fat_max_power_avg * 2;
2501 else
2502 reg_limit = ch_info->max_power_avg * 2;
2503
2504 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
2505 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
2506 if (band)
2507 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
2508 else
2509 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
2510 }
2511
2512 /* Interpolate txpower calibration values for this channel,
2513 * based on factory calibration tests on spaced channels. */
2514 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2515
2516 /* calculate tx gain adjustment based on power supply voltage */
2517 voltage = priv->eeprom.calib_info.voltage;
2518 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2519 voltage_compensation =
2520 iwl4965_get_voltage_compensation(voltage, init_voltage);
2521
2522 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
2523 init_voltage,
2524 voltage, voltage_compensation);
2525
2526 /* get current temperature (Celsius) */
2527 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
2528 current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
2529 current_temp = KELVIN_TO_CELSIUS(current_temp);
2530
2531 /* select thermal txpower adjustment params, based on channel group
2532 * (same frequency group used for mimo txatten adjustment) */
2533 degrees_per_05db_num =
2534 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
2535 degrees_per_05db_denom =
2536 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
2537
2538 /* get per-chain txpower values from factory measurements */
2539 for (c = 0; c < 2; c++) {
2540 measurement = &ch_eeprom_info.measurements[c][1];
2541
2542 /* txgain adjustment (in half-dB steps) based on difference
2543 * between factory and current temperature */
2544 factory_temp = measurement->temperature;
2545 iwl4965_math_div_round((current_temp - factory_temp) *
2546 degrees_per_05db_denom,
2547 degrees_per_05db_num,
2548 &temperature_comp[c]);
2549
2550 factory_gain_index[c] = measurement->gain_idx;
2551 factory_actual_pwr[c] = measurement->actual_pow;
2552
2553 IWL_DEBUG_TXPOWER("chain = %d\n", c);
2554 IWL_DEBUG_TXPOWER("fctry tmp %d, "
2555 "curr tmp %d, comp %d steps\n",
2556 factory_temp, current_temp,
2557 temperature_comp[c]);
2558
2559 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
2560 factory_gain_index[c],
2561 factory_actual_pwr[c]);
2562 }
2563
2564 /* for each of 33 bit-rates (including 1 for CCK) */
2565 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
2566 u8 is_mimo_rate;
bb8c093b 2567 union iwl4965_tx_power_dual_stream tx_power;
2568
2569 /* for mimo, reduce each chain's txpower by half
2570 * (3dB, 6 steps), so total output power is regulatory
2571 * compliant. */
2572 if (i & 0x8) {
2573 current_regulatory = reg_limit -
2574 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
2575 is_mimo_rate = 1;
2576 } else {
2577 current_regulatory = reg_limit;
2578 is_mimo_rate = 0;
2579 }
2580
2581 /* find txpower limit, either hardware or regulatory */
2582 power_limit = saturation_power - back_off_table[i];
2583 if (power_limit > current_regulatory)
2584 power_limit = current_regulatory;
2585
2586 /* reduce user's txpower request if necessary
2587 * for this rate on this channel */
2588 target_power = user_target_power;
2589 if (target_power > power_limit)
2590 target_power = power_limit;
2591
2592 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
2593 i, saturation_power - back_off_table[i],
2594 current_regulatory, user_target_power,
2595 target_power);
2596
2597 /* for each of 2 Tx chains (radio transmitters) */
2598 for (c = 0; c < 2; c++) {
2599 s32 atten_value;
2600
2601 if (is_mimo_rate)
2602 atten_value =
2603 (s32)le32_to_cpu(priv->card_alive_init.
2604 tx_atten[txatten_grp][c]);
2605 else
2606 atten_value = 0;
2607
2608 /* calculate index; higher index means lower txpower */
2609 power_index = (u8) (factory_gain_index[c] -
2610 (target_power -
2611 factory_actual_pwr[c]) -
2612 temperature_comp[c] -
2613 voltage_compensation +
2614 atten_value);
2615
2616/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
2617 power_index); */
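 /* Illustrative numbers (not from real hardware): factory gain
 * index 70, target power 4 half-dB below the factory-measured
 * power, temperature comp +3, no voltage or mimo adjustment gives
 * power_index = 70 - (-4) - 3 - 0 + 0 = 71. */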
2618
2619 if (power_index < get_min_power_index(i, band))
2620 power_index = get_min_power_index(i, band);
2621
2622 /* adjust 5 GHz index to support negative indexes */
2623 if (!band)
2624 power_index += 9;
2625
2626 /* CCK, rate 32, reduce txpower for CCK */
2627 if (i == POWER_TABLE_CCK_ENTRY)
2628 power_index +=
2629 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
2630
2631 /* stay within the table! */
2632 if (power_index > 107) {
2633 IWL_WARNING("txpower index %d > 107\n",
2634 power_index);
2635 power_index = 107;
2636 }
2637 if (power_index < 0) {
2638 IWL_WARNING("txpower index %d < 0\n",
2639 power_index);
2640 power_index = 0;
2641 }
2642
2643 /* fill txpower command for this rate/chain */
2644 tx_power.s.radio_tx_gain[c] =
2645 gain_table[band][power_index].radio;
2646 tx_power.s.dsp_predis_atten[c] =
2647 gain_table[band][power_index].dsp;
2648
2649 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
2650 "gain 0x%02x dsp %d\n",
2651 c, atten_value, power_index,
2652 tx_power.s.radio_tx_gain[c],
2653 tx_power.s.dsp_predis_atten[c]);
2654 }/* for each chain */
2655
2656 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
2657
2658 }/* for each rate */
2659
2660 return 0;
2661}
2662
2663/**
bb8c093b 2664 * iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit
2665 *
2666 * Uses the active RXON for channel, band, and characteristics (fat, high)
2667 * The power limit is taken from priv->user_txpower_limit.
2668 */
bb8c093b 2669int iwl4965_hw_reg_send_txpower(struct iwl4965_priv *priv)
b481de9c 2670{
bb8c093b 2671 struct iwl4965_txpowertable_cmd cmd = { 0 };
2672 int rc = 0;
2673 u8 band = 0;
2674 u8 is_fat = 0;
2675 u8 ctrl_chan_high = 0;
2676
2677 if (test_bit(STATUS_SCANNING, &priv->status)) {
2678 /* If this gets hit a lot, switch it to a BUG() and catch
2679 * the stack trace to find out who is calling this during
2680 * a scan. */
2681 IWL_WARNING("TX Power requested while scanning!\n");
2682 return -EAGAIN;
2683 }
2684
8318d78a 2685 band = priv->band == IEEE80211_BAND_2GHZ;
2686
2687 is_fat = is_fat_channel(priv->active_rxon.flags);
2688
2689 if (is_fat &&
2690 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2691 ctrl_chan_high = 1;
2692
2693 cmd.band = band;
2694 cmd.channel = priv->active_rxon.channel;
2695
2696 rc = iwl4965_fill_txpower_tbl(priv, band,
2697 le16_to_cpu(priv->active_rxon.channel),
2698 is_fat, ctrl_chan_high, &cmd.tx_power);
2699 if (rc)
2700 return rc;
2701
bb8c093b 2702 rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
2703 return rc;
2704}
2705
bb8c093b 2706int iwl4965_hw_channel_switch(struct iwl4965_priv *priv, u16 channel)
2707{
2708 int rc;
2709 u8 band = 0;
2710 u8 is_fat = 0;
2711 u8 ctrl_chan_high = 0;
2712 struct iwl4965_channel_switch_cmd cmd = { 0 };
2713 const struct iwl4965_channel_info *ch_info;
b481de9c 2714
8318d78a 2715 band = priv->band == IEEE80211_BAND_2GHZ;
b481de9c 2716
8318d78a 2717 ch_info = iwl4965_get_channel_info(priv, priv->band, channel);
2718
2719 is_fat = is_fat_channel(priv->staging_rxon.flags);
2720
2721 if (is_fat &&
2722 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2723 ctrl_chan_high = 1;
2724
2725 cmd.band = band;
2726 cmd.expect_beacon = 0;
2727 cmd.channel = cpu_to_le16(channel);
2728 cmd.rxon_flags = priv->active_rxon.flags;
2729 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
2730 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
2731 if (ch_info)
2732 cmd.expect_beacon = is_channel_radar(ch_info);
2733 else
2734 cmd.expect_beacon = 1;
2735
2736 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
2737 ctrl_chan_high, &cmd.tx_power);
2738 if (rc) {
2739 IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
2740 return rc;
2741 }
2742
bb8c093b 2743 rc = iwl4965_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
2744 return rc;
2745}
2746
2747#define RTS_HCCA_RETRY_LIMIT 3
2748#define RTS_DFAULT_RETRY_LIMIT 60
2749
2750void iwl4965_hw_build_tx_cmd_rate(struct iwl4965_priv *priv,
2751 struct iwl4965_cmd *cmd,
2752 struct ieee80211_tx_control *ctrl,
2753 struct ieee80211_hdr *hdr, int sta_id,
2754 int is_hcca)
2755{
87e4f7df 2756 struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
2757 u8 rts_retry_limit = 0;
2758 u8 data_retry_limit = 0;
b481de9c 2759 u16 fc = le16_to_cpu(hdr->frame_control);
2760 u8 rate_plcp;
2761 u16 rate_flags = 0;
8318d78a 2762 int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);
b481de9c 2763
87e4f7df 2764 rate_plcp = iwl4965_rates[rate_idx].plcp;
2765
2766 rts_retry_limit = (is_hcca) ?
2767 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
2768
2769 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
2770 rate_flags |= RATE_MCS_CCK_MSK;
2771
2772
2773 if (ieee80211_is_probe_response(fc)) {
2774 data_retry_limit = 3;
2775 if (data_retry_limit < rts_retry_limit)
2776 rts_retry_limit = data_retry_limit;
2777 } else
2778 data_retry_limit = IWL_DEFAULT_TX_RETRY;
2779
2780 if (priv->data_retry_limit != -1)
2781 data_retry_limit = priv->data_retry_limit;
2782
2783
2784 if (ieee80211_is_data(fc)) {
2785 tx->initial_rate_index = 0;
2786 tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
2787 } else {
2788 switch (fc & IEEE80211_FCTL_STYPE) {
2789 case IEEE80211_STYPE_AUTH:
2790 case IEEE80211_STYPE_DEAUTH:
2791 case IEEE80211_STYPE_ASSOC_REQ:
2792 case IEEE80211_STYPE_REASSOC_REQ:
2793 if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
2794 tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2795 tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
2796 }
2797 break;
2798 default:
2799 break;
2800 }
2801
2802 /* Alternate between antenna A and B for successive frames */
2803 if (priv->use_ant_b_for_management_frame) {
2804 priv->use_ant_b_for_management_frame = 0;
2805 rate_flags |= RATE_MCS_ANT_B_MSK;
2806 } else {
2807 priv->use_ant_b_for_management_frame = 1;
2808 rate_flags |= RATE_MCS_ANT_A_MSK;
2809 }
2810 }
2811
2812 tx->rts_retry_limit = rts_retry_limit;
2813 tx->data_retry_limit = data_retry_limit;
2814 tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
2815}
2816
bb8c093b 2817int iwl4965_hw_get_rx_read(struct iwl4965_priv *priv)
b481de9c 2818{
bb8c093b 2819 struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt;
2820
2821 return IWL_GET_BITS(*shared_data, rb_closed_stts_rb_num);
2822}
2823
bb8c093b 2824int iwl4965_hw_get_temperature(struct iwl4965_priv *priv)
b481de9c
ZY
2825{
2826 return priv->temperature;
2827}
2828
2829unsigned int iwl4965_hw_get_beacon_cmd(struct iwl4965_priv *priv,
2830 struct iwl4965_frame *frame, u8 rate)
b481de9c 2831{
bb8c093b 2832 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
2833 unsigned int frame_size;
2834
2835 tx_beacon_cmd = &frame->u.beacon;
2836 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2837
2838 tx_beacon_cmd->tx.sta_id = IWL4965_BROADCAST_ID;
2839 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2840
bb8c093b 2841 frame_size = iwl4965_fill_beacon_frame(priv,
b481de9c 2842 tx_beacon_cmd->frame,
bb8c093b 2843 iwl4965_broadcast_addr,
2844 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2845
2846 BUG_ON(frame_size > MAX_MPDU_SIZE);
2847 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
2848
2849 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
2850 tx_beacon_cmd->tx.rate_n_flags =
bb8c093b 2851 iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
2852 else
2853 tx_beacon_cmd->tx.rate_n_flags =
bb8c093b 2854 iwl4965_hw_set_rate_n_flags(rate, 0);
2855
2856 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2857 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
2858 return (sizeof(*tx_beacon_cmd) + frame_size);
2859}
2860
2861/*
2862 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
2863 * given Tx queue, and enable the DMA channel used for that queue.
2864 *
2865 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
2866 * channels supported in hardware.
2867 */
bb8c093b 2868int iwl4965_hw_tx_queue_init(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
2869{
2870 int rc;
2871 unsigned long flags;
2872 int txq_id = txq->q.id;
2873
2874 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 2875 rc = iwl4965_grab_nic_access(priv);
2876 if (rc) {
2877 spin_unlock_irqrestore(&priv->lock, flags);
2878 return rc;
2879 }
2880
8b6eaea8 2881 /* Circular buffer (TFD queue in DRAM) physical base address */
bb8c093b 2882 iwl4965_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
b481de9c 2883 txq->q.dma_addr >> 8);
2884
2885 /* Enable DMA channel, using same id as for TFD queue */
bb8c093b 2886 iwl4965_write_direct32(
2887 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
2888 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2889 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
bb8c093b 2890 iwl4965_release_nic_access(priv);
2891 spin_unlock_irqrestore(&priv->lock, flags);
2892
2893 return 0;
2894}
2895
bb8c093b 2896int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl4965_priv *priv, void *ptr,
2897 dma_addr_t addr, u16 len)
2898{
2899 int index, is_odd;
bb8c093b 2900 struct iwl4965_tfd_frame *tfd = ptr;
2901 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
2902
8b6eaea8 2903 /* Each TFD can point to a maximum of 20 Tx buffers */
2904 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
2905 IWL_ERROR("Error can not send more than %d chunks\n",
2906 MAX_NUM_OF_TBS);
2907 return -EINVAL;
2908 }
2909
2910 index = num_tbs / 2;
2911 is_odd = num_tbs & 0x1;
2912
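 /* Each pa[] entry holds two buffer descriptors: even-numbered TBs
 * fill the tb1_* fields, odd-numbered TBs fill the tb2_* fields,
 * so e.g. the fourth buffer (num_tbs == 3) lands in the tb2 half
 * of pa[1]. */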
2913 if (!is_odd) {
2914 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
2915 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
6a218f6f 2916 iwl_get_dma_hi_address(addr));
2917 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
2918 } else {
2919 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
2920 (u32) (addr & 0xffff));
2921 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
2922 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
2923 }
2924
2925 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
2926
2927 return 0;
2928}
2929
bb8c093b 2930static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv)
2931{
2932 u16 hw_version = priv->eeprom.board_revision_4965;
2933
2934 IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n",
2935 ((hw_version >> 8) & 0x0F),
2936 ((hw_version >> 8) >> 4), (hw_version & 0x00FF));
2937
2938 IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
2939 priv->eeprom.board_pba_number_4965);
2940}
2941
2942#define IWL_TX_CRC_SIZE 4
2943#define IWL_TX_DELIMITER_SIZE 4
2944
2945/**
2946 * iwl4965_tx_queue_update_wr_ptr - Set up entry in Tx byte-count array
2947 */
2948int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv,
2949 struct iwl4965_tx_queue *txq, u16 byte_cnt)
2950{
2951 int len;
2952 int txq_id = txq->q.id;
bb8c093b 2953 struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt;
2954
2955 if (txq->need_update == 0)
2956 return 0;
2957
2958 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
2959
8b6eaea8 2960 /* Set up byte count within first 256 entries */
b481de9c 2961 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
fc4b6853 2962 tfd_offset[txq->q.write_ptr], byte_cnt, len);
b481de9c 2963
8b6eaea8 2964 /* If within first 64 entries, duplicate at end */
fc4b6853 2965 if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE)
b481de9c 2966 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
fc4b6853 2967 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr],
2968 byte_cnt, len);
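 /* The mirror entries past IWL4965_QUEUE_SIZE presumably let the
 * scheduler read a full window of byte counts without special-casing
 * the wrap of the 256-entry ring; only the first IWL4965_MAX_WIN_SIZE
 * slots need duplicating. */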
2969
2970 return 0;
2971}
2972
2973/**
2974 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
2975 *
2976 * Selects how many and which Rx receivers/antennas/chains to use.
2977 * This should not be used for the scan command ... it puts data in the wrong place.
2978 */
bb8c093b 2979void iwl4965_set_rxon_chain(struct iwl4965_priv *priv)
2980{
2981 u8 is_single = is_single_stream(priv);
2982 u8 idle_state, rx_state;
2983
2984 priv->staging_rxon.rx_chain = 0;
2985 rx_state = idle_state = 3;
2986
2987 /* Tell uCode which antennas are actually connected.
2988 * Before first association, we assume all antennas are connected.
2989 * Just after first association, iwl4965_noise_calibration()
2990 * checks which antennas actually *are* connected. */
2991 priv->staging_rxon.rx_chain |=
2992 cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);
2993
2994 /* How many receivers should we use? */
2995 iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
2996 priv->staging_rxon.rx_chain |=
2997 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
2998 priv->staging_rxon.rx_chain |=
2999 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
3000
3001 if (!is_single && (rx_state >= 2) &&
3002 !test_bit(STATUS_POWER_PMI, &priv->status))
3003 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
3004 else
3005 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
3006
3007 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
3008}
3009
3010/**
3011 * sign_extend - Sign extend a value using specified bit as sign-bit
3012 *
3013 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
3014 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
3015 *
3016 * @param oper value to sign extend
3017 * @param index 0 based bit index (0<=index<32) to sign bit
3018 */
3019static s32 sign_extend(u32 oper, int index)
3020{
3021 u8 shift = 31 - index;
3022
3023 return (s32)(oper << shift) >> shift;
3024}
3025
3026/**
3027 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin)
3028 * @statistics: Provides the temperature reading from the uCode
3029 *
3030 * A return of <0 indicates bogus data in the statistics
3031 */
bb8c093b 3032int iwl4965_get_temperature(const struct iwl4965_priv *priv)
3033{
3034 s32 temperature;
3035 s32 vt;
3036 s32 R1, R2, R3;
3037 u32 R4;
3038
3039 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
3040 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
3041 IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
3042 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
3043 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
3044 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
3045 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
3046 } else {
3047 IWL_DEBUG_TEMP("Running temperature calibration\n");
3048 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
3049 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
3050 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
3051 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
3052 }
3053
3054 /*
8b6eaea8 3055 * Temperature is only 23 bits, so sign extend out to 32.
3056 *
3057 * NOTE If we haven't received a statistics notification yet
3058 * with an updated temperature, use R4 provided to us in the
3059 * "initialize" ALIVE response.
3060 */
3061 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
3062 vt = sign_extend(R4, 23);
3063 else
3064 vt = sign_extend(
3065 le32_to_cpu(priv->statistics.general.temperature), 23);
3066
3067 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n",
3068 R1, R2, R3, vt);
3069
3070 if (R3 == R1) {
3071 IWL_ERROR("Calibration conflict R1 == R3\n");
3072 return -1;
3073 }
3074
3075 /* Calculate temperature in degrees Kelvin, adjust by 97%.
3076 * Add offset to center the adjustment around 0 degrees Centigrade. */
3077 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
3078 temperature /= (R3 - R1);
3079 temperature = (temperature * 97) / 100 +
3080 TEMPERATURE_CALIB_KELVIN_OFFSET;
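 /* Equivalently: temperature = 0.97 * TEMPERATURE_CALIB_A_VAL *
 * (vt - R2) / (R3 - R1) + TEMPERATURE_CALIB_KELVIN_OFFSET, evaluated
 * here in integer arithmetic. */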
3081
3082 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
3083 KELVIN_TO_CELSIUS(temperature));
3084
3085 return temperature;
3086}
3087
3088/* Adjust Txpower only if temperature variance is greater than threshold. */
3089#define IWL_TEMPERATURE_THRESHOLD 3
3090
3091/**
3092 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
3093 *
3094 * If the temperature has changed sufficiently, then a recalibration
3095 * is needed.
3096 *
3097 * Assumes caller will replace priv->last_temperature once calibration
3098 * executed.
3099 */
bb8c093b 3100static int iwl4965_is_temp_calib_needed(struct iwl4965_priv *priv)
3101{
3102 int temp_diff;
3103
3104 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
3105 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
3106 return 0;
3107 }
3108
3109 temp_diff = priv->temperature - priv->last_temperature;
3110
3111 /* get absolute value */
3112 if (temp_diff < 0) {
3113 IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff);
3114 temp_diff = -temp_diff;
3115 } else if (temp_diff == 0)
3116 IWL_DEBUG_POWER("Same temp, \n");
3117 else
3118 IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff);
3119
3120 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
3121 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
3122 return 0;
3123 }
3124
3125 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
3126
3127 return 1;
3128}
3129
3130/* Calculate noise level, based on measurements during network silence just
3131 * before arriving beacon. This measurement can be done only if we know
3132 * exactly when to expect beacons, therefore only when we're associated. */
bb8c093b 3133static void iwl4965_rx_calc_noise(struct iwl4965_priv *priv)
3134{
3135 struct statistics_rx_non_phy *rx_info
3136 = &(priv->statistics.rx.general);
3137 int num_active_rx = 0;
3138 int total_silence = 0;
3139 int bcn_silence_a =
3140 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
3141 int bcn_silence_b =
3142 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
3143 int bcn_silence_c =
3144 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
3145
3146 if (bcn_silence_a) {
3147 total_silence += bcn_silence_a;
3148 num_active_rx++;
3149 }
3150 if (bcn_silence_b) {
3151 total_silence += bcn_silence_b;
3152 num_active_rx++;
3153 }
3154 if (bcn_silence_c) {
3155 total_silence += bcn_silence_c;
3156 num_active_rx++;
3157 }
3158
3159 /* Average among active antennas */
3160 if (num_active_rx)
3161 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
3162 else
3163 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3164
3165 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
3166 bcn_silence_a, bcn_silence_b, bcn_silence_c,
3167 priv->last_rx_noise);
3168}
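/* Example with made-up readings: beacon silences of 40, 45 and 0 mean two
 * active receivers, so last_rx_noise = (40 + 45) / 2 - 107 = -65 dBm;
 * with all three readings zero the IWL_NOISE_MEAS_NOT_AVAILABLE sentinel
 * is reported instead. */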
3169
bb8c093b 3170void iwl4965_hw_rx_statistics(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3171{
bb8c093b 3172 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3173 int change;
3174 s32 temp;
3175
3176 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
3177 (int)sizeof(priv->statistics), pkt->len);
3178
3179 change = ((priv->statistics.general.temperature !=
3180 pkt->u.stats.general.temperature) ||
3181 ((priv->statistics.flag &
3182 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
3183 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));
3184
3185 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
3186
3187 set_bit(STATUS_STATISTICS, &priv->status);
3188
3189 /* Reschedule the statistics timer to occur in
3190 * REG_RECALIB_PERIOD seconds to ensure we get a
3191 * thermal update even if the uCode doesn't give
3192 * us one */
3193 mod_timer(&priv->statistics_periodic, jiffies +
3194 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
3195
3196 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3197 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
3198 iwl4965_rx_calc_noise(priv);
c8b0e6e1 3199#ifdef CONFIG_IWL4965_SENSITIVITY
3200 queue_work(priv->workqueue, &priv->sensitivity_work);
3201#endif
3202 }
3203
3204 /* If the hardware hasn't reported a change in
3205 * temperature then don't bother computing a
3206 * calibrated temperature value */
3207 if (!change)
3208 return;
3209
3210 temp = iwl4965_get_temperature(priv);
3211 if (temp < 0)
3212 return;
3213
3214 if (priv->temperature != temp) {
3215 if (priv->temperature)
3216 IWL_DEBUG_TEMP("Temperature changed "
3217 "from %dC to %dC\n",
3218 KELVIN_TO_CELSIUS(priv->temperature),
3219 KELVIN_TO_CELSIUS(temp));
3220 else
3221 IWL_DEBUG_TEMP("Temperature "
3222 "initialized to %dC\n",
3223 KELVIN_TO_CELSIUS(temp));
3224 }
3225
3226 priv->temperature = temp;
3227 set_bit(STATUS_TEMPERATURE, &priv->status);
3228
3229 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3230 iwl4965_is_temp_calib_needed(priv))
3231 queue_work(priv->workqueue, &priv->txpower_work);
3232}
3233
3234static void iwl4965_add_radiotap(struct iwl4965_priv *priv,
3235 struct sk_buff *skb,
3236 struct iwl4965_rx_phy_res *rx_start,
3237 struct ieee80211_rx_status *stats,
3238 u32 ampdu_status)
3239{
3240 s8 signal = stats->ssi;
3241 s8 noise = 0;
8318d78a 3242 int rate = stats->rate_idx;
3243 u64 tsf = stats->mactime;
3244 __le16 phy_flags_hw = rx_start->phy_flags;
3245 struct iwl4965_rt_rx_hdr {
3246 struct ieee80211_radiotap_header rt_hdr;
3247 __le64 rt_tsf; /* TSF */
3248 u8 rt_flags; /* radiotap packet flags */
3249 u8 rt_rate; /* rate in 500kb/s */
3250 __le16 rt_channelMHz; /* channel in MHz */
3251 __le16 rt_chbitmask; /* channel bitfield */
3252 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
3253 s8 rt_dbmnoise;
3254 u8 rt_antenna; /* antenna number */
3255 } __attribute__ ((packed)) *iwl4965_rt;
3256
3257 /* TODO: We won't have enough headroom for HT frames. Fix it later. */
3258 if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
3259 if (net_ratelimit())
3260 printk(KERN_ERR "not enough headroom [%d] for "
01c20986 3261 "radiotap head [%zd]\n",
3262 skb_headroom(skb), sizeof(*iwl4965_rt));
3263 return;
3264 }
3265
3266 /* put radiotap header in front of 802.11 header and data */
3267 iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
3268
3269 /* initialise radiotap header */
3270 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3271 iwl4965_rt->rt_hdr.it_pad = 0;
3272
3273 /* total header + data */
3274 put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)),
3275 &iwl4965_rt->rt_hdr.it_len);
3276
3277 /* Indicate all the fields we add to the radiotap header */
3278 put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3279 (1 << IEEE80211_RADIOTAP_FLAGS) |
3280 (1 << IEEE80211_RADIOTAP_RATE) |
3281 (1 << IEEE80211_RADIOTAP_CHANNEL) |
3282 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3283 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3284 (1 << IEEE80211_RADIOTAP_ANTENNA)),
3285 &iwl4965_rt->rt_hdr.it_present);
3286
3287 /* Zero the flags, we'll add to them as we go */
3288 iwl4965_rt->rt_flags = 0;
3289
3290 put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf);
3291
3292 iwl4965_rt->rt_dbmsignal = signal;
3293 iwl4965_rt->rt_dbmnoise = noise;
3294
3295 /* Convert the channel frequency and set the flags */
3296 put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
3297 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
3298 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
3299 IEEE80211_CHAN_5GHZ),
3300 &iwl4965_rt->rt_chbitmask);
3301 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
3302 put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
3303 IEEE80211_CHAN_2GHZ),
3304 &iwl4965_rt->rt_chbitmask);
3305 else /* 802.11g */
3306 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
3307 IEEE80211_CHAN_2GHZ),
3308 &iwl4965_rt->rt_chbitmask);
3309
3310 if (rate == -1)
3311 iwl4965_rt->rt_rate = 0;
3312 else
3313 iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;
3314
3315 /*
3316 * "antenna number"
3317 *
3318 * It seems that the antenna field in the phy flags value
3319 * is actually a bitfield. This is undefined by radiotap,
3320 * it wants an actual antenna number but I always get "7"
3321 * for most legacy frames I receive indicating that the
3322 * same frame was received on all three RX chains.
3323 *
3324 * I think this field should be removed in favour of a
3325 * new 802.11n radiotap field "RX chains" that is defined
3326 * as a bitmask.
3327 */
3328 iwl4965_rt->rt_antenna =
3329 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
3330
3331 /* set the preamble flag if appropriate */
3332 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
3333 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3334
3335 stats->flag |= RX_FLAG_RADIOTAP;
3336}
3337
bb8c093b 3338static void iwl4965_handle_data_packet(struct iwl4965_priv *priv, int is_data,
b481de9c 3339 int include_phy,
bb8c093b 3340 struct iwl4965_rx_mem_buffer *rxb,
3341 struct ieee80211_rx_status *stats)
3342{
bb8c093b 3343 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
3344 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3345 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
3346 struct ieee80211_hdr *hdr;
3347 u16 len;
3348 __le32 *rx_end;
3349 unsigned int skblen;
3350 u32 ampdu_status;
3351
3352 if (!include_phy && priv->last_phy_res[0])
3353 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3354
3355 if (!rx_start) {
3356 IWL_ERROR("MPDU frame without PHY data\n");
3357 return;
3358 }
3359 if (include_phy) {
3360 hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
3361 rx_start->cfg_phy_cnt);
3362
3363 len = le16_to_cpu(rx_start->byte_count);
3364
3365 rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
3366 sizeof(struct iwl4965_rx_phy_res) +
3367 rx_start->cfg_phy_cnt + len);
3368
3369 } else {
3370 struct iwl4965_rx_mpdu_res_start *amsdu =
3371 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3372
3373 hdr = (struct ieee80211_hdr *)(pkt->u.raw +
3374 sizeof(struct iwl4965_rx_mpdu_res_start));
3375 len = le16_to_cpu(amsdu->byte_count);
3376 rx_start->byte_count = amsdu->byte_count;
3377 rx_end = (__le32 *) (((u8 *) hdr) + len);
3378 }
9ee1ba47 3379 if (len > priv->hw_setting.max_pkt_size || len < 16) {
12342c47 3380 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
3381 return;
3382 }
3383
3384 ampdu_status = le32_to_cpu(*rx_end);
3385 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
3386
3387 /* start from MAC */
3388 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
3389 skb_put(rxb->skb, len); /* end where data ends */
3390
3391 /* We only process data packets if the interface is open */
3392 if (unlikely(!priv->is_open)) {
3393 IWL_DEBUG_DROP_LIMIT
3394 ("Dropping packet while interface is not open.\n");
3395 return;
3396 }
3397
3398 stats->flag = 0;
3399 hdr = (struct ieee80211_hdr *)rxb->skb->data;
3400
3401 if (iwl4965_param_hwcrypto)
3402 iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats);
b481de9c 3403
3404 if (priv->add_radiotap)
3405 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
3406
3407 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3408 priv->alloc_rxb_skb--;
3409 rxb->skb = NULL;
3410#ifdef LED
3411 priv->led_packets += len;
bb8c093b 3412 iwl4965_setup_activity_timer(priv);
3413#endif
3414}
3415
3416/* Calc max signal level (dBm) among 3 possible receivers */
3417static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3418{
3419 /* data from PHY/DSP regarding signal strength, etc.,
3420 * contents are always there, not configurable by host. */
3421 struct iwl4965_rx_non_cfg_phy *ncphy =
3422 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
3423 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
3424 >> IWL_AGC_DB_POS;
3425
3426 u32 valid_antennae =
3427 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
3428 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
3429 u8 max_rssi = 0;
3430 u32 i;
3431
3432 /* Find max rssi among 3 possible receivers.
3433 * These values are measured by the digital signal processor (DSP).
3434 * They should stay fairly constant even as the signal strength varies,
3435 * if the radio's automatic gain control (AGC) is working right.
3436 * AGC value (see below) will provide the "interesting" info. */
3437 for (i = 0; i < 3; i++)
3438 if (valid_antennae & (1 << i))
3439 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
3440
3441 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
3442 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
3443 max_rssi, agc);
3444
3445 /* dBm = max_rssi dB - agc dB - constant.
3446 * Higher AGC (higher radio gain) means lower signal. */
3447 return (max_rssi - agc - IWL_RSSI_OFFSET);
3448}
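/*
 * Worked example (added illustration; the numbers are arbitrary and
 * IWL_RSSI_OFFSET is assumed to be the usual 44 dB constant).  If the
 * strongest chain reports max_rssi = 97 and the AGC field decodes to
 * agc = 100, the function returns 97 - 100 - 44 = -47 dBm; raising the
 * AGC reading by 10 dB (more radio gain) lowers the reported signal by
 * the same 10 dB, which is exactly the compensation described above.
 */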
3449
c8b0e6e1 3450#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
3451
3452/* Parsed Information Elements */
3453struct ieee802_11_elems {
3454 u8 *ds_params;
3455 u8 ds_params_len;
3456 u8 *tim;
3457 u8 tim_len;
3458 u8 *ibss_params;
3459 u8 ibss_params_len;
3460 u8 *erp_info;
3461 u8 erp_info_len;
3462 u8 *ht_cap_param;
3463 u8 ht_cap_param_len;
3464 u8 *ht_extra_param;
3465 u8 ht_extra_param_len;
3466};
3467
3468static int parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems)
3469{
3470 size_t left = len;
3471 u8 *pos = start;
3472 int unknown = 0;
3473
3474 memset(elems, 0, sizeof(*elems));
3475
3476 while (left >= 2) {
3477 u8 id, elen;
3478
3479 id = *pos++;
3480 elen = *pos++;
3481 left -= 2;
3482
3483 if (elen > left)
3484 return -1;
3485
3486 switch (id) {
3487 case WLAN_EID_DS_PARAMS:
3488 elems->ds_params = pos;
3489 elems->ds_params_len = elen;
3490 break;
3491 case WLAN_EID_TIM:
3492 elems->tim = pos;
3493 elems->tim_len = elen;
3494 break;
3495 case WLAN_EID_IBSS_PARAMS:
3496 elems->ibss_params = pos;
3497 elems->ibss_params_len = elen;
3498 break;
3499 case WLAN_EID_ERP_INFO:
3500 elems->erp_info = pos;
3501 elems->erp_info_len = elen;
3502 break;
3503 case WLAN_EID_HT_CAPABILITY:
3504 elems->ht_cap_param = pos;
3505 elems->ht_cap_param_len = elen;
3506 break;
3507 case WLAN_EID_HT_EXTRA_INFO:
3508 elems->ht_extra_param = pos;
3509 elems->ht_extra_param_len = elen;
3510 break;
3511 default:
3512 unknown++;
3513 break;
3514 }
3515
3516 left -= elen;
3517 pos += elen;
3518 }
3519
3520 return 0;
3521}
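/*
 * Usage sketch (added illustration; it mirrors the association-response
 * handling further down this file).  Given a management frame "mgnt" and
 * its total length "len", the variable-length IEs start right after the
 * fixed assoc_resp fields:
 *
 *	struct ieee802_11_elems elems;
 *	u8 *pos = mgnt->u.assoc_resp.variable;
 *
 *	if (!parse_elems(pos, len - (pos - (u8 *) mgnt), &elems) &&
 *	    elems.ht_cap_param)
 *		... elems.ht_cap_param / ht_cap_param_len point into the
 *		    frame itself; parse_elems() copies nothing ...
 *
 * A return of -1 means some element claimed more bytes than were left in
 * the buffer, so the elems contents should not be trusted in that case.
 */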
326eeee8 3522
78330fdd
TW
3523void iwl4965_init_ht_hw_capab(struct ieee80211_ht_info *ht_info,
3524 enum ieee80211_band band)
326eeee8
RR
3525{
3526 ht_info->cap = 0;
3527 memset(ht_info->supp_mcs_set, 0, 16);
3528
3529 ht_info->ht_supported = 1;
3530
78330fdd 3531 if (band == IEEE80211_BAND_5GHZ) {
326eeee8
RR
3532 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
3533 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
3534 ht_info->supp_mcs_set[4] = 0x01;
3535 }
3536 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
3537 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
3538 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
3539 (IWL_MIMO_PS_NONE << 2));
9ee1ba47
RR
3540 if (iwl4965_param_amsdu_size_8K) {
3541 printk(KERN_DEBUG "iwl4965 in A-MSDU 8K support mode\n");
3542 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
3543 }
326eeee8
RR
3544
3545 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3546 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3547
3548 ht_info->supp_mcs_set[0] = 0xFF;
3549 ht_info->supp_mcs_set[1] = 0xFF;
3550}
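/*
 * Note (added illustration): each byte of supp_mcs_set covers eight MCS
 * indices in the 802.11n Rx MCS bitmask, so the 0xFF written to bytes 0
 * and 1 advertises MCS 0-7 (one stream) and MCS 8-15 (two streams), and
 * bit 0 of byte 4, set only on the 5 GHz band above, corresponds to
 * MCS 32, the 40 MHz duplicate format.
 */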
c8b0e6e1 3551#endif /* CONFIG_IWL4965_HT */
b481de9c 3552
bb8c093b 3553static void iwl4965_sta_modify_ps_wake(struct iwl4965_priv *priv, int sta_id)
b481de9c
ZY
3554{
3555 unsigned long flags;
3556
3557 spin_lock_irqsave(&priv->sta_lock, flags);
3558 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
3559 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3560 priv->stations[sta_id].sta.sta.modify_mask = 0;
3561 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3562 spin_unlock_irqrestore(&priv->sta_lock, flags);
3563
bb8c093b 3564 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
b481de9c
ZY
3565}
3566
bb8c093b 3567static void iwl4965_update_ps_mode(struct iwl4965_priv *priv, u16 ps_bit, u8 *addr)
b481de9c
ZY
3568{
3569 /* FIXME: need locking over ps_status ??? */
bb8c093b 3570 u8 sta_id = iwl4965_hw_find_station(priv, addr);
b481de9c
ZY
3571
3572 if (sta_id != IWL_INVALID_STATION) {
3573 u8 sta_awake = priv->stations[sta_id].
3574 ps_status == STA_PS_STATUS_WAKE;
3575
3576 if (sta_awake && ps_bit)
3577 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
3578 else if (!sta_awake && !ps_bit) {
3579 iwl4965_sta_modify_ps_wake(priv, sta_id);
3580 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
3581 }
3582 }
3583}
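/*
 * Summary (added illustration) of the transitions implemented above:
 *
 *	recorded WAKE  + PM bit set    -> record STA_PS_STATUS_SLEEP
 *	recorded SLEEP + PM bit clear  -> iwl4965_sta_modify_ps_wake() and
 *	                                  record STA_PS_STATUS_WAKE
 *
 * The other two combinations change nothing, so the station entry is only
 * updated when the peer actually toggles its power-save mode.
 */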
17744ff6
TW
3584#ifdef CONFIG_IWL4965_DEBUG
3585
3586/**
3587 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
3588 *
3589 * You may hack this function to show different aspects of received frames,
3590 * including selective frame dumps.
3591 * group100 parameter selects whether to show 1 out of 100 good frames.
3592 *
3593 * TODO: This was originally written for 3945, need to audit for
3594 * proper operation with 4965.
3595 */
3596static void iwl4965_dbg_report_frame(struct iwl4965_priv *priv,
3597 struct iwl4965_rx_packet *pkt,
3598 struct ieee80211_hdr *header, int group100)
3599{
3600 u32 to_us;
3601 u32 print_summary = 0;
3602 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
3603 u32 hundred = 0;
3604 u32 dataframe = 0;
3605 u16 fc;
3606 u16 seq_ctl;
3607 u16 channel;
3608 u16 phy_flags;
3609 int rate_sym;
3610 u16 length;
3611 u16 status;
3612 u16 bcn_tmr;
3613 u32 tsf_low;
3614 u64 tsf;
3615 u8 rssi;
3616 u8 agc;
3617 u16 sig_avg;
3618 u16 noise_diff;
3619 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
3620 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
3621 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
3622 u8 *data = IWL_RX_DATA(pkt);
3623
3624 if (likely(!(iwl4965_debug_level & IWL_DL_RX)))
3625 return;
3626
3627 /* MAC header */
3628 fc = le16_to_cpu(header->frame_control);
3629 seq_ctl = le16_to_cpu(header->seq_ctrl);
3630
3631 /* metadata */
3632 channel = le16_to_cpu(rx_hdr->channel);
3633 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
3634 rate_sym = rx_hdr->rate;
3635 length = le16_to_cpu(rx_hdr->len);
3636
3637 /* end-of-frame status and timestamp */
3638 status = le32_to_cpu(rx_end->status);
3639 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
3640 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
3641 tsf = le64_to_cpu(rx_end->timestamp);
3642
3643 /* signal statistics */
3644 rssi = rx_stats->rssi;
3645 agc = rx_stats->agc;
3646 sig_avg = le16_to_cpu(rx_stats->sig_avg);
3647 noise_diff = le16_to_cpu(rx_stats->noise_diff);
3648
3649 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
3650
3651 /* if data frame is to us and all is good,
3652 * (optionally) print summary for only 1 out of every 100 */
3653 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
3654 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
3655 dataframe = 1;
3656 if (!group100)
3657 print_summary = 1; /* print each frame */
3658 else if (priv->framecnt_to_us < 100) {
3659 priv->framecnt_to_us++;
3660 print_summary = 0;
3661 } else {
3662 priv->framecnt_to_us = 0;
3663 print_summary = 1;
3664 hundred = 1;
3665 }
3666 } else {
3667 /* print summary for all other frames */
3668 print_summary = 1;
3669 }
3670
3671 if (print_summary) {
3672 char *title;
3673 int rate_idx;
3674 u32 bitrate;
3675
3676 if (hundred)
3677 title = "100Frames";
3678 else if (fc & IEEE80211_FCTL_RETRY)
3679 title = "Retry";
3680 else if (ieee80211_is_assoc_response(fc))
3681 title = "AscRsp";
3682 else if (ieee80211_is_reassoc_response(fc))
3683 title = "RasRsp";
3684 else if (ieee80211_is_probe_response(fc)) {
3685 title = "PrbRsp";
3686 print_dump = 1; /* dump frame contents */
3687 } else if (ieee80211_is_beacon(fc)) {
3688 title = "Beacon";
3689 print_dump = 1; /* dump frame contents */
3690 } else if (ieee80211_is_atim(fc))
3691 title = "ATIM";
3692 else if (ieee80211_is_auth(fc))
3693 title = "Auth";
3694 else if (ieee80211_is_deauth(fc))
3695 title = "DeAuth";
3696 else if (ieee80211_is_disassoc(fc))
3697 title = "DisAssoc";
3698 else
3699 title = "Frame";
3700
3701 rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
3702 if (unlikely(rate_idx == -1))
3703 bitrate = 0;
3704 else
3705 bitrate = iwl4965_rates[rate_idx].ieee / 2;
3706
3707 /* print frame summary.
3708 * MAC addresses show just the last byte (for brevity),
3709 * but you can hack it to show more, if you'd like to. */
3710 if (dataframe)
3711 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
3712 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
3713 title, fc, header->addr1[5],
3714 length, rssi, channel, bitrate);
3715 else {
3716 /* src/dst addresses assume managed mode */
3717 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
3718 "src=0x%02x, rssi=%u, tim=%lu usec, "
3719 "phy=0x%02x, chnl=%d\n",
3720 title, fc, header->addr1[5],
3721 header->addr3[5], rssi,
3722 tsf_low - priv->scan_start_tsf,
3723 phy_flags, channel);
3724 }
3725 }
3726 if (print_dump)
3727 iwl4965_print_hex_dump(IWL_DL_RX, data, length);
3728}
3729#else
3730static inline void iwl4965_dbg_report_frame(struct iwl4965_priv *priv,
3731 struct iwl4965_rx_packet *pkt,
3732 struct ieee80211_hdr *header,
3733 int group100)
3734{
3735}
3736#endif
3737
b481de9c 3738
7878a5a4
MA
3739#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
3740
b481de9c
ZY
3741/* Called for REPLY_4965_RX (legacy ABG frames), or
3742 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
bb8c093b
CH
3743static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv,
3744 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3745{
17744ff6
TW
3746 struct ieee80211_hdr *header;
3747 struct ieee80211_rx_status rx_status;
bb8c093b 3748 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3749 /* Use phy data (Rx signal strength, etc.) contained within
3750 * this rx packet for legacy frames,
3751 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
3752 int include_phy = (pkt->hdr.cmd == REPLY_4965_RX);
3753 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3754 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3755 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3756 __le32 *rx_end;
3757 unsigned int len = 0;
b481de9c 3758 u16 fc;
b481de9c
ZY
3759 u8 network_packet;
3760
17744ff6
TW
3761 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
3762 rx_status.freq = ieee80211chan2mhz(le16_to_cpu(rx_start->channel));
3763 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3764 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
3765 rx_status.rate_idx = iwl4965_hwrate_to_plcp_idx(
3766 le32_to_cpu(rx_start->rate_n_flags));
3767
3768 if (rx_status.band == IEEE80211_BAND_5GHZ)
3769 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
3770
3771 rx_status.antenna = 0;
3772 rx_status.flag = 0;
3773
b481de9c
ZY
3774 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
3775 IWL_DEBUG_DROP
3776 ("dsp size out of range [0,20]: "
3777 "%d/n", rx_start->cfg_phy_cnt);
3778 return;
3779 }
17744ff6 3780
b481de9c
ZY
3781 if (!include_phy) {
3782 if (priv->last_phy_res[0])
3783 rx_start = (struct iwl4965_rx_phy_res *)
3784 &priv->last_phy_res[1];
3785 else
3786 rx_start = NULL;
3787 }
3788
3789 if (!rx_start) {
3790 IWL_ERROR("MPDU frame without a PHY data\n");
3791 return;
3792 }
3793
3794 if (include_phy) {
3795 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
3796 + rx_start->cfg_phy_cnt);
3797
3798 len = le16_to_cpu(rx_start->byte_count);
17744ff6 3799 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
b481de9c
ZY
3800 sizeof(struct iwl4965_rx_phy_res) + len);
3801 } else {
3802 struct iwl4965_rx_mpdu_res_start *amsdu =
3803 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3804
3805 header = (void *)(pkt->u.raw +
3806 sizeof(struct iwl4965_rx_mpdu_res_start));
3807 len = le16_to_cpu(amsdu->byte_count);
3808 rx_end = (__le32 *) (pkt->u.raw +
3809 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
3810 }
3811
3812 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
3813 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
3814 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
3815 le32_to_cpu(*rx_end));
3816 return;
3817 }
3818
3819 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
3820
b481de9c 3821 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
17744ff6 3822 rx_status.ssi = iwl4965_calc_rssi(rx_start);
b481de9c
ZY
3823
3824 /* Meaningful noise values are available only from beacon statistics,
3825 * which are gathered only when associated, and indicate noise
3826 * only for the associated network channel ...
3827 * Ignore these noise values while scanning (other channels) */
bb8c093b 3828 if (iwl4965_is_associated(priv) &&
b481de9c 3829 !test_bit(STATUS_SCANNING, &priv->status)) {
17744ff6
TW
3830 rx_status.noise = priv->last_rx_noise;
3831 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi,
3832 rx_status.noise);
b481de9c 3833 } else {
17744ff6
TW
3834 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3835 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0);
b481de9c
ZY
3836 }
3837
3838 /* Reset beacon noise level if not associated. */
bb8c093b 3839 if (!iwl4965_is_associated(priv))
b481de9c
ZY
3840 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3841
17744ff6
TW
3842 /* Set "1" to report good data frames in groups of 100 */
 3843 	/* FIXME: need to optimize the call: */
3844 iwl4965_dbg_report_frame(priv, pkt, header, 1);
3845
3846 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
3847 rx_status.ssi, rx_status.noise, rx_status.signal,
3848 rx_status.mactime);
b481de9c 3849
bb8c093b 3850 network_packet = iwl4965_is_network_packet(priv, header);
b481de9c 3851 if (network_packet) {
17744ff6 3852 priv->last_rx_rssi = rx_status.ssi;
b481de9c
ZY
3853 priv->last_beacon_time = priv->ucode_beacon_time;
3854 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
3855 }
3856
3857 fc = le16_to_cpu(header->frame_control);
3858 switch (fc & IEEE80211_FCTL_FTYPE) {
3859 case IEEE80211_FTYPE_MGMT:
3860
3861 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3862 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3863 header->addr2);
3864 switch (fc & IEEE80211_FCTL_STYPE) {
3865 case IEEE80211_STYPE_PROBE_RESP:
3866 case IEEE80211_STYPE_BEACON:
3867 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA &&
3868 !compare_ether_addr(header->addr2, priv->bssid)) ||
3869 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
3870 !compare_ether_addr(header->addr3, priv->bssid))) {
3871 struct ieee80211_mgmt *mgmt =
3872 (struct ieee80211_mgmt *)header;
3873 u64 timestamp =
3874 le64_to_cpu(mgmt->u.beacon.timestamp);
3875
3876 priv->timestamp0 = timestamp & 0xFFFFFFFF;
3877 priv->timestamp1 =
3878 (timestamp >> 32) & 0xFFFFFFFF;
3879 priv->beacon_int = le16_to_cpu(
3880 mgmt->u.beacon.beacon_int);
3881 if (priv->call_post_assoc_from_beacon &&
3882 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
3883 priv->call_post_assoc_from_beacon = 0;
3884 queue_work(priv->workqueue,
3885 &priv->post_associate.work);
3886 }
3887 }
3888 break;
3889
3890 case IEEE80211_STYPE_ACTION:
3891 break;
3892
3893 /*
471b3efd
JB
3894 * TODO: Use the new callback function from
3895 * mac80211 instead of sniffing these packets.
b481de9c
ZY
3896 */
3897 case IEEE80211_STYPE_ASSOC_RESP:
3898 case IEEE80211_STYPE_REASSOC_RESP:
052c4b9f 3899 if (network_packet) {
c8b0e6e1 3900#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
3901 u8 *pos = NULL;
3902 struct ieee802_11_elems elems;
c8b0e6e1 3903#endif /*CONFIG_IWL4965_HT */
b481de9c
ZY
3904 struct ieee80211_mgmt *mgnt =
3905 (struct ieee80211_mgmt *)header;
3906
7878a5a4
MA
3907 /* We have just associated, give some
3908 * time for the 4-way handshake if
3909 * any. Don't start scan too early. */
3910 priv->next_scan_jiffies = jiffies +
3911 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
3912
b481de9c
ZY
3913 priv->assoc_id = (~((1 << 15) | (1 << 14))
3914 & le16_to_cpu(mgnt->u.assoc_resp.aid));
3915 priv->assoc_capability =
3916 le16_to_cpu(
3917 mgnt->u.assoc_resp.capab_info);
c8b0e6e1 3918#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
3919 pos = mgnt->u.assoc_resp.variable;
3920 if (!parse_elems(pos,
3921 len - (pos - (u8 *) mgnt),
3922 &elems)) {
3923 if (elems.ht_extra_param &&
3924 elems.ht_cap_param)
3925 break;
3926 }
c8b0e6e1 3927#endif /*CONFIG_IWL4965_HT */
b481de9c
ZY
 3928 			/* assoc_id of 0 means no association */
3929 if (!priv->assoc_id)
3930 break;
3931 if (priv->beacon_int)
3932 queue_work(priv->workqueue,
3933 &priv->post_associate.work);
3934 else
3935 priv->call_post_assoc_from_beacon = 1;
3936 }
3937
3938 break;
3939
3940 case IEEE80211_STYPE_PROBE_REQ:
3941 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
bb8c093b 3942 !iwl4965_is_associated(priv)) {
0795af57
JP
3943 DECLARE_MAC_BUF(mac1);
3944 DECLARE_MAC_BUF(mac2);
3945 DECLARE_MAC_BUF(mac3);
3946
b481de9c 3947 IWL_DEBUG_DROP("Dropping (non network): "
0795af57
JP
3948 "%s, %s, %s\n",
3949 print_mac(mac1, header->addr1),
3950 print_mac(mac2, header->addr2),
3951 print_mac(mac3, header->addr3));
b481de9c
ZY
3952 return;
3953 }
3954 }
17744ff6 3955 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
b481de9c
ZY
3956 break;
3957
3958 case IEEE80211_FTYPE_CTL:
9ab46173 3959#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
3960 switch (fc & IEEE80211_FCTL_STYPE) {
3961 case IEEE80211_STYPE_BACK_REQ:
3962 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
3963 iwl4965_handle_data_packet(priv, 0, include_phy,
17744ff6 3964 rxb, &rx_status);
b481de9c
ZY
3965 break;
3966 default:
3967 break;
3968 }
3969#endif
b481de9c
ZY
3970 break;
3971
0795af57
JP
3972 case IEEE80211_FTYPE_DATA: {
3973 DECLARE_MAC_BUF(mac1);
3974 DECLARE_MAC_BUF(mac2);
3975 DECLARE_MAC_BUF(mac3);
3976
b481de9c
ZY
3977 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3978 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3979 header->addr2);
3980
3981 if (unlikely(!network_packet))
3982 IWL_DEBUG_DROP("Dropping (non network): "
0795af57
JP
3983 "%s, %s, %s\n",
3984 print_mac(mac1, header->addr1),
3985 print_mac(mac2, header->addr2),
3986 print_mac(mac3, header->addr3));
bb8c093b 3987 else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
0795af57
JP
3988 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
3989 print_mac(mac1, header->addr1),
3990 print_mac(mac2, header->addr2),
3991 print_mac(mac3, header->addr3));
b481de9c
ZY
3992 else
3993 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
17744ff6 3994 &rx_status);
b481de9c 3995 break;
0795af57 3996 }
b481de9c
ZY
3997 default:
3998 break;
3999
4000 }
4001}
4002
4003/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4004 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
bb8c093b
CH
4005static void iwl4965_rx_reply_rx_phy(struct iwl4965_priv *priv,
4006 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4007{
bb8c093b 4008 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
4009 priv->last_phy_res[0] = 1;
4010 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4011 sizeof(struct iwl4965_rx_phy_res));
4012}
4013
bb8c093b
CH
4014static void iwl4965_rx_missed_beacon_notif(struct iwl4965_priv *priv,
4015 struct iwl4965_rx_mem_buffer *rxb)
b481de9c
ZY
4016
4017{
c8b0e6e1 4018#ifdef CONFIG_IWL4965_SENSITIVITY
bb8c093b
CH
4019 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4020 struct iwl4965_missed_beacon_notif *missed_beacon;
b481de9c
ZY
4021
4022 missed_beacon = &pkt->u.missed_beacon;
4023 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
4024 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
4025 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
4026 le32_to_cpu(missed_beacon->total_missed_becons),
4027 le32_to_cpu(missed_beacon->num_recvd_beacons),
4028 le32_to_cpu(missed_beacon->num_expected_beacons));
4029 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
4030 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
4031 queue_work(priv->workqueue, &priv->sensitivity_work);
4032 }
c8b0e6e1 4033#endif /*CONFIG_IWL4965_SENSITIVITY*/
b481de9c
ZY
4034}
4035
c8b0e6e1 4036#ifdef CONFIG_IWL4965_HT
b481de9c 4037
8b6eaea8
CB
4038/**
4039 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4040 */
bb8c093b 4041static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv,
b481de9c
ZY
4042 int sta_id, int tid)
4043{
4044 unsigned long flags;
4045
8b6eaea8 4046 /* Remove "disable" flag, to enable Tx for this TID */
b481de9c
ZY
4047 spin_lock_irqsave(&priv->sta_lock, flags);
4048 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4049 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4050 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4051 spin_unlock_irqrestore(&priv->sta_lock, flags);
4052
bb8c093b 4053 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
b481de9c
ZY
4054}
4055
8b6eaea8
CB
4056/**
4057 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4058 *
4059 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4060 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4061 */
bb8c093b
CH
4062static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4063 struct iwl4965_ht_agg *agg,
4064 struct iwl4965_compressed_ba_resp*
b481de9c
ZY
4065 ba_resp)
4066
4067{
4068 int i, sh, ack;
fe01b477
RR
4069 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
4070 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4071 u64 bitmap;
4072 int successes = 0;
4073 struct ieee80211_tx_status *tx_status;
b481de9c
ZY
4074
4075 if (unlikely(!agg->wait_for_ba)) {
4076 IWL_ERROR("Received BA when not expected\n");
4077 return -EINVAL;
4078 }
8b6eaea8
CB
4079
4080 /* Mark that the expected block-ack response arrived */
b481de9c 4081 agg->wait_for_ba = 0;
fe01b477 4082 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
8b6eaea8
CB
4083
4084 /* Calculate shift to align block-ack bits with our Tx window bits */
fe01b477 4085 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
01ebd063 4086 if (sh < 0) /* tbw something is wrong with indices */
b481de9c
ZY
4087 sh += 0x100;
4088
8b6eaea8 4089 /* don't use 64-bit values for now */
fe01b477 4090 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
b481de9c
ZY
4091
4092 if (agg->frame_count > (64 - sh)) {
4093 IWL_DEBUG_TX_REPLY("more frames than bitmap size");
4094 return -1;
4095 }
4096
4097 /* check for success or failure according to the
8b6eaea8 4098 * transmitted bitmap and block-ack bitmap */
fe01b477 4099 bitmap &= agg->bitmap;
b481de9c 4100
8b6eaea8
CB
4101 /* For each frame attempted in aggregation,
4102 * update driver's record of tx frame's status. */
b481de9c 4103 for (i = 0; i < agg->frame_count ; i++) {
fe01b477
RR
4104 ack = bitmap & (1 << i);
4105 successes += !!ack;
b481de9c 4106 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
fe01b477
RR
4107 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
4108 agg->start_idx + i);
4109 }
4110
4111 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
4112 tx_status->flags = IEEE80211_TX_STATUS_ACK;
99556438
RR
4113 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU;
4114 tx_status->ampdu_ack_map = successes;
4115 tx_status->ampdu_ack_len = agg->frame_count;
4c424e4c
RR
4116 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags,
4117 &tx_status->control);
fe01b477
RR
4118
4119 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", bitmap);
4120
4121 return 0;
4122}
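/*
 * Worked example (added illustration; hypothetical indices, assuming
 * SEQ_TO_INDEX() maps a sequence number to its slot in the 256-entry Tx
 * queue).  If the block-ack window reported in seq_ctl starts at queue
 * index 48 while the aggregation started at agg->start_idx = 50, then
 * sh = 2; shifting the 64-bit BA bitmap right by 2 lines bit 0 up with
 * the first transmitted frame, and after the "bitmap &= agg->bitmap"
 * mask, bit i tells whether the frame at index (start_idx + i) was ACKed.
 */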
4123
4124/**
4125 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4126 */
4127static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv,
4128 u16 txq_id)
4129{
4130 /* Simply stop the queue, but don't change any configuration;
4131 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4132 iwl4965_write_prph(priv,
4133 KDR_SCD_QUEUE_STATUS_BITS(txq_id),
4134 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4135 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4136}
b481de9c 4137
fe01b477
RR
4138/**
 4139  * txq_id must be at least IWL_BACK_QUEUE_FIRST_ID
4140 */
4141static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
4142 u16 ssn_idx, u8 tx_fifo)
4143{
4144 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4145 IWL_WARNING("queue number too small: %d, must be > %d\n",
4146 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4147 return -EINVAL;
b481de9c
ZY
4148 }
4149
fe01b477
RR
4150 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4151
4152 iwl4965_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4153
4154 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4155 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4156 /* supposes that ssn_idx is valid (!= 0xFFF) */
4157 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4158
4159 iwl4965_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4160 iwl4965_txq_ctx_deactivate(priv, txq_id);
4161 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4162
4163 return 0;
4164}
b481de9c 4165
fe01b477
RR
4166int iwl4965_check_empty_hw_queue(struct iwl4965_priv *priv, int sta_id,
4167 u8 tid, int txq_id)
4168{
4169 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4170 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4171 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4172
4173 switch (priv->stations[sta_id].tid[tid].agg.state) {
4174 case IWL_EMPTYING_HW_QUEUE_DELBA:
4175 /* We are reclaiming the last packet of the */
4176 /* aggregated HW queue */
4177 if (txq_id == tid_data->agg.txq_id &&
4178 q->read_ptr == q->write_ptr) {
4179 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4180 int tx_fifo = default_tid_to_tx_fifo[tid];
4181 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4182 iwl4965_tx_queue_agg_disable(priv, txq_id,
4183 ssn, tx_fifo);
4184 tid_data->agg.state = IWL_AGG_OFF;
4185 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4186 }
4187 break;
4188 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4189 /* We are reclaiming the last packet of the queue */
4190 if (tid_data->tfds_in_queue == 0) {
4191 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4192 tid_data->agg.state = IWL_AGG_ON;
4193 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4194 }
4195 break;
4196 }
b481de9c
ZY
4197 return 0;
4198}
4199
8b6eaea8
CB
4200/**
4201 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
4202 * @index -- current index
4203 * @n_bd -- total number of entries in queue (s/b power of 2)
4204 */
bb8c093b 4205static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
b481de9c
ZY
4206{
4207 return (index == 0) ? n_bd - 1 : index - 1;
4208}
4209
8b6eaea8
CB
4210/**
4211 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
4212 *
4213 * Handles block-acknowledge notification from device, which reports success
4214 * of frames sent via aggregation.
4215 */
bb8c093b
CH
4216static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4217 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4218{
bb8c093b
CH
4219 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4220 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
b481de9c 4221 int index;
bb8c093b
CH
4222 struct iwl4965_tx_queue *txq = NULL;
4223 struct iwl4965_ht_agg *agg;
fe01b477 4224 DECLARE_MAC_BUF(mac);
8b6eaea8
CB
4225
4226 /* "flow" corresponds to Tx queue */
fe01b477 4227 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
8b6eaea8
CB
4228
4229 /* "ssn" is start of block-ack Tx window, corresponds to index
4230 * (in Tx queue's circular buffer) of first TFD/frame in window */
b481de9c
ZY
4231 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4232
fe01b477 4233 if (scd_flow >= ARRAY_SIZE(priv->txq)) {
b481de9c
ZY
4234 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
4235 return;
4236 }
4237
fe01b477 4238 txq = &priv->txq[scd_flow];
b481de9c 4239 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
8b6eaea8
CB
4240
4241 /* Find index just before block-ack window */
bb8c093b 4242 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
b481de9c 4243
01ebd063 4244 /* TODO: Need to get this copy more safely - now good for debug */
fe01b477 4245
0795af57
JP
4246 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
4247 "sta_id = %d\n",
b481de9c 4248 agg->wait_for_ba,
0795af57 4249 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
b481de9c 4250 ba_resp->sta_id);
fe01b477 4251 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
b481de9c
ZY
4252 "%d, scd_ssn = %d\n",
4253 ba_resp->tid,
fe01b477
RR
4254 ba_resp->seq_ctl,
4255 ba_resp->bitmap,
b481de9c
ZY
4256 ba_resp->scd_flow,
4257 ba_resp->scd_ssn);
fe01b477 4258 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
b481de9c 4259 agg->start_idx,
fe01b477 4260 agg->bitmap);
8b6eaea8
CB
4261
4262 /* Update driver's record of ACK vs. not for each frame in window */
b481de9c 4263 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
8b6eaea8
CB
4264
4265 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4266 * block-ack window (we assume that they've been successfully
4267 * transmitted ... if not, it's too late anyway). */
fe01b477
RR
4268 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4269 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
4270 priv->stations[ba_resp->sta_id].
4271 tid[ba_resp->tid].tfds_in_queue -= freed;
4272 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
4273 priv->mac80211_registered &&
4274 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4275 ieee80211_wake_queue(priv->hw, scd_flow);
4276 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
4277 ba_resp->tid, scd_flow);
4278 }
b481de9c
ZY
4279}
4280
8b6eaea8
CB
4281/**
4282 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
4283 */
bb8c093b 4284static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid,
b481de9c
ZY
4285 u16 txq_id)
4286{
4287 u32 tbl_dw_addr;
4288 u32 tbl_dw;
4289 u16 scd_q2ratid;
4290
4291 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4292
4293 tbl_dw_addr = priv->scd_base_addr +
4294 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4295
bb8c093b 4296 tbl_dw = iwl4965_read_targ_mem(priv, tbl_dw_addr);
b481de9c
ZY
4297
4298 if (txq_id & 0x1)
4299 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
4300 else
4301 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
4302
bb8c093b 4303 iwl4965_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
b481de9c
ZY
4304
4305 return 0;
4306}
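/*
 * Example (added illustration): the translation table packs two queues
 * per 32-bit word, even txq_id in the low half and odd txq_id in the
 * high half.  So for txq_id = 8 the new scd_q2ratid value replaces the
 * low 16 bits while the read-modify-write above preserves queue 9's
 * mapping in the upper 16 bits, and vice versa for txq_id = 9.
 */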
4307
fe01b477 4308
b481de9c 4309/**
8b6eaea8
CB
4310 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4311 *
 4312  * NOTE: txq_id must be at least IWL_BACK_QUEUE_FIRST_ID,
4313 * i.e. it must be one of the higher queues used for aggregation
b481de9c 4314 */
bb8c093b 4315static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
b481de9c
ZY
4316 int tx_fifo, int sta_id, int tid,
4317 u16 ssn_idx)
4318{
4319 unsigned long flags;
4320 int rc;
4321 u16 ra_tid;
4322
4323 if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
4324 IWL_WARNING("queue number too small: %d, must be > %d\n",
4325 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4326
4327 ra_tid = BUILD_RAxTID(sta_id, tid);
4328
8b6eaea8 4329 /* Modify device's station table to Tx this TID */
bb8c093b 4330 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
b481de9c
ZY
4331
4332 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 4333 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4334 if (rc) {
4335 spin_unlock_irqrestore(&priv->lock, flags);
4336 return rc;
4337 }
4338
8b6eaea8 4339 /* Stop this Tx queue before configuring it */
b481de9c
ZY
4340 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4341
8b6eaea8 4342 /* Map receiver-address / traffic-ID to this queue */
b481de9c
ZY
4343 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
4344
8b6eaea8 4345 /* Set this queue as a chain-building queue */
8a1b0245 4346 iwl4965_set_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
b481de9c 4347
8b6eaea8
CB
4348 /* Place first TFD at index corresponding to start sequence number.
4349 * Assumes that ssn_idx is valid (!= 0xFFF) */
fc4b6853
TW
4350 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4351 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
b481de9c
ZY
4352 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4353
8b6eaea8 4354 /* Set up Tx window size and frame limit for this queue */
bb8c093b 4355 iwl4965_write_targ_mem(priv,
b481de9c
ZY
4356 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4357 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4358 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4359
bb8c093b 4360 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
b481de9c
ZY
4361 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4362 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4363 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4364
bb8c093b 4365 iwl4965_set_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
b481de9c 4366
8b6eaea8 4367 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
b481de9c
ZY
4368 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
4369
bb8c093b 4370 iwl4965_release_nic_access(priv);
b481de9c
ZY
4371 spin_unlock_irqrestore(&priv->lock, flags);
4372
4373 return 0;
4374}
4375
c8b0e6e1 4376#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
4377
4378/**
4379 * iwl4965_add_station - Initialize a station's hardware rate table
4380 *
8b6eaea8 4381 * The uCode's station table contains a table of fallback rates
b481de9c
ZY
4382 * for automatic fallback during transmission.
4383 *
8b6eaea8
CB
4384 * NOTE: This sets up a default set of values. These will be replaced later
4385 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4386 * rc80211_simple.
b481de9c 4387 *
8b6eaea8
CB
4388 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4389 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4390 * which requires station table entry to exist).
b481de9c 4391 */
bb8c093b 4392void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
b481de9c
ZY
4393{
4394 int i, r;
bb8c093b 4395 struct iwl4965_link_quality_cmd link_cmd = {
b481de9c
ZY
4396 .reserved1 = 0,
4397 };
4398 u16 rate_flags;
4399
8b6eaea8
CB
4400 /* Set up the rate scaling to start at selected rate, fall back
4401 * all the way down to 1M in IEEE order, and then spin on 1M */
b481de9c
ZY
4402 if (is_ap)
4403 r = IWL_RATE_54M_INDEX;
8318d78a 4404 else if (priv->band == IEEE80211_BAND_5GHZ)
b481de9c
ZY
4405 r = IWL_RATE_6M_INDEX;
4406 else
4407 r = IWL_RATE_1M_INDEX;
4408
4409 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4410 rate_flags = 0;
4411 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4412 rate_flags |= RATE_MCS_CCK_MSK;
4413
8b6eaea8 4414 /* Use Tx antenna B only */
b481de9c
ZY
4415 rate_flags |= RATE_MCS_ANT_B_MSK;
4416 rate_flags &= ~RATE_MCS_ANT_A_MSK;
8b6eaea8 4417
b481de9c 4418 link_cmd.rs_table[i].rate_n_flags =
bb8c093b
CH
4419 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4420 r = iwl4965_get_prev_ieee_rate(r);
b481de9c
ZY
4421 }
4422
4423 link_cmd.general_params.single_stream_ant_msk = 2;
4424 link_cmd.general_params.dual_stream_ant_msk = 3;
4425 link_cmd.agg_params.agg_dis_start_th = 3;
4426 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4427
4428 /* Update the rate scaling for control frame Tx to AP */
4429 link_cmd.sta_id = is_ap ? IWL_AP_ID : IWL4965_BROADCAST_ID;
4430
bb8c093b 4431 iwl4965_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD, sizeof(link_cmd),
b481de9c
ZY
4432 &link_cmd);
4433}
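/*
 * Illustration (added note; assumes iwl4965_get_prev_ieee_rate() steps
 * down one IEEE rate per call, as the comment above describes).  Starting
 * from r = IWL_RATE_54M_INDEX the rs_table is filled for
 * 54, 48, 36, 24, 18, 12, 11, 9, 6, 5.5, 2 and 1 Mbps, then repeats
 * 1 Mbps until all LINK_QUAL_MAX_RETRY_NUM slots are used, with
 * RATE_MCS_CCK_MSK set on the 11/5.5/2/1 Mbps entries.
 */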
4434
c8b0e6e1 4435#ifdef CONFIG_IWL4965_HT
b481de9c 4436
8318d78a
JB
4437static u8 iwl4965_is_channel_extension(struct iwl4965_priv *priv,
4438 enum ieee80211_band band,
78330fdd 4439 u16 channel, u8 extension_chan_offset)
b481de9c 4440{
bb8c093b 4441 const struct iwl4965_channel_info *ch_info;
b481de9c 4442
8318d78a 4443 ch_info = iwl4965_get_channel_info(priv, band, channel);
b481de9c
ZY
4444 if (!is_channel_valid(ch_info))
4445 return 0;
4446
134eb5d3 4447 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
b481de9c
ZY
4448 return 0;
4449
4450 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4451 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4452 return 1;
4453
4454 return 0;
4455}
4456
bb8c093b 4457static u8 iwl4965_is_fat_tx_allowed(struct iwl4965_priv *priv,
fd105e79 4458 struct ieee80211_ht_info *sta_ht_inf)
b481de9c 4459{
fd105e79 4460 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
b481de9c 4461
fd105e79
RR
4462 if ((!iwl_ht_conf->is_ht) ||
4463 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
134eb5d3 4464 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
b481de9c
ZY
4465 return 0;
4466
fd105e79
RR
4467 if (sta_ht_inf) {
4468 if ((!sta_ht_inf->ht_supported) ||
194c7ca6 4469 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
fd105e79
RR
4470 return 0;
4471 }
b481de9c 4472
78330fdd 4473 return (iwl4965_is_channel_extension(priv, priv->band,
fd105e79
RR
4474 iwl_ht_conf->control_channel,
4475 iwl_ht_conf->extension_chan_offset));
b481de9c
ZY
4476}
4477
fd105e79 4478void iwl4965_set_rxon_ht(struct iwl4965_priv *priv, struct iwl_ht_info *ht_info)
b481de9c 4479{
bb8c093b 4480 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
b481de9c
ZY
4481 u32 val;
4482
4483 if (!ht_info->is_ht)
4484 return;
4485
8b6eaea8 4486 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
fd105e79 4487 if (iwl4965_is_fat_tx_allowed(priv, NULL))
b481de9c
ZY
4488 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4489 else
4490 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4491 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4492
4493 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4494 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
4495 le16_to_cpu(rxon->channel),
4496 ht_info->control_channel);
4497 rxon->channel = cpu_to_le16(ht_info->control_channel);
4498 return;
4499 }
4500
8b6eaea8 4501 /* Note: control channel is opposite of extension channel */
b481de9c
ZY
4502 switch (ht_info->extension_chan_offset) {
4503 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
4504 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
4505 break;
4506 case IWL_EXT_CHANNEL_OFFSET_BELOW:
4507 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
4508 break;
134eb5d3 4509 case IWL_EXT_CHANNEL_OFFSET_NONE:
b481de9c
ZY
4510 default:
4511 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4512 break;
4513 }
4514
fd105e79 4515 val = ht_info->ht_protection;
b481de9c
ZY
4516
4517 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4518
b481de9c
ZY
4519 iwl4965_set_rxon_chain(priv);
4520
4521 IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4522 "rxon flags 0x%X operation mode :0x%X "
4523 "extension channel offset 0x%x "
4524 "control chan %d\n",
fd105e79
RR
4525 ht_info->supp_mcs_set[0], ht_info->supp_mcs_set[1],
4526 le32_to_cpu(rxon->flags), ht_info->ht_protection,
b481de9c
ZY
4527 ht_info->extension_chan_offset,
4528 ht_info->control_channel);
4529 return;
4530}
4531
67d62035
RR
4532void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index,
4533 struct ieee80211_ht_info *sta_ht_inf)
b481de9c
ZY
4534{
4535 __le32 sta_flags;
e53cfe0e 4536 u8 mimo_ps_mode;
b481de9c 4537
67d62035 4538 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
b481de9c
ZY
4539 goto done;
4540
e53cfe0e
TW
4541 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
4542
b481de9c
ZY
4543 sta_flags = priv->stations[index].sta.station_flags;
4544
e53cfe0e
TW
4545 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
4546
4547 switch (mimo_ps_mode) {
4548 case WLAN_HT_CAP_MIMO_PS_STATIC:
4549 sta_flags |= STA_FLG_MIMO_DIS_MSK;
4550 break;
4551 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
b481de9c 4552 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
e53cfe0e
TW
4553 break;
4554 case WLAN_HT_CAP_MIMO_PS_DISABLED:
4555 break;
4556 default:
4557 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
4558 break;
4559 }
b481de9c
ZY
4560
4561 sta_flags |= cpu_to_le32(
67d62035 4562 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
b481de9c
ZY
4563
4564 sta_flags |= cpu_to_le32(
67d62035 4565 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
b481de9c 4566
67d62035 4567 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
b481de9c 4568 sta_flags |= STA_FLG_FAT_EN_MSK;
67d62035 4569 else
e53cfe0e 4570 sta_flags &= ~STA_FLG_FAT_EN_MSK;
67d62035 4571
b481de9c
ZY
4572 priv->stations[index].sta.station_flags = sta_flags;
4573 done:
4574 return;
4575}
4576
bb8c093b 4577static void iwl4965_sta_modify_add_ba_tid(struct iwl4965_priv *priv,
b481de9c
ZY
4578 int sta_id, int tid, u16 ssn)
4579{
4580 unsigned long flags;
4581
4582 spin_lock_irqsave(&priv->sta_lock, flags);
4583 priv->stations[sta_id].sta.station_flags_msk = 0;
4584 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
4585 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
4586 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
4587 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4588 spin_unlock_irqrestore(&priv->sta_lock, flags);
4589
bb8c093b 4590 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
b481de9c
ZY
4591}
4592
bb8c093b 4593static void iwl4965_sta_modify_del_ba_tid(struct iwl4965_priv *priv,
b481de9c
ZY
4594 int sta_id, int tid)
4595{
4596 unsigned long flags;
4597
4598 spin_lock_irqsave(&priv->sta_lock, flags);
4599 priv->stations[sta_id].sta.station_flags_msk = 0;
4600 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
4601 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
4602 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4603 spin_unlock_irqrestore(&priv->sta_lock, flags);
4604
bb8c093b 4605 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
b481de9c
ZY
4606}
4607
8b6eaea8
CB
4608/*
4609 * Find first available (lowest unused) Tx Queue, mark it "active".
4610 * Called only when finding queue for aggregation.
4611 * Should never return anything < 7, because they should already
4612 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4613 */
bb8c093b 4614static int iwl4965_txq_ctx_activate_free(struct iwl4965_priv *priv)
b481de9c
ZY
4615{
4616 int txq_id;
4617
4618 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
4619 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4620 return txq_id;
4621 return -1;
4622}
4623
fe01b477
RR
4624static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da,
4625 u16 tid, u16 *start_seq_num)
b481de9c 4626{
bb8c093b 4627 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
4628 int sta_id;
4629 int tx_fifo;
4630 int txq_id;
4631 int ssn = -1;
fe01b477 4632 int rc = 0;
b481de9c 4633 unsigned long flags;
bb8c093b 4634 struct iwl4965_tid_data *tid_data;
0795af57 4635 DECLARE_MAC_BUF(mac);
b481de9c
ZY
4636
4637 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4638 tx_fifo = default_tid_to_tx_fifo[tid];
4639 else
4640 return -EINVAL;
4641
fe01b477
RR
4642 IWL_WARNING("%s on da = %s tid = %d\n",
4643 __func__, print_mac(mac, da), tid);
b481de9c 4644
bb8c093b 4645 sta_id = iwl4965_hw_find_station(priv, da);
b481de9c
ZY
4646 if (sta_id == IWL_INVALID_STATION)
4647 return -ENXIO;
4648
fe01b477
RR
4649 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
4650 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
4651 return -ENXIO;
4652 }
4653
bb8c093b 4654 txq_id = iwl4965_txq_ctx_activate_free(priv);
b481de9c
ZY
4655 if (txq_id == -1)
4656 return -ENXIO;
4657
4658 spin_lock_irqsave(&priv->sta_lock, flags);
4659 tid_data = &priv->stations[sta_id].tid[tid];
4660 ssn = SEQ_TO_SN(tid_data->seq_number);
4661 tid_data->agg.txq_id = txq_id;
4662 spin_unlock_irqrestore(&priv->sta_lock, flags);
4663
4664 *start_seq_num = ssn;
fe01b477 4665 rc = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
b481de9c 4666 sta_id, tid, ssn);
fe01b477
RR
4667 if (rc)
4668 return rc;
b481de9c 4669
fe01b477
RR
4670 rc = 0;
4671 if (tid_data->tfds_in_queue == 0) {
4672 printk(KERN_ERR "HW queue is empty\n");
4673 tid_data->agg.state = IWL_AGG_ON;
4674 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4675 } else {
4676 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4677 tid_data->tfds_in_queue);
4678 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
4679 }
4680 return rc;
4681}
b481de9c 4682
fe01b477
RR
4683static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da,
4684 u16 tid)
b481de9c
ZY
4685{
4686
bb8c093b 4687 struct iwl4965_priv *priv = hw->priv;
b481de9c 4688 int tx_fifo_id, txq_id, sta_id, ssn = -1;
bb8c093b 4689 struct iwl4965_tid_data *tid_data;
fe01b477
RR
4690 int rc, write_ptr, read_ptr;
4691 unsigned long flags;
0795af57
JP
4692 DECLARE_MAC_BUF(mac);
4693
b481de9c 4694 if (!da) {
fe01b477 4695 IWL_ERROR("da = NULL\n");
b481de9c
ZY
4696 return -EINVAL;
4697 }
4698
4699 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4700 tx_fifo_id = default_tid_to_tx_fifo[tid];
4701 else
4702 return -EINVAL;
4703
bb8c093b 4704 sta_id = iwl4965_hw_find_station(priv, da);
b481de9c
ZY
4705
4706 if (sta_id == IWL_INVALID_STATION)
4707 return -ENXIO;
4708
fe01b477
RR
4709 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
4710 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
4711
b481de9c
ZY
4712 tid_data = &priv->stations[sta_id].tid[tid];
4713 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
4714 txq_id = tid_data->agg.txq_id;
fe01b477
RR
4715 write_ptr = priv->txq[txq_id].q.write_ptr;
4716 read_ptr = priv->txq[txq_id].q.read_ptr;
4717
4718 /* The queue is not empty */
4719 if (write_ptr != read_ptr) {
4720 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
4721 priv->stations[sta_id].tid[tid].agg.state =
4722 IWL_EMPTYING_HW_QUEUE_DELBA;
4723 return 0;
4724 }
4725
4726 IWL_DEBUG_HT("HW queue empty\n");;
4727 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
b481de9c 4728
fe01b477
RR
4729 spin_lock_irqsave(&priv->lock, flags);
4730 rc = iwl4965_grab_nic_access(priv);
4731 if (rc) {
4732 spin_unlock_irqrestore(&priv->lock, flags);
4733 return rc;
4734 }
b481de9c 4735 rc = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
fe01b477
RR
4736 iwl4965_release_nic_access(priv);
4737 spin_unlock_irqrestore(&priv->lock, flags);
4738
b481de9c
ZY
4739 if (rc)
4740 return rc;
4741
fe01b477 4742 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid);
b481de9c 4743
fe01b477
RR
4744 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n",
4745 print_mac(mac, da), tid);
8114fcf1 4746
8114fcf1
RR
4747 return 0;
4748}
4749
4750int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4751 enum ieee80211_ampdu_mlme_action action,
4752 const u8 *addr, u16 tid, u16 *ssn)
4753{
4754 struct iwl4965_priv *priv = hw->priv;
4755 int sta_id;
4756 DECLARE_MAC_BUF(mac);
4757
4758 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ",
4759 print_mac(mac, addr), tid);
4760 sta_id = iwl4965_hw_find_station(priv, addr);
4761 switch (action) {
4762 case IEEE80211_AMPDU_RX_START:
4763 IWL_DEBUG_HT("start Rx\n");
4764 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn);
4765 break;
4766 case IEEE80211_AMPDU_RX_STOP:
4767 IWL_DEBUG_HT("stop Rx\n");
4768 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid);
4769 break;
4770 case IEEE80211_AMPDU_TX_START:
4771 IWL_DEBUG_HT("start Tx\n");
4772 return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn);
4773 case IEEE80211_AMPDU_TX_STOP:
4774 IWL_DEBUG_HT("stop Tx\n");
4775 return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid);
4776 default:
4777 IWL_DEBUG_HT("unknown\n");
4778 return -EINVAL;
4779 break;
4780 }
4781 return 0;
4782}
4783
c8b0e6e1 4784#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
4785
4786/* Set up 4965-specific Rx frame reply handlers */
bb8c093b 4787void iwl4965_hw_rx_handler_setup(struct iwl4965_priv *priv)
b481de9c
ZY
4788{
4789 /* Legacy Rx frames */
4790 priv->rx_handlers[REPLY_4965_RX] = iwl4965_rx_reply_rx;
4791
4792 /* High-throughput (HT) Rx frames */
4793 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
4794 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
4795
4796 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
4797 iwl4965_rx_missed_beacon_notif;
4798
c8b0e6e1 4799#ifdef CONFIG_IWL4965_HT
b481de9c 4800 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
c8b0e6e1 4801#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
4802}
4803
bb8c093b 4804void iwl4965_hw_setup_deferred_work(struct iwl4965_priv *priv)
b481de9c
ZY
4805{
4806 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4807 INIT_WORK(&priv->statistics_work, iwl4965_bg_statistics_work);
c8b0e6e1 4808#ifdef CONFIG_IWL4965_SENSITIVITY
b481de9c
ZY
4809 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4810#endif
b481de9c
ZY
4811 init_timer(&priv->statistics_periodic);
4812 priv->statistics_periodic.data = (unsigned long)priv;
4813 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
4814}
4815
bb8c093b 4816void iwl4965_hw_cancel_deferred_work(struct iwl4965_priv *priv)
b481de9c
ZY
4817{
4818 del_timer_sync(&priv->statistics_periodic);
4819
4820 cancel_delayed_work(&priv->init_alive_start);
4821}
4822
bb8c093b 4823struct pci_device_id iwl4965_hw_card_ids[] = {
3567c11d
ZY
4824 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4229)},
4825 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4230)},
b481de9c
ZY
4826 {0}
4827};
4828
796083cb
BC
4829/*
4830 * The device's EEPROM semaphore prevents conflicts between driver and uCode
4831 * when accessing the EEPROM; each access is a series of pulses to/from the
4832 * EEPROM chip, not a single event, so even reads could conflict if they
4833 * weren't arbitrated by the semaphore.
4834 */
bb8c093b 4835int iwl4965_eeprom_acquire_semaphore(struct iwl4965_priv *priv)
b481de9c
ZY
4836{
4837 u16 count;
4838 int rc;
4839
4840 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
796083cb 4841 /* Request semaphore */
bb8c093b 4842 iwl4965_set_bit(priv, CSR_HW_IF_CONFIG_REG,
b481de9c 4843 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
796083cb
BC
4844
4845 /* See if we got it */
bb8c093b 4846 rc = iwl4965_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
b481de9c
ZY
4847 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
4848 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
4849 EEPROM_SEM_TIMEOUT);
4850 if (rc >= 0) {
91e17473 4851 IWL_DEBUG_IO("Acquired semaphore after %d tries.\n",
b481de9c
ZY
4852 count+1);
4853 return rc;
4854 }
4855 }
4856
4857 return rc;
4858}
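/*
 * Usage sketch (added illustration; the release helper named here is an
 * assumption, not something defined in this file).  A caller brackets its
 * EEPROM pulses with the semaphore so driver and uCode never drive the
 * EEPROM pins at the same time:
 *
 *	if (iwl4965_eeprom_acquire_semaphore(priv) < 0)
 *		return -ENOENT;
 *	... clock the EEPROM data in/out ...
 *	iwl4965_eeprom_release_semaphore(priv);   (hypothetical name)
 */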
4859
bb8c093b 4860MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);