drivers/net/wireless/iwlwifi/iwl-4965.c
1 /******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/version.h>
30 #include <linux/init.h>
31 #include <linux/pci.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/delay.h>
34 #include <linux/skbuff.h>
35 #include <linux/netdevice.h>
36 #include <linux/wireless.h>
37 #include <net/mac80211.h>
38 #include <linux/etherdevice.h>
39 #include <asm/unaligned.h>
40
41 #include "iwl-eeprom.h"
42 #include "iwl-core.h"
43 #include "iwl-4965.h"
44 #include "iwl-helpers.h"
45
46 static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv);
47
48 #define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
49 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
50 IWL_RATE_SISO_##s##M_PLCP, \
51 IWL_RATE_MIMO_##s##M_PLCP, \
52 IWL_RATE_##r##M_IEEE, \
53 IWL_RATE_##ip##M_INDEX, \
54 IWL_RATE_##in##M_INDEX, \
55 IWL_RATE_##rp##M_INDEX, \
56 IWL_RATE_##rn##M_INDEX, \
57 IWL_RATE_##pp##M_INDEX, \
58 IWL_RATE_##np##M_INDEX }
59
60 /*
61 * Parameter order:
62 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
63 *
64 * If there isn't a valid next or previous rate then INV is used which
65 * maps to IWL_RATE_INVALID
66 *
67 */
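/*
 * Example (illustrative, not part of the original comment), reading one
 * entry of the table below: IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18,
 * 11, 18) fills the IWL_RATE_12M_INDEX slot with the legacy, SISO and MIMO
 * PLCP codes and the IEEE value for 12 Mbps, and the three trailing
 * prev/next index pairs all point at the neighbouring 11 Mbps and 18 Mbps
 * entries, i.e. the rates to fall back to or step up to from 12 Mbps.
 */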
68 const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
69 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
70 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
71 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
72 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
73 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
74 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
75 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
76 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
77 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
78 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
79 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
80 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
81 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
82 };
83
84 #ifdef CONFIG_IWL4965_HT
85
86 static const u16 default_tid_to_tx_fifo[] = {
87 IWL_TX_FIFO_AC1,
88 IWL_TX_FIFO_AC0,
89 IWL_TX_FIFO_AC0,
90 IWL_TX_FIFO_AC1,
91 IWL_TX_FIFO_AC2,
92 IWL_TX_FIFO_AC2,
93 IWL_TX_FIFO_AC3,
94 IWL_TX_FIFO_AC3,
95 IWL_TX_FIFO_NONE,
96 IWL_TX_FIFO_NONE,
97 IWL_TX_FIFO_NONE,
98 IWL_TX_FIFO_NONE,
99 IWL_TX_FIFO_NONE,
100 IWL_TX_FIFO_NONE,
101 IWL_TX_FIFO_NONE,
102 IWL_TX_FIFO_NONE,
103 IWL_TX_FIFO_AC3
104 };
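/*
 * Reading the table above (illustrative note, not from the original
 * sources): TIDs 0-7 map onto the four access-category FIFOs (e.g. TIDs 6
 * and 7 both feed IWL_TX_FIFO_AC3), TIDs 8-15 get IWL_TX_FIFO_NONE and are
 * not used, and the one extra trailing entry also points at
 * IWL_TX_FIFO_AC3.
 */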
105
106 #endif /*CONFIG_IWL4965_HT */
107
108 static int is_fat_channel(__le32 rxon_flags)
109 {
110 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
111 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
112 }
113
114 static u8 is_single_stream(struct iwl4965_priv *priv)
115 {
116 #ifdef CONFIG_IWL4965_HT
117 if (!priv->current_ht_config.is_ht ||
118 (priv->current_ht_config.supp_mcs_set[1] == 0) ||
119 (priv->ps_mode == IWL_MIMO_PS_STATIC))
120 return 1;
121 #else
122 return 1;
123 #endif /*CONFIG_IWL4965_HT */
124 return 0;
125 }
126
127 int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
128 {
129 int idx = 0;
130
131 /* 4965 HT rate format */
132 if (rate_n_flags & RATE_MCS_HT_MSK) {
133 idx = (rate_n_flags & 0xff);
134
135 if (idx >= IWL_RATE_MIMO_6M_PLCP)
136 idx = idx - IWL_RATE_MIMO_6M_PLCP;
137
138 idx += IWL_FIRST_OFDM_RATE;
139 /* skip 9M, not supported in HT */
140 if (idx >= IWL_RATE_9M_INDEX)
141 idx += 1;
142 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
143 return idx;
144
145 /* 4965 legacy rate format, search for match in table */
146 } else {
147 for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++)
148 if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF))
149 return idx;
150 }
151
152 return -1;
153 }
154
155 /**
156 * iwl4965_hwrate_to_tx_control - translate ucode response to mac80211 tx status control values
157 */
158 void iwl4965_hwrate_to_tx_control(struct iwl4965_priv *priv, u32 rate_n_flags,
159 struct ieee80211_tx_control *control)
160 {
161 int rate_index;
162
163 control->antenna_sel_tx =
164 ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_A_POS);
165 if (rate_n_flags & RATE_MCS_HT_MSK)
166 control->flags |= IEEE80211_TXCTL_OFDM_HT;
167 if (rate_n_flags & RATE_MCS_GF_MSK)
168 control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
169 if (rate_n_flags & RATE_MCS_FAT_MSK)
170 control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
171 if (rate_n_flags & RATE_MCS_DUP_MSK)
172 control->flags |= IEEE80211_TXCTL_DUP_DATA;
173 if (rate_n_flags & RATE_MCS_SGI_MSK)
174 control->flags |= IEEE80211_TXCTL_SHORT_GI;
175 /* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
176 * IEEE80211_BAND_2GHZ band as it contains all the rates */
177 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
178 if (rate_index == -1)
179 control->tx_rate = NULL;
180 else
181 control->tx_rate =
182 &priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
183 }
184
185 /*
186 * Determine how many receiver/antenna chains to use.
187 * More provides better reception via diversity. Fewer saves power.
188 * MIMO (dual stream) requires at least 2, but works better with 3.
189 * This does not determine *which* chains to use, just how many.
190 */
191 static int iwl4965_get_rx_chain_counter(struct iwl4965_priv *priv,
192 u8 *idle_state, u8 *rx_state)
193 {
194 u8 is_single = is_single_stream(priv);
195 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
196
197 /* # of Rx chains to use when expecting MIMO. */
198 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
199 *rx_state = 2;
200 else
201 *rx_state = 3;
202
203 /* # Rx chains when idling and maybe trying to save power */
204 switch (priv->ps_mode) {
205 case IWL_MIMO_PS_STATIC:
206 case IWL_MIMO_PS_DYNAMIC:
207 *idle_state = (is_cam) ? 2 : 1;
208 break;
209 case IWL_MIMO_PS_NONE:
210 *idle_state = (is_cam) ? *rx_state : 1;
211 break;
212 default:
213 *idle_state = 1;
214 break;
215 }
216
217 return 0;
218 }
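/*
 * Illustrative outcomes of the logic above (not part of the original
 * sources): a MIMO-capable association with power management off (CAM) and
 * ps_mode == IWL_MIMO_PS_NONE yields *rx_state = 3 and *idle_state = 3;
 * a single-stream association with ps_mode == IWL_MIMO_PS_STATIC yields
 * *rx_state = 2, with *idle_state = 2 in CAM or 1 when the NIC is in
 * power-save (STATUS_POWER_PMI set).
 */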
219
220 int iwl4965_hw_rxq_stop(struct iwl4965_priv *priv)
221 {
222 int rc;
223 unsigned long flags;
224
225 spin_lock_irqsave(&priv->lock, flags);
226 rc = iwl4965_grab_nic_access(priv);
227 if (rc) {
228 spin_unlock_irqrestore(&priv->lock, flags);
229 return rc;
230 }
231
232 /* stop Rx DMA */
233 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
234 rc = iwl4965_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
235 (1 << 24), 1000);
236 if (rc < 0)
237 IWL_ERROR("Can't stop Rx DMA.\n");
238
239 iwl4965_release_nic_access(priv);
240 spin_unlock_irqrestore(&priv->lock, flags);
241
242 return 0;
243 }
244
245 u8 iwl4965_hw_find_station(struct iwl4965_priv *priv, const u8 *addr)
246 {
247 int i;
248 int start = 0;
249 int ret = IWL_INVALID_STATION;
250 unsigned long flags;
251 DECLARE_MAC_BUF(mac);
252
253 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
254 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
255 start = IWL_STA_ID;
256
257 if (is_broadcast_ether_addr(addr))
258 return priv->hw_setting.bcast_sta_id;
259
260 spin_lock_irqsave(&priv->sta_lock, flags);
261 for (i = start; i < priv->hw_setting.max_stations; i++)
262 if ((priv->stations[i].used) &&
263 (!compare_ether_addr
264 (priv->stations[i].sta.sta.addr, addr))) {
265 ret = i;
266 goto out;
267 }
268
269 IWL_DEBUG_ASSOC_LIMIT("cannot find STA %s, total %d\n",
270 print_mac(mac, addr), priv->num_stations);
271
272 out:
273 spin_unlock_irqrestore(&priv->sta_lock, flags);
274 return ret;
275 }
276
277 static int iwl4965_nic_set_pwr_src(struct iwl4965_priv *priv, int pwr_max)
278 {
279 int ret;
280 unsigned long flags;
281
282 spin_lock_irqsave(&priv->lock, flags);
283 ret = iwl4965_grab_nic_access(priv);
284 if (ret) {
285 spin_unlock_irqrestore(&priv->lock, flags);
286 return ret;
287 }
288
289 if (!pwr_max) {
290 u32 val;
291
292 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
293 &val);
294
295 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
296 iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
297 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
298 ~APMG_PS_CTRL_MSK_PWR_SRC);
299 } else
300 iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
301 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
302 ~APMG_PS_CTRL_MSK_PWR_SRC);
303
304 iwl4965_release_nic_access(priv);
305 spin_unlock_irqrestore(&priv->lock, flags);
306
307 return ret;
308 }
309
310 static int iwl4965_rx_init(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
311 {
312 int rc;
313 unsigned long flags;
314 unsigned int rb_size;
315
316 spin_lock_irqsave(&priv->lock, flags);
317 rc = iwl4965_grab_nic_access(priv);
318 if (rc) {
319 spin_unlock_irqrestore(&priv->lock, flags);
320 return rc;
321 }
322
323 if (iwl4965_param_amsdu_size_8K)
324 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
325 else
326 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
327
328 /* Stop Rx DMA */
329 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
330
331 /* Reset driver's Rx queue write index */
332 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
333
334 /* Tell device where to find RBD circular buffer in DRAM */
335 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
336 rxq->dma_addr >> 8);
337
338 /* Tell device where in DRAM to update its Rx status */
339 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
340 (priv->hw_setting.shared_phys +
341 offsetof(struct iwl4965_shared, val0)) >> 4);
342
343 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
344 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
345 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
346 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
347 rb_size |
348 /*0x10 << 4 | */
349 (RX_QUEUE_SIZE_LOG <<
350 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
351
352 /*
353 * iwl4965_write32(priv,CSR_INT_COAL_REG,0);
354 */
355
356 iwl4965_release_nic_access(priv);
357 spin_unlock_irqrestore(&priv->lock, flags);
358
359 return 0;
360 }
361
362 /* Tell 4965 where to find the "keep warm" buffer */
363 static int iwl4965_kw_init(struct iwl4965_priv *priv)
364 {
365 unsigned long flags;
366 int rc;
367
368 spin_lock_irqsave(&priv->lock, flags);
369 rc = iwl4965_grab_nic_access(priv);
370 if (rc)
371 goto out;
372
373 iwl4965_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
374 priv->kw.dma_addr >> 4);
375 iwl4965_release_nic_access(priv);
376 out:
377 spin_unlock_irqrestore(&priv->lock, flags);
378 return rc;
379 }
380
381 static int iwl4965_kw_alloc(struct iwl4965_priv *priv)
382 {
383 struct pci_dev *dev = priv->pci_dev;
384 struct iwl4965_kw *kw = &priv->kw;
385
386 kw->size = IWL4965_KW_SIZE; /* TBD: may need to be set somewhere else */
387 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
388 if (!kw->v_addr)
389 return -ENOMEM;
390
391 return 0;
392 }
393
394 #define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
395 ? # x " " : "")
396
397 /**
398 * iwl4965_set_fat_chan_info - Copy fat channel info into driver's priv.
399 *
400 * Does not set up a command, or touch hardware.
401 */
402 int iwl4965_set_fat_chan_info(struct iwl4965_priv *priv,
403 enum ieee80211_band band, u16 channel,
404 const struct iwl4965_eeprom_channel *eeprom_ch,
405 u8 fat_extension_channel)
406 {
407 struct iwl4965_channel_info *ch_info;
408
409 ch_info = (struct iwl4965_channel_info *)
410 iwl4965_get_channel_info(priv, band, channel);
411
412 if (!is_channel_valid(ch_info))
413 return -1;
414
415 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
416 " %ddBm): Ad-Hoc %ssupported\n",
417 ch_info->channel,
418 is_channel_a_band(ch_info) ?
419 "5.2" : "2.4",
420 CHECK_AND_PRINT(IBSS),
421 CHECK_AND_PRINT(ACTIVE),
422 CHECK_AND_PRINT(RADAR),
423 CHECK_AND_PRINT(WIDE),
424 CHECK_AND_PRINT(NARROW),
425 CHECK_AND_PRINT(DFS),
426 eeprom_ch->flags,
427 eeprom_ch->max_power_avg,
428 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
429 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
430 "" : "not ");
431
432 ch_info->fat_eeprom = *eeprom_ch;
433 ch_info->fat_max_power_avg = eeprom_ch->max_power_avg;
434 ch_info->fat_curr_txpow = eeprom_ch->max_power_avg;
435 ch_info->fat_min_power = 0;
436 ch_info->fat_scan_power = eeprom_ch->max_power_avg;
437 ch_info->fat_flags = eeprom_ch->flags;
438 ch_info->fat_extension_channel = fat_extension_channel;
439
440 return 0;
441 }
442
443 /**
444 * iwl4965_kw_free - Free the "keep warm" buffer
445 */
446 static void iwl4965_kw_free(struct iwl4965_priv *priv)
447 {
448 struct pci_dev *dev = priv->pci_dev;
449 struct iwl4965_kw *kw = &priv->kw;
450
451 if (kw->v_addr) {
452 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
453 memset(kw, 0, sizeof(*kw));
454 }
455 }
456
457 /**
458 * iwl4965_txq_ctx_reset - Reset TX queue context
459 * Destroys all DMA structures and initialises them again
460 *
461 * @param priv
462 * @return error code
463 */
464 static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv)
465 {
466 int rc = 0;
467 int txq_id, slots_num;
468 unsigned long flags;
469
470 iwl4965_kw_free(priv);
471
472 /* Free all tx/cmd queues and keep-warm buffer */
473 iwl4965_hw_txq_ctx_free(priv);
474
475 /* Alloc keep-warm buffer */
476 rc = iwl4965_kw_alloc(priv);
477 if (rc) {
478 IWL_ERROR("Keep Warm allocation failed\n");
479 goto error_kw;
480 }
481
482 spin_lock_irqsave(&priv->lock, flags);
483
484 rc = iwl4965_grab_nic_access(priv);
485 if (unlikely(rc)) {
486 IWL_ERROR("TX reset failed\n");
487 spin_unlock_irqrestore(&priv->lock, flags);
488 goto error_reset;
489 }
490
491 /* Turn off all Tx DMA channels */
492 iwl4965_write_prph(priv, KDR_SCD_TXFACT, 0);
493 iwl4965_release_nic_access(priv);
494 spin_unlock_irqrestore(&priv->lock, flags);
495
496 /* Tell 4965 where to find the keep-warm buffer */
497 rc = iwl4965_kw_init(priv);
498 if (rc) {
499 IWL_ERROR("kw_init failed\n");
500 goto error_reset;
501 }
502
503 /* Alloc and init all (default 16) Tx queues,
504 * including the command queue (#4) */
505 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
506 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
507 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
508 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
509 txq_id);
510 if (rc) {
511 IWL_ERROR("Tx %d queue init failed\n", txq_id);
512 goto error;
513 }
514 }
515
516 return rc;
517
518 error:
519 iwl4965_hw_txq_ctx_free(priv);
520 error_reset:
521 iwl4965_kw_free(priv);
522 error_kw:
523 return rc;
524 }
525
526 int iwl4965_hw_nic_init(struct iwl4965_priv *priv)
527 {
528 int rc;
529 unsigned long flags;
530 struct iwl4965_rx_queue *rxq = &priv->rxq;
531 u8 rev_id;
532 u32 val;
533 u8 val_link;
534
535 iwl4965_power_init_handle(priv);
536
537 /* nic_init */
538 spin_lock_irqsave(&priv->lock, flags);
539
540 iwl4965_set_bit(priv, CSR_GIO_CHICKEN_BITS,
541 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
542
543 iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
544 rc = iwl4965_poll_bit(priv, CSR_GP_CNTRL,
545 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
546 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
547 if (rc < 0) {
548 spin_unlock_irqrestore(&priv->lock, flags);
549 IWL_DEBUG_INFO("Failed to init the card\n");
550 return rc;
551 }
552
553 rc = iwl4965_grab_nic_access(priv);
554 if (rc) {
555 spin_unlock_irqrestore(&priv->lock, flags);
556 return rc;
557 }
558
559 iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);
560
561 iwl4965_write_prph(priv, APMG_CLK_CTRL_REG,
562 APMG_CLK_VAL_DMA_CLK_RQT |
563 APMG_CLK_VAL_BSM_CLK_RQT);
564 iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);
565
566 udelay(20);
567
568 iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
569 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
570
571 iwl4965_release_nic_access(priv);
572 iwl4965_write32(priv, CSR_INT_COALESCING, 512 / 32);
573 spin_unlock_irqrestore(&priv->lock, flags);
574
575 /* Determine HW type */
576 rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
577 if (rc)
578 return rc;
579
580 IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id);
581
582 iwl4965_nic_set_pwr_src(priv, 1);
583 spin_lock_irqsave(&priv->lock, flags);
584
585 if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) {
586 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
587 /* Enable No Snoop field */
588 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
589 val & ~(1 << 11));
590 }
591
592 spin_unlock_irqrestore(&priv->lock, flags);
593
594 if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
595 IWL_ERROR("Older EEPROM detected! Aborting.\n");
596 return -EINVAL;
597 }
598
599 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
600
601 /* disable L1 entry -- workaround for pre-B1 */
602 pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02);
603
604 spin_lock_irqsave(&priv->lock, flags);
605
606 /* set CSR_HW_CONFIG_REG for uCode use */
607
608 iwl4965_set_bit(priv, CSR_HW_IF_CONFIG_REG,
609 CSR49_HW_IF_CONFIG_REG_BIT_4965_R |
610 CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI |
611 CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);
612
613 rc = iwl4965_grab_nic_access(priv);
614 if (rc < 0) {
615 spin_unlock_irqrestore(&priv->lock, flags);
616 IWL_DEBUG_INFO("Failed to init the card\n");
617 return rc;
618 }
619
620 iwl4965_read_prph(priv, APMG_PS_CTRL_REG);
621 iwl4965_set_bits_prph(priv, APMG_PS_CTRL_REG,
622 APMG_PS_CTRL_VAL_RESET_REQ);
623 udelay(5);
624 iwl4965_clear_bits_prph(priv, APMG_PS_CTRL_REG,
625 APMG_PS_CTRL_VAL_RESET_REQ);
626
627 iwl4965_release_nic_access(priv);
628 spin_unlock_irqrestore(&priv->lock, flags);
629
630 iwl4965_hw_card_show_info(priv);
631
632 /* end nic_init */
633
634 /* Allocate the RX queue, or reset if it is already allocated */
635 if (!rxq->bd) {
636 rc = iwl4965_rx_queue_alloc(priv);
637 if (rc) {
638 IWL_ERROR("Unable to initialize Rx queue\n");
639 return -ENOMEM;
640 }
641 } else
642 iwl4965_rx_queue_reset(priv, rxq);
643
644 iwl4965_rx_replenish(priv);
645
646 iwl4965_rx_init(priv, rxq);
647
648 spin_lock_irqsave(&priv->lock, flags);
649
650 rxq->need_update = 1;
651 iwl4965_rx_queue_update_write_ptr(priv, rxq);
652
653 spin_unlock_irqrestore(&priv->lock, flags);
654
655 /* Allocate and init all Tx and Command queues */
656 rc = iwl4965_txq_ctx_reset(priv);
657 if (rc)
658 return rc;
659
660 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
661 IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");
662
663 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
664 IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");
665
666 set_bit(STATUS_INIT, &priv->status);
667
668 return 0;
669 }
670
671 int iwl4965_hw_nic_stop_master(struct iwl4965_priv *priv)
672 {
673 int rc = 0;
674 u32 reg_val;
675 unsigned long flags;
676
677 spin_lock_irqsave(&priv->lock, flags);
678
679 /* set stop master bit */
680 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
681
682 reg_val = iwl4965_read32(priv, CSR_GP_CNTRL);
683
684 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
685 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
686 IWL_DEBUG_INFO("Card in power save, master is already "
687 "stopped\n");
688 else {
689 rc = iwl4965_poll_bit(priv, CSR_RESET,
690 CSR_RESET_REG_FLAG_MASTER_DISABLED,
691 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
692 if (rc < 0) {
693 spin_unlock_irqrestore(&priv->lock, flags);
694 return rc;
695 }
696 }
697
698 spin_unlock_irqrestore(&priv->lock, flags);
699 IWL_DEBUG_INFO("stop master\n");
700
701 return rc;
702 }
703
704 /**
705 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
706 */
707 void iwl4965_hw_txq_ctx_stop(struct iwl4965_priv *priv)
708 {
709
710 int txq_id;
711 unsigned long flags;
712
713 /* Stop each Tx DMA channel, and wait for it to be idle */
714 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
715 spin_lock_irqsave(&priv->lock, flags);
716 if (iwl4965_grab_nic_access(priv)) {
717 spin_unlock_irqrestore(&priv->lock, flags);
718 continue;
719 }
720
721 iwl4965_write_direct32(priv,
722 IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
723 0x0);
724 iwl4965_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
725 IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
726 (txq_id), 200);
727 iwl4965_release_nic_access(priv);
728 spin_unlock_irqrestore(&priv->lock, flags);
729 }
730
731 /* Deallocate memory for all Tx queues */
732 iwl4965_hw_txq_ctx_free(priv);
733 }
734
735 int iwl4965_hw_nic_reset(struct iwl4965_priv *priv)
736 {
737 int rc = 0;
738 unsigned long flags;
739
740 iwl4965_hw_nic_stop_master(priv);
741
742 spin_lock_irqsave(&priv->lock, flags);
743
744 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
745
746 udelay(10);
747
748 iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
749 rc = iwl4965_poll_bit(priv, CSR_GP_CNTRL,
750 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
751 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
752
753 udelay(10);
754
755 rc = iwl4965_grab_nic_access(priv);
756 if (!rc) {
757 iwl4965_write_prph(priv, APMG_CLK_EN_REG,
758 APMG_CLK_VAL_DMA_CLK_RQT |
759 APMG_CLK_VAL_BSM_CLK_RQT);
760
761 udelay(10);
762
763 iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
764 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
765
766 iwl4965_release_nic_access(priv);
767 }
768
769 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
770 wake_up_interruptible(&priv->wait_command_queue);
771
772 spin_unlock_irqrestore(&priv->lock, flags);
773
774 return rc;
775
776 }
777
778 #define REG_RECALIB_PERIOD (60)
779
780 /**
781 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
782 *
783 * This callback is provided in order to queue the statistics_work
784 * in work_queue context (v. softirq)
785 *
786 * This timer function is continually reset to execute within
787 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
788 * was received. We need to ensure we receive the statistics in order
789 * to update the temperature used for calibrating the TXPOWER. However,
790 * we can't send the statistics command from softirq context (which
791 * is the context which timers run at) so we have to queue off the
792 * statistics_work to actually send the command to the hardware.
793 */
794 static void iwl4965_bg_statistics_periodic(unsigned long data)
795 {
796 struct iwl4965_priv *priv = (struct iwl4965_priv *)data;
797
798 queue_work(priv->workqueue, &priv->statistics_work);
799 }
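/*
 * Illustrative sketch (not code from this section): the timer that drives
 * the callback above is typically re-armed elsewhere in the driver with
 * something like
 *
 *	mod_timer(&priv->statistics_periodic,
 *		  jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
 *
 * where "statistics_periodic" is assumed to be the struct timer_list
 * member whose callback is iwl4965_bg_statistics_periodic().
 */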
800
801 /**
802 * iwl4965_bg_statistics_work - Send the statistics request to the hardware.
803 *
804 * This is queued by iwl4965_bg_statistics_periodic.
805 */
806 static void iwl4965_bg_statistics_work(struct work_struct *work)
807 {
808 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
809 statistics_work);
810
811 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
812 return;
813
814 mutex_lock(&priv->mutex);
815 iwl4965_send_statistics_request(priv);
816 mutex_unlock(&priv->mutex);
817 }
818
819 #define CT_LIMIT_CONST 259
820 #define TM_CT_KILL_THRESHOLD 110
821
822 void iwl4965_rf_kill_ct_config(struct iwl4965_priv *priv)
823 {
824 struct iwl4965_ct_kill_config cmd;
825 u32 R1, R2, R3;
826 u32 temp_th;
827 u32 crit_temperature;
828 unsigned long flags;
829 int rc = 0;
830
831 spin_lock_irqsave(&priv->lock, flags);
832 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
833 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
834 spin_unlock_irqrestore(&priv->lock, flags);
835
836 if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) {
837 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
838 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
839 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
840 } else {
841 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
842 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
843 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
844 }
845
846 temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
847
848 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
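  /*
   * Worked example with made-up calibration values (purely illustrative):
   * if R1 = 87, R2 = 282, R3 = 340 and CELSIUS_TO_KELVIN(110) yields 383,
   * then crit_temperature = (383 * (340 - 87)) / 259 + 282
   *                       = 96899 / 259 + 282 = 374 + 282 = 656,
   * expressed in the same raw units as the R values from the uCode.
   */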
849 cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
850 rc = iwl4965_send_cmd_pdu(priv,
851 REPLY_CT_KILL_CONFIG_CMD, sizeof(cmd), &cmd);
852 if (rc)
853 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
854 else
855 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n");
856 }
857
858 #ifdef CONFIG_IWL4965_SENSITIVITY
859
860 /* "false alarms" are signals that our DSP tries to lock onto,
861 * but then determines that they are either noise, or transmissions
862 * from a distant wireless network (also "noise", really) that get
863 * "stepped on" by stronger transmissions within our own network.
864 * This algorithm attempts to set a sensitivity level that is high
865 * enough to receive all of our own network traffic, but not so
866 * high that our DSP gets too busy trying to lock onto non-network
867 * activity/noise. */
868 static int iwl4965_sens_energy_cck(struct iwl4965_priv *priv,
869 u32 norm_fa,
870 u32 rx_enable_time,
871 struct statistics_general_data *rx_info)
872 {
873 u32 max_nrg_cck = 0;
874 int i = 0;
875 u8 max_silence_rssi = 0;
876 u32 silence_ref = 0;
877 u8 silence_rssi_a = 0;
878 u8 silence_rssi_b = 0;
879 u8 silence_rssi_c = 0;
880 u32 val;
881
882 /* "false_alarms" values below are cross-multiplications to assess the
883 * numbers of false alarms within the measured period of actual Rx
884 * (Rx is off when we're txing), vs the min/max expected false alarms
885 * (some should be expected if rx is sensitive enough) in a
886 * hypothetical listening period of 200 time units (TU), 204.8 msec:
887 *
888 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
889 *
890 * */
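/*
 * Illustrative numbers (the MIN_FA_CCK/MAX_FA_CCK values below are assumed
 * for the example, not quoted from the headers): with norm_fa = 10 false
 * alarms observed over rx_enable_time = 100000 usec of actual Rx time,
 * false_alarms = 10 * 200 * 1024 = 2048000.  If MIN_FA_CCK were 5 and
 * MAX_FA_CCK were 50, the comparison window would be 500000 .. 5000000,
 * so this beacon period would land in the "healthy" range and the
 * sensitivity settings would be left alone.
 */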
891 u32 false_alarms = norm_fa * 200 * 1024;
892 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
893 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
894 struct iwl4965_sensitivity_data *data = NULL;
895
896 data = &(priv->sensitivity_data);
897
898 data->nrg_auto_corr_silence_diff = 0;
899
900 /* Find max silence rssi among all 3 receivers.
901 * This is background noise, which may include transmissions from other
902 * networks, measured during silence before our network's beacon */
903 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
904 ALL_BAND_FILTER) >> 8);
905 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
906 ALL_BAND_FILTER) >> 8);
907 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
908 ALL_BAND_FILTER) >> 8);
909
910 val = max(silence_rssi_b, silence_rssi_c);
911 max_silence_rssi = max(silence_rssi_a, (u8) val);
912
913 /* Store silence rssi in 20-beacon history table */
914 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
915 data->nrg_silence_idx++;
916 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
917 data->nrg_silence_idx = 0;
918
919 /* Find max silence rssi across 20 beacon history */
920 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
921 val = data->nrg_silence_rssi[i];
922 silence_ref = max(silence_ref, val);
923 }
924 IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
925 silence_rssi_a, silence_rssi_b, silence_rssi_c,
926 silence_ref);
927
928 /* Find max rx energy (min value!) among all 3 receivers,
929 * measured during beacon frame.
930 * Save it in 10-beacon history table. */
931 i = data->nrg_energy_idx;
932 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
933 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
934
935 data->nrg_energy_idx++;
936 if (data->nrg_energy_idx >= 10)
937 data->nrg_energy_idx = 0;
938
939 /* Find min rx energy (max value) across 10 beacon history.
940 * This is the minimum signal level that we want to receive well.
941 * Add backoff (margin so we don't miss slightly lower energy frames).
942 * This establishes an upper bound (min value) for energy threshold. */
943 max_nrg_cck = data->nrg_value[0];
944 for (i = 1; i < 10; i++)
945 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
946 max_nrg_cck += 6;
947
948 IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
949 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
950 rx_info->beacon_energy_c, max_nrg_cck - 6);
951
952 /* Count number of consecutive beacons with fewer-than-desired
953 * false alarms. */
954 if (false_alarms < min_false_alarms)
955 data->num_in_cck_no_fa++;
956 else
957 data->num_in_cck_no_fa = 0;
958 IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
959 data->num_in_cck_no_fa);
960
961 /* If we got too many false alarms this time, reduce sensitivity */
962 if (false_alarms > max_false_alarms) {
963 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
964 false_alarms, max_false_alarms);
965 IWL_DEBUG_CALIB("... reducing sensitivity\n");
966 data->nrg_curr_state = IWL_FA_TOO_MANY;
967
968 if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
969 /* Store for "fewer than desired" on later beacon */
970 data->nrg_silence_ref = silence_ref;
971
972 /* increase energy threshold (reduce nrg value)
973 * to decrease sensitivity */
974 if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
975 data->nrg_th_cck = data->nrg_th_cck
976 - NRG_STEP_CCK;
977 }
978
979 /* increase auto_corr values to decrease sensitivity */
980 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
981 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
982 else {
983 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
984 data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
985 }
986 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
987 data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);
988
989 /* Else if we got fewer than desired, increase sensitivity */
990 } else if (false_alarms < min_false_alarms) {
991 data->nrg_curr_state = IWL_FA_TOO_FEW;
992
993 /* Compare silence level with silence level for most recent
994 * healthy number or too many false alarms */
995 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
996 (s32)silence_ref;
997
998 IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
999 false_alarms, min_false_alarms,
1000 data->nrg_auto_corr_silence_diff);
1001
1002 /* Increase value to increase sensitivity, but only if:
1003 * 1a) previous beacon did *not* have *too many* false alarms
1004 * 1b) AND there's a significant difference in Rx levels
1005 * from a previous beacon with too many, or healthy # FAs
1006 * OR 2) We've seen a lot of beacons (100) with too few
1007 * false alarms */
1008 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
1009 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
1010 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
1011
1012 IWL_DEBUG_CALIB("... increasing sensitivity\n");
1013 /* Increase nrg value to increase sensitivity */
1014 val = data->nrg_th_cck + NRG_STEP_CCK;
1015 data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);
1016
1017 /* Decrease auto_corr values to increase sensitivity */
1018 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
1019 data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);
1020
1021 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
1022 data->auto_corr_cck_mrc =
1023 max((u32)AUTO_CORR_MIN_CCK_MRC, val);
1024
1025 } else
1026 IWL_DEBUG_CALIB("... but not changing sensitivity\n");
1027
1028 /* Else we got a healthy number of false alarms, keep status quo */
1029 } else {
1030 IWL_DEBUG_CALIB(" FA in safe zone\n");
1031 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
1032
1033 /* Store for use in "fewer than desired" with later beacon */
1034 data->nrg_silence_ref = silence_ref;
1035
1036 /* If previous beacon had too many false alarms,
1037 * give it some extra margin by reducing sensitivity again
1038 * (but don't go below measured energy of desired Rx) */
1039 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
1040 IWL_DEBUG_CALIB("... increasing margin\n");
1041 data->nrg_th_cck -= NRG_MARGIN;
1042 }
1043 }
1044
1045 /* Make sure the energy threshold does not go above the measured
1046 * energy of the desired Rx signals (reduced by backoff margin),
1047 * or else we might start missing Rx frames.
1048 * Lower value is higher energy, so we use max()!
1049 */
1050 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
1051 IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
1052
1053 data->nrg_prev_state = data->nrg_curr_state;
1054
1055 return 0;
1056 }
1057
1058
1059 static int iwl4965_sens_auto_corr_ofdm(struct iwl4965_priv *priv,
1060 u32 norm_fa,
1061 u32 rx_enable_time)
1062 {
1063 u32 val;
1064 u32 false_alarms = norm_fa * 200 * 1024;
1065 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
1066 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
1067 struct iwl4965_sensitivity_data *data = NULL;
1068
1069 data = &(priv->sensitivity_data);
1070
1071 /* If we got too many false alarms this time, reduce sensitivity */
1072 if (false_alarms > max_false_alarms) {
1073
1074 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
1075 false_alarms, max_false_alarms);
1076
1077 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
1078 data->auto_corr_ofdm =
1079 min((u32)AUTO_CORR_MAX_OFDM, val);
1080
1081 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
1082 data->auto_corr_ofdm_mrc =
1083 min((u32)AUTO_CORR_MAX_OFDM_MRC, val);
1084
1085 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
1086 data->auto_corr_ofdm_x1 =
1087 min((u32)AUTO_CORR_MAX_OFDM_X1, val);
1088
1089 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
1090 data->auto_corr_ofdm_mrc_x1 =
1091 min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
1092 }
1093
1094 /* Else if we got fewer than desired, increase sensitivity */
1095 else if (false_alarms < min_false_alarms) {
1096
1097 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
1098 false_alarms, min_false_alarms);
1099
1100 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
1101 data->auto_corr_ofdm =
1102 max((u32)AUTO_CORR_MIN_OFDM, val);
1103
1104 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
1105 data->auto_corr_ofdm_mrc =
1106 max((u32)AUTO_CORR_MIN_OFDM_MRC, val);
1107
1108 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
1109 data->auto_corr_ofdm_x1 =
1110 max((u32)AUTO_CORR_MIN_OFDM_X1, val);
1111
1112 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
1113 data->auto_corr_ofdm_mrc_x1 =
1114 max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
1115 }
1116
1117 else
1118 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
1119 min_false_alarms, false_alarms, max_false_alarms);
1120
1121 return 0;
1122 }
1123
1124 static int iwl4965_sensitivity_callback(struct iwl4965_priv *priv,
1125 struct iwl4965_cmd *cmd, struct sk_buff *skb)
1126 {
1127 /* We didn't cache the SKB; let the caller free it */
1128 return 1;
1129 }
1130
1131 /* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
1132 static int iwl4965_sensitivity_write(struct iwl4965_priv *priv, u8 flags)
1133 {
1134 int rc = 0;
1135 struct iwl4965_sensitivity_cmd cmd ;
1136 struct iwl4965_sensitivity_data *data = NULL;
1137 struct iwl4965_host_cmd cmd_out = {
1138 .id = SENSITIVITY_CMD,
1139 .len = sizeof(struct iwl4965_sensitivity_cmd),
1140 .meta.flags = flags,
1141 .data = &cmd,
1142 };
1143
1144 data = &(priv->sensitivity_data);
1145
1146 memset(&cmd, 0, sizeof(cmd));
1147
1148 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
1149 cpu_to_le16((u16)data->auto_corr_ofdm);
1150 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
1151 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
1152 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
1153 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
1154 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
1155 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
1156
1157 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
1158 cpu_to_le16((u16)data->auto_corr_cck);
1159 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
1160 cpu_to_le16((u16)data->auto_corr_cck_mrc);
1161
1162 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
1163 cpu_to_le16((u16)data->nrg_th_cck);
1164 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
1165 cpu_to_le16((u16)data->nrg_th_ofdm);
1166
1167 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
1168 __constant_cpu_to_le16(190);
1169 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
1170 __constant_cpu_to_le16(390);
1171 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
1172 __constant_cpu_to_le16(62);
1173
1174 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
1175 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
1176 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
1177 data->nrg_th_ofdm);
1178
1179 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
1180 data->auto_corr_cck, data->auto_corr_cck_mrc,
1181 data->nrg_th_cck);
1182
1183 /* Update uCode's "work" table, and copy it to DSP */
1184 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
1185
1186 if (flags & CMD_ASYNC)
1187 cmd_out.meta.u.callback = iwl4965_sensitivity_callback;
1188
1189 /* Don't send command to uCode if nothing has changed */
1190 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
1191 sizeof(u16)*HD_TABLE_SIZE)) {
1192 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
1193 return 0;
1194 }
1195
1196 /* Copy table for comparison next time */
1197 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
1198 sizeof(u16)*HD_TABLE_SIZE);
1199
1200 rc = iwl4965_send_cmd(priv, &cmd_out);
1201 if (!rc) {
1202 IWL_DEBUG_CALIB("SENSITIVITY_CMD succeeded\n");
1203 return rc;
1204 }
1205
1206 return 0;
1207 }
1208
1209 void iwl4965_init_sensitivity(struct iwl4965_priv *priv, u8 flags, u8 force)
1210 {
1211 int rc = 0;
1212 int i;
1213 struct iwl4965_sensitivity_data *data = NULL;
1214
1215 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1216
1217 if (force)
1218 memset(&(priv->sensitivity_tbl[0]), 0,
1219 sizeof(u16)*HD_TABLE_SIZE);
1220
1221 /* Clear driver's sensitivity algo data */
1222 data = &(priv->sensitivity_data);
1223 memset(data, 0, sizeof(struct iwl4965_sensitivity_data));
1224
1225 data->num_in_cck_no_fa = 0;
1226 data->nrg_curr_state = IWL_FA_TOO_MANY;
1227 data->nrg_prev_state = IWL_FA_TOO_MANY;
1228 data->nrg_silence_ref = 0;
1229 data->nrg_silence_idx = 0;
1230 data->nrg_energy_idx = 0;
1231
1232 for (i = 0; i < 10; i++)
1233 data->nrg_value[i] = 0;
1234
1235 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
1236 data->nrg_silence_rssi[i] = 0;
1237
1238 data->auto_corr_ofdm = 90;
1239 data->auto_corr_ofdm_mrc = 170;
1240 data->auto_corr_ofdm_x1 = 105;
1241 data->auto_corr_ofdm_mrc_x1 = 220;
1242 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
1243 data->auto_corr_cck_mrc = 200;
1244 data->nrg_th_cck = 100;
1245 data->nrg_th_ofdm = 100;
1246
1247 data->last_bad_plcp_cnt_ofdm = 0;
1248 data->last_fa_cnt_ofdm = 0;
1249 data->last_bad_plcp_cnt_cck = 0;
1250 data->last_fa_cnt_cck = 0;
1251
1252 /* Clear prior Sensitivity command data to force send to uCode */
1253 if (force)
1254 memset(&(priv->sensitivity_tbl[0]), 0,
1255 sizeof(u16)*HD_TABLE_SIZE);
1256
1257 rc |= iwl4965_sensitivity_write(priv, flags);
1258 IWL_DEBUG_CALIB("<<return 0x%X\n", rc);
1259
1260 return;
1261 }
1262
1263
1264 /* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1265 * Called after every association, but this runs only once!
1266 * ... once chain noise is calibrated the first time, it's good forever. */
1267 void iwl4965_chain_noise_reset(struct iwl4965_priv *priv)
1268 {
1269 struct iwl4965_chain_noise_data *data = NULL;
1270 int rc = 0;
1271
1272 data = &(priv->chain_noise_data);
1273 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl4965_is_associated(priv)) {
1274 struct iwl4965_calibration_cmd cmd;
1275
1276 memset(&cmd, 0, sizeof(cmd));
1277 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1278 cmd.diff_gain_a = 0;
1279 cmd.diff_gain_b = 0;
1280 cmd.diff_gain_c = 0;
1281 rc = iwl4965_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1282 sizeof(cmd), &cmd);
1283 msleep(4);
1284 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1285 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
1286 }
1287 return;
1288 }
1289
1290 /*
1291 * Accumulate 20 beacons of signal and noise statistics for each of
1292 * 3 receivers/antennas/rx-chains, then figure out:
1293 * 1) Which antennas are connected.
1294 * 2) Differential rx gain settings to balance the 3 receivers.
1295 */
1296 static void iwl4965_noise_calibration(struct iwl4965_priv *priv,
1297 struct iwl4965_notif_statistics *stat_resp)
1298 {
1299 struct iwl4965_chain_noise_data *data = NULL;
1300 int rc = 0;
1301
1302 u32 chain_noise_a;
1303 u32 chain_noise_b;
1304 u32 chain_noise_c;
1305 u32 chain_sig_a;
1306 u32 chain_sig_b;
1307 u32 chain_sig_c;
1308 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1309 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1310 u32 max_average_sig;
1311 u16 max_average_sig_antenna_i;
1312 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
1313 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
1314 u16 i = 0;
1315 u16 chan_num = INITIALIZATION_VALUE;
1316 u32 band = INITIALIZATION_VALUE;
1317 u32 active_chains = 0;
1318 unsigned long flags;
1319 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
1320
1321 data = &(priv->chain_noise_data);
1322
1323 /* Accumulate just the first 20 beacons after the first association,
1324 * then we're done forever. */
1325 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
1326 if (data->state == IWL_CHAIN_NOISE_ALIVE)
1327 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
1328 return;
1329 }
1330
1331 spin_lock_irqsave(&priv->lock, flags);
1332 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1333 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
1334 spin_unlock_irqrestore(&priv->lock, flags);
1335 return;
1336 }
1337
1338 band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
1339 chan_num = le16_to_cpu(priv->staging_rxon.channel);
1340
1341 /* Make sure we accumulate data for just the associated channel
1342 * (even if scanning). */
1343 if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
1344 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
1345 (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
1346 IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
1347 chan_num, band);
1348 spin_unlock_irqrestore(&priv->lock, flags);
1349 return;
1350 }
1351
1352 /* Accumulate beacon statistics values across 20 beacons */
1353 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
1354 IN_BAND_FILTER;
1355 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
1356 IN_BAND_FILTER;
1357 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
1358 IN_BAND_FILTER;
1359
1360 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
1361 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1362 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1363
1364 spin_unlock_irqrestore(&priv->lock, flags);
1365
1366 data->beacon_count++;
1367
1368 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
1369 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
1370 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1371
1372 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1373 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1374 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1375
1376 IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
1377 data->beacon_count);
1378 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
1379 chain_sig_a, chain_sig_b, chain_sig_c);
1380 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
1381 chain_noise_a, chain_noise_b, chain_noise_c);
1382
1383 /* If this is the 20th beacon, determine:
1384 * 1) Disconnected antennas (using signal strengths)
1385 * 2) Differential gain (using silence noise) to balance receivers */
1386 if (data->beacon_count == CAL_NUM_OF_BEACONS) {
1387
1388 /* Analyze signal for disconnected antenna */
1389 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
1390 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
1391 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
1392
1393 if (average_sig[0] >= average_sig[1]) {
1394 max_average_sig = average_sig[0];
1395 max_average_sig_antenna_i = 0;
1396 active_chains = (1 << max_average_sig_antenna_i);
1397 } else {
1398 max_average_sig = average_sig[1];
1399 max_average_sig_antenna_i = 1;
1400 active_chains = (1 << max_average_sig_antenna_i);
1401 }
1402
1403 if (average_sig[2] >= max_average_sig) {
1404 max_average_sig = average_sig[2];
1405 max_average_sig_antenna_i = 2;
1406 active_chains = (1 << max_average_sig_antenna_i);
1407 }
1408
1409 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
1410 average_sig[0], average_sig[1], average_sig[2]);
1411 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
1412 max_average_sig, max_average_sig_antenna_i);
1413
1414 /* Compare signal strengths for all 3 receivers. */
1415 for (i = 0; i < NUM_RX_CHAINS; i++) {
1416 if (i != max_average_sig_antenna_i) {
1417 s32 rssi_delta = (max_average_sig -
1418 average_sig[i]);
1419
1420 /* If signal is very weak, compared with
1421 * strongest, mark it as disconnected. */
1422 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
1423 data->disconn_array[i] = 1;
1424 else
1425 active_chains |= (1 << i);
1426 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
1427 "disconn_array[i] = %d\n",
1428 i, rssi_delta, data->disconn_array[i]);
1429 }
1430 }
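  /*
   * Example (illustrative, with an assumed threshold): if the strongest
   * chain averages a signal of 190 and another chain averages 160,
   * rssi_delta = 30; were MAXIMUM_ALLOWED_PATHLOSS 15, that chain would be
   * flagged in disconn_array[], otherwise it is added to active_chains.
   */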
1431
1432 /* If both chains A & B are disconnected -
1433 * connect B and leave A as is */
1434 if (data->disconn_array[CHAIN_A] &&
1435 data->disconn_array[CHAIN_B]) {
1436 data->disconn_array[CHAIN_B] = 0;
1437 active_chains |= (1 << CHAIN_B);
1438 IWL_DEBUG_CALIB("both A & B chains are disconnected! "
1439 "W/A - declare B as connected\n");
1440 }
1441
1442 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
1443 active_chains);
1444
1445 /* Save for use within RXON, TX, SCAN commands, etc. */
1446 priv->valid_antenna = active_chains;
1447
1448 /* Analyze noise for rx balance */
1449 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
1450 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
1451 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
1452
1453 for (i = 0; i < NUM_RX_CHAINS; i++) {
1454 if (!(data->disconn_array[i]) &&
1455 (average_noise[i] <= min_average_noise)) {
1456 /* This means that chain i is active and has
1457 * lower noise values so far: */
1458 min_average_noise = average_noise[i];
1459 min_average_noise_antenna_i = i;
1460 }
1461 }
1462
1463 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1464
1465 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
1466 average_noise[0], average_noise[1],
1467 average_noise[2]);
1468
1469 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
1470 min_average_noise, min_average_noise_antenna_i);
1471
1472 for (i = 0; i < NUM_RX_CHAINS; i++) {
1473 s32 delta_g = 0;
1474
1475 if (!(data->disconn_array[i]) &&
1476 (data->delta_gain_code[i] ==
1477 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
1478 delta_g = average_noise[i] - min_average_noise;
1479 data->delta_gain_code[i] = (u8)((delta_g *
1480 10) / 15);
1481 if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE <
1482 data->delta_gain_code[i])
1483 data->delta_gain_code[i] =
1484 CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
1485
1486 data->delta_gain_code[i] =
1487 (data->delta_gain_code[i] | (1 << 2));
1488 } else
1489 data->delta_gain_code[i] = 0;
1490 }
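  /*
   * Example (illustrative): if chain i averages 30 units of noise and the
   * quietest chain averages 15, delta_g = 15 and the code becomes
   * (15 * 10) / 15 = 10; assuming that is below
   * CHAIN_NOISE_MAX_DELTA_GAIN_CODE, OR-ing in (1 << 2) gives a final
   * delta_gain_code of 10 | 4 = 14.
   */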
1491 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
1492 data->delta_gain_code[0],
1493 data->delta_gain_code[1],
1494 data->delta_gain_code[2]);
1495
1496 /* Differential gain gets sent to uCode only once */
1497 if (!data->radio_write) {
1498 struct iwl4965_calibration_cmd cmd;
1499 data->radio_write = 1;
1500
1501 memset(&cmd, 0, sizeof(cmd));
1502 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1503 cmd.diff_gain_a = data->delta_gain_code[0];
1504 cmd.diff_gain_b = data->delta_gain_code[1];
1505 cmd.diff_gain_c = data->delta_gain_code[2];
1506 rc = iwl4965_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1507 sizeof(cmd), &cmd);
1508 if (rc)
1509 IWL_DEBUG_CALIB("failed sending cmd "
1510 "REPLY_PHY_CALIBRATION_CMD\n");
1511
1512 /* TODO: we might want to recalculate
1513 * rx_chain in the rxon cmd */
1514
1515 /* Mark so we run this algo only once! */
1516 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1517 }
1518 data->chain_noise_a = 0;
1519 data->chain_noise_b = 0;
1520 data->chain_noise_c = 0;
1521 data->chain_signal_a = 0;
1522 data->chain_signal_b = 0;
1523 data->chain_signal_c = 0;
1524 data->beacon_count = 0;
1525 }
1526 return;
1527 }
1528
1529 static void iwl4965_sensitivity_calibration(struct iwl4965_priv *priv,
1530 struct iwl4965_notif_statistics *resp)
1531 {
1532 int rc = 0;
1533 u32 rx_enable_time;
1534 u32 fa_cck;
1535 u32 fa_ofdm;
1536 u32 bad_plcp_cck;
1537 u32 bad_plcp_ofdm;
1538 u32 norm_fa_ofdm;
1539 u32 norm_fa_cck;
1540 struct iwl4965_sensitivity_data *data = NULL;
1541 struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
1542 struct statistics_rx *statistics = &(resp->rx);
1543 unsigned long flags;
1544 struct statistics_general_data statis;
1545
1546 data = &(priv->sensitivity_data);
1547
1548 if (!iwl4965_is_associated(priv)) {
1549 IWL_DEBUG_CALIB("<< - not associated\n");
1550 return;
1551 }
1552
1553 spin_lock_irqsave(&priv->lock, flags);
1554 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1555 IWL_DEBUG_CALIB("<< invalid data.\n");
1556 spin_unlock_irqrestore(&priv->lock, flags);
1557 return;
1558 }
1559
1560 /* Extract Statistics: */
1561 rx_enable_time = le32_to_cpu(rx_info->channel_load);
1562 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
1563 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
1564 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
1565 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
1566
1567 statis.beacon_silence_rssi_a =
1568 le32_to_cpu(statistics->general.beacon_silence_rssi_a);
1569 statis.beacon_silence_rssi_b =
1570 le32_to_cpu(statistics->general.beacon_silence_rssi_b);
1571 statis.beacon_silence_rssi_c =
1572 le32_to_cpu(statistics->general.beacon_silence_rssi_c);
1573 statis.beacon_energy_a =
1574 le32_to_cpu(statistics->general.beacon_energy_a);
1575 statis.beacon_energy_b =
1576 le32_to_cpu(statistics->general.beacon_energy_b);
1577 statis.beacon_energy_c =
1578 le32_to_cpu(statistics->general.beacon_energy_c);
1579
1580 spin_unlock_irqrestore(&priv->lock, flags);
1581
1582 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
1583
1584 if (!rx_enable_time) {
1585 IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
1586 return;
1587 }
1588
1589 /* These statistics increase monotonically, and do not reset
1590 * at each beacon. Calculate difference from last value, or just
1591 * use the new statistics value if it has reset or wrapped around. */
1592 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
1593 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
1594 else {
1595 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
1596 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
1597 }
1598
1599 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
1600 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
1601 else {
1602 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
1603 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
1604 }
1605
1606 if (data->last_fa_cnt_ofdm > fa_ofdm)
1607 data->last_fa_cnt_ofdm = fa_ofdm;
1608 else {
1609 fa_ofdm -= data->last_fa_cnt_ofdm;
1610 data->last_fa_cnt_ofdm += fa_ofdm;
1611 }
1612
1613 if (data->last_fa_cnt_cck > fa_cck)
1614 data->last_fa_cnt_cck = fa_cck;
1615 else {
1616 fa_cck -= data->last_fa_cnt_cck;
1617 data->last_fa_cnt_cck += fa_cck;
1618 }
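 /*
  * Example (illustrative): if last_fa_cnt_cck was 1200 and the new
  * cumulative report says 1250, fa_cck becomes 50 for this period and
  * last_fa_cnt_cck advances to 1250.  If the uCode restarted and now
  * reports only 30, the counter has reset, so last_fa_cnt_cck is simply
  * re-seeded with 30 and fa_cck is used as-is.
  */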
1619
1620 /* Total aborted signal locks */
1621 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
1622 norm_fa_cck = fa_cck + bad_plcp_cck;
1623
1624 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
1625 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
1626
1627 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1628 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
1629 rc |= iwl4965_sensitivity_write(priv, CMD_ASYNC);
1630
1631 return;
1632 }
1633
1634 static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1635 {
1636 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
1637 sensitivity_work);
1638
1639 mutex_lock(&priv->mutex);
1640
1641 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1642 test_bit(STATUS_SCANNING, &priv->status)) {
1643 mutex_unlock(&priv->mutex);
1644 return;
1645 }
1646
1647 if (priv->start_calib) {
1648 iwl4965_noise_calibration(priv, &priv->statistics);
1649
1650 if (priv->sensitivity_data.state ==
1651 IWL_SENS_CALIB_NEED_REINIT) {
1652 iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1653 priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1654 } else
1655 iwl4965_sensitivity_calibration(priv,
1656 &priv->statistics);
1657 }
1658
1659 mutex_unlock(&priv->mutex);
1660 return;
1661 }
1662 #endif /*CONFIG_IWL4965_SENSITIVITY*/
1663
1664 static void iwl4965_bg_txpower_work(struct work_struct *work)
1665 {
1666 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
1667 txpower_work);
1668
1669 /* If a scan happened to start before we got here
1670 * then just return; the statistics notification will
1671 * kick off another scheduled work to compensate for
1672 * any temperature delta we missed here. */
1673 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1674 test_bit(STATUS_SCANNING, &priv->status))
1675 return;
1676
1677 mutex_lock(&priv->mutex);
1678
1679 /* Regardless of whether we are associated, we must reconfigure the
1680 * TX power since frames can be sent on non-radar channels while
1681 * not associated */
1682 iwl4965_hw_reg_send_txpower(priv);
1683
1684 /* Update last_temperature to keep is_calib_needed from running
1685 * when it isn't needed... */
1686 priv->last_temperature = priv->temperature;
1687
1688 mutex_unlock(&priv->mutex);
1689 }
1690
1691 /*
1692 * Acquire priv->lock before calling this function !
1693 */
1694 static void iwl4965_set_wr_ptrs(struct iwl4965_priv *priv, int txq_id, u32 index)
1695 {
1696 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
1697 (index & 0xff) | (txq_id << 8));
1698 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(txq_id), index);
1699 }
1700
1701 /**
1702 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
1703 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
1704 * @scd_retry: (1) Indicates queue will be used in aggregation mode
1705 *
1706 * NOTE: Acquire priv->lock before calling this function !
1707 */
1708 static void iwl4965_tx_queue_set_status(struct iwl4965_priv *priv,
1709 struct iwl4965_tx_queue *txq,
1710 int tx_fifo_id, int scd_retry)
1711 {
1712 int txq_id = txq->q.id;
1713
1714 /* Find out whether to activate Tx queue */
1715 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
1716
1717 /* Set up and activate */
1718 iwl4965_write_prph(priv, KDR_SCD_QUEUE_STATUS_BITS(txq_id),
1719 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1720 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
1721 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) |
1722 (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1723 SCD_QUEUE_STTS_REG_MSK);
1724
1725 txq->sched_retry = scd_retry;
1726
1727 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
1728 active ? "Activate" : "Deactivate",
1729 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
1730 }
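/*
 * Usage example (as effectively done by iwl4965_alive_notify() below):
 * activating the command queue on its FIFO, without scheduler-retry
 * (aggregation) mode, amounts to
 *
 *	iwl4965_tx_queue_set_status(priv, &priv->txq[IWL_CMD_QUEUE_NUM],
 *				    IWL_CMD_FIFO_NUM, 0);
 */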
1731
1732 static const u16 default_queue_to_tx_fifo[] = {
1733 IWL_TX_FIFO_AC3,
1734 IWL_TX_FIFO_AC2,
1735 IWL_TX_FIFO_AC1,
1736 IWL_TX_FIFO_AC0,
1737 IWL_CMD_FIFO_NUM,
1738 IWL_TX_FIFO_HCCA_1,
1739 IWL_TX_FIFO_HCCA_2
1740 };
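/*
 * Reading the table above (illustrative note): Tx queues 0-3 feed the four
 * access-category FIFOs in reverse order (queue 0 -> IWL_TX_FIFO_AC3, ...,
 * queue 3 -> IWL_TX_FIFO_AC0), queue 4 (IWL_CMD_QUEUE_NUM) feeds the
 * command FIFO, and queues 5-6 feed the two HCCA FIFOs.
 */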
1741
1742 static inline void iwl4965_txq_ctx_activate(struct iwl4965_priv *priv, int txq_id)
1743 {
1744 set_bit(txq_id, &priv->txq_ctx_active_msk);
1745 }
1746
1747 static inline void iwl4965_txq_ctx_deactivate(struct iwl4965_priv *priv, int txq_id)
1748 {
1749 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1750 }
1751
1752 int iwl4965_alive_notify(struct iwl4965_priv *priv)
1753 {
1754 u32 a;
1755 int i = 0;
1756 unsigned long flags;
1757 int rc;
1758
1759 spin_lock_irqsave(&priv->lock, flags);
1760
1761 #ifdef CONFIG_IWL4965_SENSITIVITY
1762 memset(&(priv->sensitivity_data), 0,
1763 sizeof(struct iwl4965_sensitivity_data));
1764 memset(&(priv->chain_noise_data), 0,
1765 sizeof(struct iwl4965_chain_noise_data));
1766 for (i = 0; i < NUM_RX_CHAINS; i++)
1767 priv->chain_noise_data.delta_gain_code[i] =
1768 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1769 #endif /* CONFIG_IWL4965_SENSITIVITY*/
1770 rc = iwl4965_grab_nic_access(priv);
1771 if (rc) {
1772 spin_unlock_irqrestore(&priv->lock, flags);
1773 return rc;
1774 }
1775
1776 /* Clear 4965's internal Tx Scheduler data base */
1777 priv->scd_base_addr = iwl4965_read_prph(priv, KDR_SCD_SRAM_BASE_ADDR);
1778 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET;
1779 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1780 iwl4965_write_targ_mem(priv, a, 0);
1781 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4)
1782 iwl4965_write_targ_mem(priv, a, 0);
1783 	for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET + sizeof(u16) * priv->hw_setting.max_txq_num; a += 4)
1784 iwl4965_write_targ_mem(priv, a, 0);
1785
1786 	/* Tell 4965 where to find Tx byte count tables */
1787 iwl4965_write_prph(priv, KDR_SCD_DRAM_BASE_ADDR,
1788 (priv->hw_setting.shared_phys +
1789 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
1790
1791 /* Disable chain mode for all queues */
1792 iwl4965_write_prph(priv, KDR_SCD_QUEUECHAIN_SEL, 0);
1793
1794 /* Initialize each Tx queue (including the command queue) */
1795 for (i = 0; i < priv->hw_setting.max_txq_num; i++) {
1796
1797 /* TFD circular buffer read/write indexes */
1798 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(i), 0);
1799 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1800
1801 /* Max Tx Window size for Scheduler-ACK mode */
1802 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
1803 SCD_CONTEXT_QUEUE_OFFSET(i),
1804 (SCD_WIN_SIZE <<
1805 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1806 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1807
1808 /* Frame limit */
1809 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
1810 SCD_CONTEXT_QUEUE_OFFSET(i) +
1811 sizeof(u32),
1812 (SCD_FRAME_LIMIT <<
1813 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1814 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1815
1816 }
1817 iwl4965_write_prph(priv, KDR_SCD_INTERRUPT_MASK,
1818 (1 << priv->hw_setting.max_txq_num) - 1);
1819
1820 /* Activate all Tx DMA/FIFO channels */
1821 iwl4965_write_prph(priv, KDR_SCD_TXFACT,
1822 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1823
1824 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1825
1826 /* Map each Tx/cmd queue to its corresponding fifo */
1827 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
1828 int ac = default_queue_to_tx_fifo[i];
1829 iwl4965_txq_ctx_activate(priv, i);
1830 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
1831 }
1832
1833 iwl4965_release_nic_access(priv);
1834 spin_unlock_irqrestore(&priv->lock, flags);
1835
1836 return 0;
1837 }
1838
1839 /**
1840 * iwl4965_hw_set_hw_setting
1841 *
1842 * Called when initializing driver
1843 */
1844 int iwl4965_hw_set_hw_setting(struct iwl4965_priv *priv)
1845 {
1846 /* Allocate area for Tx byte count tables and Rx queue status */
1847 priv->hw_setting.shared_virt =
1848 pci_alloc_consistent(priv->pci_dev,
1849 sizeof(struct iwl4965_shared),
1850 &priv->hw_setting.shared_phys);
1851
1852 if (!priv->hw_setting.shared_virt)
1853 return -1;
1854
1855 memset(priv->hw_setting.shared_virt, 0, sizeof(struct iwl4965_shared));
1856
1857 priv->hw_setting.max_txq_num = iwl4965_param_queues_num;
1858 priv->hw_setting.tx_cmd_len = sizeof(struct iwl4965_tx_cmd);
1859 priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE;
1860 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
1861 if (iwl4965_param_amsdu_size_8K)
1862 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_8K;
1863 else
1864 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_4K;
1865 priv->hw_setting.max_pkt_size = priv->hw_setting.rx_buf_size - 256;
1866 priv->hw_setting.max_stations = IWL4965_STATION_COUNT;
1867 priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID;
1868
1869 priv->hw_setting.tx_ant_num = 2;
1870
1871 return 0;
1872 }
1873
1874 /**
1875 * iwl4965_hw_txq_ctx_free - Free TXQ Context
1876 *
1877 * Destroy all TX DMA queues and structures
1878 */
1879 void iwl4965_hw_txq_ctx_free(struct iwl4965_priv *priv)
1880 {
1881 int txq_id;
1882
1883 /* Tx queues */
1884 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
1885 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
1886
1887 /* Keep-warm buffer */
1888 iwl4965_kw_free(priv);
1889 }
1890
1891 /**
1892 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
1893 *
1894 * Does NOT advance any TFD circular buffer read/write indexes
1895 * Does NOT free the TFD itself (which is within circular buffer)
1896 */
1897 int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
1898 {
1899 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
1900 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
1901 struct pci_dev *dev = priv->pci_dev;
1902 int i;
1903 int counter = 0;
1904 int index, is_odd;
1905
1906 /* Host command buffers stay mapped in memory, nothing to clean */
1907 if (txq->q.id == IWL_CMD_QUEUE_NUM)
1908 return 0;
1909
1910 /* Sanity check on number of chunks */
1911 counter = IWL_GET_BITS(*bd, num_tbs);
1912 if (counter > MAX_NUM_OF_TBS) {
1913 IWL_ERROR("Too many chunks: %i\n", counter);
1914 		/* @todo issue fatal error, this is quite a serious situation */
1915 return 0;
1916 }
1917
1918 	/* Unmap chunks, if any.
1919 	 * TFD info for odd chunks is in a different format than for even chunks. */
1920 for (i = 0; i < counter; i++) {
1921 index = i / 2;
1922 is_odd = i & 0x1;
1923
1924 if (is_odd)
1925 pci_unmap_single(
1926 dev,
1927 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
1928 (IWL_GET_BITS(bd->pa[index],
1929 tb2_addr_hi20) << 16),
1930 IWL_GET_BITS(bd->pa[index], tb2_len),
1931 PCI_DMA_TODEVICE);
1932
1933 else if (i > 0)
1934 pci_unmap_single(dev,
1935 le32_to_cpu(bd->pa[index].tb1_addr),
1936 IWL_GET_BITS(bd->pa[index], tb1_len),
1937 PCI_DMA_TODEVICE);
1938
1939 /* Free SKB, if any, for this chunk */
1940 if (txq->txb[txq->q.read_ptr].skb[i]) {
1941 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
1942
1943 dev_kfree_skb(skb);
1944 txq->txb[txq->q.read_ptr].skb[i] = NULL;
1945 }
1946 }
1947 return 0;
1948 }
1949
1950 int iwl4965_hw_reg_set_txpower(struct iwl4965_priv *priv, s8 power)
1951 {
1952 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
1953 return -EINVAL;
1954 }
1955
1956 static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
1957 {
1958 s32 sign = 1;
1959
1960 if (num < 0) {
1961 sign = -sign;
1962 num = -num;
1963 }
1964 if (denom < 0) {
1965 sign = -sign;
1966 denom = -denom;
1967 }
1968 	*res = ((num * 2 + denom) / (denom * 2)) * sign;
1970
1971 return 1;
1972 }
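/*
 * Example of the rounding behaviour above (illustrative values): num = 7,
 * denom = 2 gives (7*2 + 2) / (2*2) = 4, and num = -7, denom = 2 gives -4,
 * i.e. the quotient is rounded to the nearest integer, away from zero at
 * the halfway point, rather than truncated toward zero as plain C integer
 * division would do.
 */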
1973
1974 /**
1975 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
1976 *
1977 * Determines power supply voltage compensation for txpower calculations.
1978 * Returns number of 1/2-dB steps to subtract from gain table index,
1979 * to compensate for difference between power supply voltage during
1980 * factory measurements, vs. current power supply voltage.
1981 *
1982 * Voltage indication is higher for lower voltage.
1983 * Lower voltage requires more gain (lower gain table index).
1984 */
1985 static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
1986 s32 current_voltage)
1987 {
1988 s32 comp = 0;
1989
1990 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
1991 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
1992 return 0;
1993
1994 iwl4965_math_div_round(current_voltage - eeprom_voltage,
1995 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
1996
1997 if (current_voltage > eeprom_voltage)
1998 comp *= 2;
1999 if ((comp < -2) || (comp > 2))
2000 comp = 0;
2001
2002 return comp;
2003 }
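/*
 * In short: the difference between the current and factory voltage
 * indications is divided (with rounding) by TX_POWER_IWL_VOLTAGE_CODES_PER_03V
 * to get half-dB steps, the result is doubled when the current indication is
 * higher (i.e. actual supply voltage is lower than at the factory), and any
 * value outside [-2, 2] is dropped back to 0.
 */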
2004
2005 static const struct iwl4965_channel_info *
2006 iwl4965_get_channel_txpower_info(struct iwl4965_priv *priv,
2007 enum ieee80211_band band, u16 channel)
2008 {
2009 const struct iwl4965_channel_info *ch_info;
2010
2011 ch_info = iwl4965_get_channel_info(priv, band, channel);
2012
2013 if (!is_channel_valid(ch_info))
2014 return NULL;
2015
2016 return ch_info;
2017 }
2018
2019 static s32 iwl4965_get_tx_atten_grp(u16 channel)
2020 {
2021 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
2022 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
2023 return CALIB_CH_GROUP_5;
2024
2025 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
2026 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
2027 return CALIB_CH_GROUP_1;
2028
2029 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
2030 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
2031 return CALIB_CH_GROUP_2;
2032
2033 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
2034 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
2035 return CALIB_CH_GROUP_3;
2036
2037 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
2038 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
2039 return CALIB_CH_GROUP_4;
2040
2041 IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
2042 return -1;
2043 }
2044
2045 static u32 iwl4965_get_sub_band(const struct iwl4965_priv *priv, u32 channel)
2046 {
2047 s32 b = -1;
2048
2049 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
2050 if (priv->eeprom.calib_info.band_info[b].ch_from == 0)
2051 continue;
2052
2053 if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from)
2054 && (channel <= priv->eeprom.calib_info.band_info[b].ch_to))
2055 break;
2056 }
2057
2058 return b;
2059 }
2060
2061 static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
2062 {
2063 s32 val;
2064
2065 if (x2 == x1)
2066 return y1;
2067 else {
2068 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
2069 return val + y2;
2070 }
2071 }
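/*
 * Worked example with illustrative values: interpolating between factory
 * points (x1, y1) = (36, 100) and (x2, y2) = (64, 80) at x = 50 gives
 * (x2 - x) * (y1 - y2) / (x2 - x1) = 14 * 20 / 28 = 10, so the result is
 * 10 + y2 = 90.
 */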
2072
2073 /**
2074 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
2075 *
2076 * Interpolates factory measurements from the two sample channels within a
2077 * sub-band, to apply to channel of interest. Interpolation is proportional to
2078 * differences in channel frequencies, which is proportional to differences
2079 * in channel number.
2080 */
2081 static int iwl4965_interpolate_chan(struct iwl4965_priv *priv, u32 channel,
2082 struct iwl4965_eeprom_calib_ch_info *chan_info)
2083 {
2084 s32 s = -1;
2085 u32 c;
2086 u32 m;
2087 const struct iwl4965_eeprom_calib_measure *m1;
2088 const struct iwl4965_eeprom_calib_measure *m2;
2089 struct iwl4965_eeprom_calib_measure *omeas;
2090 u32 ch_i1;
2091 u32 ch_i2;
2092
2093 s = iwl4965_get_sub_band(priv, channel);
2094 if (s >= EEPROM_TX_POWER_BANDS) {
2095 		IWL_ERROR("Tx Power can not find channel %d\n", channel);
2096 return -1;
2097 }
2098
2099 ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num;
2100 ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num;
2101 chan_info->ch_num = (u8) channel;
2102
2103 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
2104 channel, s, ch_i1, ch_i2);
2105
2106 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
2107 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
2108 m1 = &(priv->eeprom.calib_info.band_info[s].ch1.
2109 measurements[c][m]);
2110 m2 = &(priv->eeprom.calib_info.band_info[s].ch2.
2111 measurements[c][m]);
2112 omeas = &(chan_info->measurements[c][m]);
2113
2114 omeas->actual_pow =
2115 (u8) iwl4965_interpolate_value(channel, ch_i1,
2116 m1->actual_pow,
2117 ch_i2,
2118 m2->actual_pow);
2119 omeas->gain_idx =
2120 (u8) iwl4965_interpolate_value(channel, ch_i1,
2121 m1->gain_idx, ch_i2,
2122 m2->gain_idx);
2123 omeas->temperature =
2124 (u8) iwl4965_interpolate_value(channel, ch_i1,
2125 m1->temperature,
2126 ch_i2,
2127 m2->temperature);
2128 omeas->pa_det =
2129 (s8) iwl4965_interpolate_value(channel, ch_i1,
2130 m1->pa_det, ch_i2,
2131 m2->pa_det);
2132
2133 IWL_DEBUG_TXPOWER
2134 ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
2135 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
2136 IWL_DEBUG_TXPOWER
2137 ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
2138 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
2139 IWL_DEBUG_TXPOWER
2140 ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
2141 m1->pa_det, m2->pa_det, omeas->pa_det);
2142 IWL_DEBUG_TXPOWER
2143 ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
2144 m1->temperature, m2->temperature,
2145 omeas->temperature);
2146 }
2147 }
2148
2149 return 0;
2150 }
2151
2152 /* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
2153 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
2154 static s32 back_off_table[] = {
2155 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
2156 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
2157 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
2158 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
2159 10 /* CCK */
2160 };
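/*
 * Note: the 33 entries above match the 33 rows of the txpower table built
 * by iwl4965_fill_txpower_tbl() below (4 groups of 8 OFDM rates plus one
 * shared CCK entry). Each back-off is subtracted from the saturation power,
 * in half-dB, before the regulatory limit is applied.
 */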
2161
2162 /* Thermal compensation values for txpower for various frequency ranges ...
2163 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
2164 static struct iwl4965_txpower_comp_entry {
2165 s32 degrees_per_05db_a;
2166 s32 degrees_per_05db_a_denom;
2167 } tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
2168 {9, 2}, /* group 0 5.2, ch 34-43 */
2169 {4, 1}, /* group 1 5.2, ch 44-70 */
2170 {4, 1}, /* group 2 5.2, ch 71-124 */
2171 {4, 1}, /* group 3 5.2, ch 125-200 */
2172 {3, 1} /* group 4 2.4, ch all */
2173 };
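/*
 * Example of how these ratios are used below: the thermal correction is
 * roughly round((current_temp - factory_temp) * denom / num) half-dB steps,
 * so for group 0 (9/2, i.e. 4.5 degrees Celsius per half-dB) a die running
 * 9 degrees warmer than at factory calibration yields a correction of 2 steps.
 */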
2174
2175 static s32 get_min_power_index(s32 rate_power_index, u32 band)
2176 {
2177 if (!band) {
2178 if ((rate_power_index & 7) <= 4)
2179 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
2180 }
2181 return MIN_TX_GAIN_INDEX;
2182 }
2183
2184 struct gain_entry {
2185 u8 dsp;
2186 u8 radio;
2187 };
2188
2189 static const struct gain_entry gain_table[2][108] = {
2190 /* 5.2GHz power gain index table */
2191 {
2192 {123, 0x3F}, /* highest txpower */
2193 {117, 0x3F},
2194 {110, 0x3F},
2195 {104, 0x3F},
2196 {98, 0x3F},
2197 {110, 0x3E},
2198 {104, 0x3E},
2199 {98, 0x3E},
2200 {110, 0x3D},
2201 {104, 0x3D},
2202 {98, 0x3D},
2203 {110, 0x3C},
2204 {104, 0x3C},
2205 {98, 0x3C},
2206 {110, 0x3B},
2207 {104, 0x3B},
2208 {98, 0x3B},
2209 {110, 0x3A},
2210 {104, 0x3A},
2211 {98, 0x3A},
2212 {110, 0x39},
2213 {104, 0x39},
2214 {98, 0x39},
2215 {110, 0x38},
2216 {104, 0x38},
2217 {98, 0x38},
2218 {110, 0x37},
2219 {104, 0x37},
2220 {98, 0x37},
2221 {110, 0x36},
2222 {104, 0x36},
2223 {98, 0x36},
2224 {110, 0x35},
2225 {104, 0x35},
2226 {98, 0x35},
2227 {110, 0x34},
2228 {104, 0x34},
2229 {98, 0x34},
2230 {110, 0x33},
2231 {104, 0x33},
2232 {98, 0x33},
2233 {110, 0x32},
2234 {104, 0x32},
2235 {98, 0x32},
2236 {110, 0x31},
2237 {104, 0x31},
2238 {98, 0x31},
2239 {110, 0x30},
2240 {104, 0x30},
2241 {98, 0x30},
2242 {110, 0x25},
2243 {104, 0x25},
2244 {98, 0x25},
2245 {110, 0x24},
2246 {104, 0x24},
2247 {98, 0x24},
2248 {110, 0x23},
2249 {104, 0x23},
2250 {98, 0x23},
2251 {110, 0x22},
2252 {104, 0x18},
2253 {98, 0x18},
2254 {110, 0x17},
2255 {104, 0x17},
2256 {98, 0x17},
2257 {110, 0x16},
2258 {104, 0x16},
2259 {98, 0x16},
2260 {110, 0x15},
2261 {104, 0x15},
2262 {98, 0x15},
2263 {110, 0x14},
2264 {104, 0x14},
2265 {98, 0x14},
2266 {110, 0x13},
2267 {104, 0x13},
2268 {98, 0x13},
2269 {110, 0x12},
2270 {104, 0x08},
2271 {98, 0x08},
2272 {110, 0x07},
2273 {104, 0x07},
2274 {98, 0x07},
2275 {110, 0x06},
2276 {104, 0x06},
2277 {98, 0x06},
2278 {110, 0x05},
2279 {104, 0x05},
2280 {98, 0x05},
2281 {110, 0x04},
2282 {104, 0x04},
2283 {98, 0x04},
2284 {110, 0x03},
2285 {104, 0x03},
2286 {98, 0x03},
2287 {110, 0x02},
2288 {104, 0x02},
2289 {98, 0x02},
2290 {110, 0x01},
2291 {104, 0x01},
2292 {98, 0x01},
2293 {110, 0x00},
2294 {104, 0x00},
2295 {98, 0x00},
2296 {93, 0x00},
2297 {88, 0x00},
2298 {83, 0x00},
2299 {78, 0x00},
2300 },
2301 /* 2.4GHz power gain index table */
2302 {
2303 {110, 0x3f}, /* highest txpower */
2304 {104, 0x3f},
2305 {98, 0x3f},
2306 {110, 0x3e},
2307 {104, 0x3e},
2308 {98, 0x3e},
2309 {110, 0x3d},
2310 {104, 0x3d},
2311 {98, 0x3d},
2312 {110, 0x3c},
2313 {104, 0x3c},
2314 {98, 0x3c},
2315 {110, 0x3b},
2316 {104, 0x3b},
2317 {98, 0x3b},
2318 {110, 0x3a},
2319 {104, 0x3a},
2320 {98, 0x3a},
2321 {110, 0x39},
2322 {104, 0x39},
2323 {98, 0x39},
2324 {110, 0x38},
2325 {104, 0x38},
2326 {98, 0x38},
2327 {110, 0x37},
2328 {104, 0x37},
2329 {98, 0x37},
2330 {110, 0x36},
2331 {104, 0x36},
2332 {98, 0x36},
2333 {110, 0x35},
2334 {104, 0x35},
2335 {98, 0x35},
2336 {110, 0x34},
2337 {104, 0x34},
2338 {98, 0x34},
2339 {110, 0x33},
2340 {104, 0x33},
2341 {98, 0x33},
2342 {110, 0x32},
2343 {104, 0x32},
2344 {98, 0x32},
2345 {110, 0x31},
2346 {104, 0x31},
2347 {98, 0x31},
2348 {110, 0x30},
2349 {104, 0x30},
2350 {98, 0x30},
2351 {110, 0x6},
2352 {104, 0x6},
2353 {98, 0x6},
2354 {110, 0x5},
2355 {104, 0x5},
2356 {98, 0x5},
2357 {110, 0x4},
2358 {104, 0x4},
2359 {98, 0x4},
2360 {110, 0x3},
2361 {104, 0x3},
2362 {98, 0x3},
2363 {110, 0x2},
2364 {104, 0x2},
2365 {98, 0x2},
2366 {110, 0x1},
2367 {104, 0x1},
2368 {98, 0x1},
2369 {110, 0x0},
2370 {104, 0x0},
2371 {98, 0x0},
2372 {97, 0},
2373 {96, 0},
2374 {95, 0},
2375 {94, 0},
2376 {93, 0},
2377 {92, 0},
2378 {91, 0},
2379 {90, 0},
2380 {89, 0},
2381 {88, 0},
2382 {87, 0},
2383 {86, 0},
2384 {85, 0},
2385 {84, 0},
2386 {83, 0},
2387 {82, 0},
2388 {81, 0},
2389 {80, 0},
2390 {79, 0},
2391 {78, 0},
2392 {77, 0},
2393 {76, 0},
2394 {75, 0},
2395 {74, 0},
2396 {73, 0},
2397 {72, 0},
2398 {71, 0},
2399 {70, 0},
2400 {69, 0},
2401 {68, 0},
2402 {67, 0},
2403 {66, 0},
2404 {65, 0},
2405 {64, 0},
2406 {63, 0},
2407 {62, 0},
2408 {61, 0},
2409 {60, 0},
2410 {59, 0},
2411 }
2412 };
2413
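/*
 * Rough sketch of the calculation below: for each of the 33 rate entries
 * and each of the 2 Tx chains,
 *
 *   power_index = factory_gain_index
 *                 - (target_power - factory_actual_power)
 *                 - temperature_comp - voltage_comp + mimo_atten
 *
 * all in half-dB steps, then clamped to the bounds of gain_table[] above;
 * a higher index means lower txpower. The selected radio and DSP gain
 * values are what end up in the txpower command sent to the uCode.
 */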
2414 static int iwl4965_fill_txpower_tbl(struct iwl4965_priv *priv, u8 band, u16 channel,
2415 u8 is_fat, u8 ctrl_chan_high,
2416 struct iwl4965_tx_power_db *tx_power_tbl)
2417 {
2418 u8 saturation_power;
2419 s32 target_power;
2420 s32 user_target_power;
2421 s32 power_limit;
2422 s32 current_temp;
2423 s32 reg_limit;
2424 s32 current_regulatory;
2425 s32 txatten_grp = CALIB_CH_GROUP_MAX;
2426 int i;
2427 int c;
2428 const struct iwl4965_channel_info *ch_info = NULL;
2429 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info;
2430 const struct iwl4965_eeprom_calib_measure *measurement;
2431 s16 voltage;
2432 s32 init_voltage;
2433 s32 voltage_compensation;
2434 s32 degrees_per_05db_num;
2435 s32 degrees_per_05db_denom;
2436 s32 factory_temp;
2437 s32 temperature_comp[2];
2438 s32 factory_gain_index[2];
2439 s32 factory_actual_pwr[2];
2440 s32 power_index;
2441
2442 /* Sanity check requested level (dBm) */
2443 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
2444 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
2445 priv->user_txpower_limit);
2446 return -EINVAL;
2447 }
2448 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
2449 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
2450 priv->user_txpower_limit);
2451 return -EINVAL;
2452 }
2453
2454 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
2455 * are used for indexing into txpower table) */
2456 user_target_power = 2 * priv->user_txpower_limit;
2457
2458 /* Get current (RXON) channel, band, width */
2459 ch_info =
2460 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
2461
2462 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
2463 is_fat);
2464
2465 if (!ch_info)
2466 return -EINVAL;
2467
2468 /* get txatten group, used to select 1) thermal txpower adjustment
2469 * and 2) mimo txpower balance between Tx chains. */
2470 txatten_grp = iwl4965_get_tx_atten_grp(channel);
2471 if (txatten_grp < 0)
2472 return -EINVAL;
2473
2474 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
2475 channel, txatten_grp);
2476
2477 if (is_fat) {
2478 if (ctrl_chan_high)
2479 channel -= 2;
2480 else
2481 channel += 2;
2482 }
2483
2484 /* hardware txpower limits ...
2485 * saturation (clipping distortion) txpowers are in half-dBm */
2486 if (band)
2487 saturation_power = priv->eeprom.calib_info.saturation_power24;
2488 else
2489 saturation_power = priv->eeprom.calib_info.saturation_power52;
2490
2491 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2492 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
2493 if (band)
2494 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
2495 else
2496 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
2497 }
2498
2499 /* regulatory txpower limits ... reg_limit values are in half-dBm,
2500 * max_power_avg values are in dBm, convert * 2 */
2501 if (is_fat)
2502 reg_limit = ch_info->fat_max_power_avg * 2;
2503 else
2504 reg_limit = ch_info->max_power_avg * 2;
2505
2506 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
2507 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
2508 if (band)
2509 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
2510 else
2511 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
2512 }
2513
2514 /* Interpolate txpower calibration values for this channel,
2515 * based on factory calibration tests on spaced channels. */
2516 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2517
2518 /* calculate tx gain adjustment based on power supply voltage */
2519 voltage = priv->eeprom.calib_info.voltage;
2520 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2521 voltage_compensation =
2522 iwl4965_get_voltage_compensation(voltage, init_voltage);
2523
2524 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
2525 init_voltage,
2526 voltage, voltage_compensation);
2527
2528 /* get current temperature (Celsius) */
2529 	current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
2530 	current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
2531 current_temp = KELVIN_TO_CELSIUS(current_temp);
2532
2533 /* select thermal txpower adjustment params, based on channel group
2534 * (same frequency group used for mimo txatten adjustment) */
2535 degrees_per_05db_num =
2536 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
2537 degrees_per_05db_denom =
2538 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
2539
2540 /* get per-chain txpower values from factory measurements */
2541 for (c = 0; c < 2; c++) {
2542 measurement = &ch_eeprom_info.measurements[c][1];
2543
2544 /* txgain adjustment (in half-dB steps) based on difference
2545 * between factory and current temperature */
2546 factory_temp = measurement->temperature;
2547 iwl4965_math_div_round((current_temp - factory_temp) *
2548 degrees_per_05db_denom,
2549 degrees_per_05db_num,
2550 &temperature_comp[c]);
2551
2552 factory_gain_index[c] = measurement->gain_idx;
2553 factory_actual_pwr[c] = measurement->actual_pow;
2554
2555 IWL_DEBUG_TXPOWER("chain = %d\n", c);
2556 IWL_DEBUG_TXPOWER("fctry tmp %d, "
2557 "curr tmp %d, comp %d steps\n",
2558 factory_temp, current_temp,
2559 temperature_comp[c]);
2560
2561 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
2562 factory_gain_index[c],
2563 factory_actual_pwr[c]);
2564 }
2565
2566 /* for each of 33 bit-rates (including 1 for CCK) */
2567 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
2568 u8 is_mimo_rate;
2569 union iwl4965_tx_power_dual_stream tx_power;
2570
2571 /* for mimo, reduce each chain's txpower by half
2572 * (3dB, 6 steps), so total output power is regulatory
2573 * compliant. */
2574 if (i & 0x8) {
2575 current_regulatory = reg_limit -
2576 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
2577 is_mimo_rate = 1;
2578 } else {
2579 current_regulatory = reg_limit;
2580 is_mimo_rate = 0;
2581 }
2582
2583 /* find txpower limit, either hardware or regulatory */
2584 power_limit = saturation_power - back_off_table[i];
2585 if (power_limit > current_regulatory)
2586 power_limit = current_regulatory;
2587
2588 /* reduce user's txpower request if necessary
2589 * for this rate on this channel */
2590 target_power = user_target_power;
2591 if (target_power > power_limit)
2592 target_power = power_limit;
2593
2594 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
2595 i, saturation_power - back_off_table[i],
2596 current_regulatory, user_target_power,
2597 target_power);
2598
2599 /* for each of 2 Tx chains (radio transmitters) */
2600 for (c = 0; c < 2; c++) {
2601 s32 atten_value;
2602
2603 if (is_mimo_rate)
2604 atten_value =
2605 (s32)le32_to_cpu(priv->card_alive_init.
2606 tx_atten[txatten_grp][c]);
2607 else
2608 atten_value = 0;
2609
2610 /* calculate index; higher index means lower txpower */
2611 power_index = (u8) (factory_gain_index[c] -
2612 (target_power -
2613 factory_actual_pwr[c]) -
2614 temperature_comp[c] -
2615 voltage_compensation +
2616 atten_value);
2617
2618 /* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
2619 power_index); */
2620
2621 if (power_index < get_min_power_index(i, band))
2622 power_index = get_min_power_index(i, band);
2623
2624 /* adjust 5 GHz index to support negative indexes */
2625 if (!band)
2626 power_index += 9;
2627
2628 /* CCK, rate 32, reduce txpower for CCK */
2629 if (i == POWER_TABLE_CCK_ENTRY)
2630 power_index +=
2631 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
2632
2633 /* stay within the table! */
2634 if (power_index > 107) {
2635 IWL_WARNING("txpower index %d > 107\n",
2636 power_index);
2637 power_index = 107;
2638 }
2639 if (power_index < 0) {
2640 IWL_WARNING("txpower index %d < 0\n",
2641 power_index);
2642 power_index = 0;
2643 }
2644
2645 /* fill txpower command for this rate/chain */
2646 tx_power.s.radio_tx_gain[c] =
2647 gain_table[band][power_index].radio;
2648 tx_power.s.dsp_predis_atten[c] =
2649 gain_table[band][power_index].dsp;
2650
2651 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
2652 "gain 0x%02x dsp %d\n",
2653 c, atten_value, power_index,
2654 tx_power.s.radio_tx_gain[c],
2655 tx_power.s.dsp_predis_atten[c]);
2656 }/* for each chain */
2657
2658 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
2659
2660 }/* for each rate */
2661
2662 return 0;
2663 }
2664
2665 /**
2666 * iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit
2667 *
2668 * Uses the active RXON for channel, band, and characteristics (fat, high)
2669 * The power limit is taken from priv->user_txpower_limit.
2670 */
2671 int iwl4965_hw_reg_send_txpower(struct iwl4965_priv *priv)
2672 {
2673 struct iwl4965_txpowertable_cmd cmd = { 0 };
2674 int rc = 0;
2675 u8 band = 0;
2676 u8 is_fat = 0;
2677 u8 ctrl_chan_high = 0;
2678
2679 if (test_bit(STATUS_SCANNING, &priv->status)) {
2680 /* If this gets hit a lot, switch it to a BUG() and catch
2681 * the stack trace to find out who is calling this during
2682 * a scan. */
2683 IWL_WARNING("TX Power requested while scanning!\n");
2684 return -EAGAIN;
2685 }
2686
2687 band = priv->band == IEEE80211_BAND_2GHZ;
2688
2689 is_fat = is_fat_channel(priv->active_rxon.flags);
2690
2691 if (is_fat &&
2692 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2693 ctrl_chan_high = 1;
2694
2695 cmd.band = band;
2696 cmd.channel = priv->active_rxon.channel;
2697
2698 rc = iwl4965_fill_txpower_tbl(priv, band,
2699 le16_to_cpu(priv->active_rxon.channel),
2700 is_fat, ctrl_chan_high, &cmd.tx_power);
2701 if (rc)
2702 return rc;
2703
2704 rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
2705 return rc;
2706 }
2707
2708 int iwl4965_hw_channel_switch(struct iwl4965_priv *priv, u16 channel)
2709 {
2710 int rc;
2711 u8 band = 0;
2712 u8 is_fat = 0;
2713 u8 ctrl_chan_high = 0;
2714 struct iwl4965_channel_switch_cmd cmd = { 0 };
2715 const struct iwl4965_channel_info *ch_info;
2716
2717 band = priv->band == IEEE80211_BAND_2GHZ;
2718
2719 ch_info = iwl4965_get_channel_info(priv, priv->band, channel);
2720
2721 is_fat = is_fat_channel(priv->staging_rxon.flags);
2722
2723 if (is_fat &&
2724 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2725 ctrl_chan_high = 1;
2726
2727 cmd.band = band;
2728 cmd.expect_beacon = 0;
2729 cmd.channel = cpu_to_le16(channel);
2730 cmd.rxon_flags = priv->active_rxon.flags;
2731 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
2732 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
2733 if (ch_info)
2734 cmd.expect_beacon = is_channel_radar(ch_info);
2735 else
2736 cmd.expect_beacon = 1;
2737
2738 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
2739 ctrl_chan_high, &cmd.tx_power);
2740 if (rc) {
2741 IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
2742 return rc;
2743 }
2744
2745 rc = iwl4965_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
2746 return rc;
2747 }
2748
2749 #define RTS_HCCA_RETRY_LIMIT 3
2750 #define RTS_DFAULT_RETRY_LIMIT 60
2751
2752 void iwl4965_hw_build_tx_cmd_rate(struct iwl4965_priv *priv,
2753 struct iwl4965_cmd *cmd,
2754 struct ieee80211_tx_control *ctrl,
2755 struct ieee80211_hdr *hdr, int sta_id,
2756 int is_hcca)
2757 {
2758 struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
2759 u8 rts_retry_limit = 0;
2760 u8 data_retry_limit = 0;
2761 u16 fc = le16_to_cpu(hdr->frame_control);
2762 u8 rate_plcp;
2763 u16 rate_flags = 0;
2764 int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);
2765
2766 rate_plcp = iwl4965_rates[rate_idx].plcp;
2767
2768 rts_retry_limit = (is_hcca) ?
2769 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
2770
2771 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
2772 rate_flags |= RATE_MCS_CCK_MSK;
2773
2774
2775 if (ieee80211_is_probe_response(fc)) {
2776 data_retry_limit = 3;
2777 if (data_retry_limit < rts_retry_limit)
2778 rts_retry_limit = data_retry_limit;
2779 } else
2780 data_retry_limit = IWL_DEFAULT_TX_RETRY;
2781
2782 if (priv->data_retry_limit != -1)
2783 data_retry_limit = priv->data_retry_limit;
2784
2785
2786 if (ieee80211_is_data(fc)) {
2787 tx->initial_rate_index = 0;
2788 tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
2789 } else {
2790 switch (fc & IEEE80211_FCTL_STYPE) {
2791 case IEEE80211_STYPE_AUTH:
2792 case IEEE80211_STYPE_DEAUTH:
2793 case IEEE80211_STYPE_ASSOC_REQ:
2794 case IEEE80211_STYPE_REASSOC_REQ:
2795 if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
2796 tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2797 tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
2798 }
2799 break;
2800 default:
2801 break;
2802 }
2803
2804 /* Alternate between antenna A and B for successive frames */
2805 if (priv->use_ant_b_for_management_frame) {
2806 priv->use_ant_b_for_management_frame = 0;
2807 rate_flags |= RATE_MCS_ANT_B_MSK;
2808 } else {
2809 priv->use_ant_b_for_management_frame = 1;
2810 rate_flags |= RATE_MCS_ANT_A_MSK;
2811 }
2812 }
2813
2814 tx->rts_retry_limit = rts_retry_limit;
2815 tx->data_retry_limit = data_retry_limit;
2816 tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
2817 }
2818
2819 int iwl4965_hw_get_rx_read(struct iwl4965_priv *priv)
2820 {
2821 struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt;
2822
2823 return IWL_GET_BITS(*shared_data, rb_closed_stts_rb_num);
2824 }
2825
2826 int iwl4965_hw_get_temperature(struct iwl4965_priv *priv)
2827 {
2828 return priv->temperature;
2829 }
2830
2831 unsigned int iwl4965_hw_get_beacon_cmd(struct iwl4965_priv *priv,
2832 struct iwl4965_frame *frame, u8 rate)
2833 {
2834 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
2835 unsigned int frame_size;
2836
2837 tx_beacon_cmd = &frame->u.beacon;
2838 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2839
2840 tx_beacon_cmd->tx.sta_id = priv->hw_setting.bcast_sta_id;
2841 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2842
2843 frame_size = iwl4965_fill_beacon_frame(priv,
2844 tx_beacon_cmd->frame,
2845 iwl4965_broadcast_addr,
2846 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2847
2848 BUG_ON(frame_size > MAX_MPDU_SIZE);
2849 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
2850
2851 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
2852 tx_beacon_cmd->tx.rate_n_flags =
2853 iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
2854 else
2855 tx_beacon_cmd->tx.rate_n_flags =
2856 iwl4965_hw_set_rate_n_flags(rate, 0);
2857
2858 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2859 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
2860 return (sizeof(*tx_beacon_cmd) + frame_size);
2861 }
2862
2863 /*
2864 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
2865 * given Tx queue, and enable the DMA channel used for that queue.
2866 *
2867 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
2868 * channels supported in hardware.
2869 */
2870 int iwl4965_hw_tx_queue_init(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
2871 {
2872 int rc;
2873 unsigned long flags;
2874 int txq_id = txq->q.id;
2875
2876 spin_lock_irqsave(&priv->lock, flags);
2877 rc = iwl4965_grab_nic_access(priv);
2878 if (rc) {
2879 spin_unlock_irqrestore(&priv->lock, flags);
2880 return rc;
2881 }
2882
2883 /* Circular buffer (TFD queue in DRAM) physical base address */
2884 iwl4965_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
2885 txq->q.dma_addr >> 8);
2886
2887 /* Enable DMA channel, using same id as for TFD queue */
2888 iwl4965_write_direct32(
2889 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
2890 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2891 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
2892 iwl4965_release_nic_access(priv);
2893 spin_unlock_irqrestore(&priv->lock, flags);
2894
2895 return 0;
2896 }
2897
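/*
 * Note on the TFD layout handled below: each pa[] entry of a TFD describes
 * two Tx buffers. Even-numbered buffers use the tb1_* fields (32-bit
 * address, high address bits and length), odd-numbered buffers use the
 * tb2_* fields (address split into low 16 / high 20 bits), which is why
 * the code indexes pa[num_tbs / 2] and switches on num_tbs & 1.
 */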
2898 int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl4965_priv *priv, void *ptr,
2899 dma_addr_t addr, u16 len)
2900 {
2901 int index, is_odd;
2902 struct iwl4965_tfd_frame *tfd = ptr;
2903 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
2904
2905 	/* Each TFD can point to a maximum of 20 Tx buffers */
2906 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
2907 IWL_ERROR("Error can not send more than %d chunks\n",
2908 MAX_NUM_OF_TBS);
2909 return -EINVAL;
2910 }
2911
2912 index = num_tbs / 2;
2913 is_odd = num_tbs & 0x1;
2914
2915 if (!is_odd) {
2916 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
2917 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
2918 iwl_get_dma_hi_address(addr));
2919 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
2920 } else {
2921 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
2922 (u32) (addr & 0xffff));
2923 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
2924 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
2925 }
2926
2927 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
2928
2929 return 0;
2930 }
2931
2932 static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv)
2933 {
2934 u16 hw_version = priv->eeprom.board_revision_4965;
2935
2936 IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n",
2937 ((hw_version >> 8) & 0x0F),
2938 ((hw_version >> 8) >> 4), (hw_version & 0x00FF));
2939
2940 IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
2941 priv->eeprom.board_pba_number_4965);
2942 }
2943
2944 #define IWL_TX_CRC_SIZE 4
2945 #define IWL_TX_DELIMITER_SIZE 4
2946
2947 /**
2948 * iwl4965_tx_queue_update_wr_ptr - Set up entry in Tx byte-count array
2949 */
2950 int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv,
2951 struct iwl4965_tx_queue *txq, u16 byte_cnt)
2952 {
2953 int len;
2954 int txq_id = txq->q.id;
2955 struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt;
2956
2957 if (txq->need_update == 0)
2958 return 0;
2959
2960 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
2961
2962 /* Set up byte count within first 256 entries */
2963 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2964 tfd_offset[txq->q.write_ptr], byte_cnt, len);
2965
2966 /* If within first 64 entries, duplicate at end */
2967 if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE)
2968 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2969 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr],
2970 byte_cnt, len);
2971
2972 return 0;
2973 }
2974
2975 /**
2976 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
2977 *
2978 * Selects how many and which Rx receivers/antennas/chains to use.
2979 * This should not be used for scan command ... it puts data in wrong place.
2980 */
2981 void iwl4965_set_rxon_chain(struct iwl4965_priv *priv)
2982 {
2983 u8 is_single = is_single_stream(priv);
2984 u8 idle_state, rx_state;
2985
2986 priv->staging_rxon.rx_chain = 0;
2987 rx_state = idle_state = 3;
2988
2989 /* Tell uCode which antennas are actually connected.
2990 * Before first association, we assume all antennas are connected.
2991 * Just after first association, iwl4965_noise_calibration()
2992 * checks which antennas actually *are* connected. */
2993 priv->staging_rxon.rx_chain |=
2994 cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);
2995
2996 /* How many receivers should we use? */
2997 iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
2998 priv->staging_rxon.rx_chain |=
2999 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
3000 priv->staging_rxon.rx_chain |=
3001 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
3002
3003 if (!is_single && (rx_state >= 2) &&
3004 !test_bit(STATUS_POWER_PMI, &priv->status))
3005 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
3006 else
3007 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
3008
3009 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
3010 }
3011
3012 /**
3013 * sign_extend - Sign extend a value using specified bit as sign-bit
3014 *
3015 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
3016 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
3017 *
3018 * @param oper value to sign extend
3019 * @param index 0 based bit index (0<=index<32) to sign bit
3020 */
3021 static s32 sign_extend(u32 oper, int index)
3022 {
3023 u8 shift = 31 - index;
3024
3025 return (s32)(oper << shift) >> shift;
3026 }
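/*
 * The shift trick above works because the left shift moves the chosen sign
 * bit into bit 31 and the arithmetic right shift of the signed value copies
 * it back down. For the documented example, sign_extend(9, 3): shift = 28,
 * (s32)(9 << 28) is negative, and shifting right by 28 yields
 * 0xFFFFFFF9 == -7.
 */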
3027
3028 /**
3029 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin)
3030  * @priv: temperature reading is taken from priv->statistics, as provided by the uCode
3031 *
3032 * A return of <0 indicates bogus data in the statistics
3033 */
3034 int iwl4965_get_temperature(const struct iwl4965_priv *priv)
3035 {
3036 s32 temperature;
3037 s32 vt;
3038 s32 R1, R2, R3;
3039 u32 R4;
3040
3041 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
3042 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
3043 IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
3044 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
3045 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
3046 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
3047 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
3048 } else {
3049 IWL_DEBUG_TEMP("Running temperature calibration\n");
3050 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
3051 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
3052 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
3053 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
3054 }
3055
3056 /*
3057 * Temperature is only 23 bits, so sign extend out to 32.
3058 *
3059 * NOTE If we haven't received a statistics notification yet
3060 * with an updated temperature, use R4 provided to us in the
3061 * "initialize" ALIVE response.
3062 */
3063 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
3064 vt = sign_extend(R4, 23);
3065 else
3066 vt = sign_extend(
3067 le32_to_cpu(priv->statistics.general.temperature), 23);
3068
3069 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n",
3070 R1, R2, R3, vt);
3071
3072 if (R3 == R1) {
3073 IWL_ERROR("Calibration conflict R1 == R3\n");
3074 return -1;
3075 }
3076
3077 /* Calculate temperature in degrees Kelvin, adjust by 97%.
3078 * Add offset to center the adjustment around 0 degrees Centigrade. */
3079 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
3080 temperature /= (R3 - R1);
3081 temperature = (temperature * 97) / 100 +
3082 TEMPERATURE_CALIB_KELVIN_OFFSET;
3083
3084 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
3085 KELVIN_TO_CELSIUS(temperature));
3086
3087 return temperature;
3088 }
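/*
 * Summary of the computation above as a single formula (result in Kelvin):
 *
 *   T = (TEMPERATURE_CALIB_A_VAL * (vt - R2) / (R3 - R1)) * 97 / 100
 *       + TEMPERATURE_CALIB_KELVIN_OFFSET
 *
 * where R1..R3 come from the "initialize" ALIVE response and vt is the
 * sign-extended temperature reading from the statistics notification
 * (or R4 until the first notification arrives).
 */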
3089
3090 /* Adjust Txpower only if temperature variance is greater than threshold. */
3091 #define IWL_TEMPERATURE_THRESHOLD 3
3092
3093 /**
3094 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
3095 *
3096  * If the temperature has changed sufficiently, then a recalibration
3097 * is needed.
3098 *
3099 * Assumes caller will replace priv->last_temperature once calibration
3100 * executed.
3101 */
3102 static int iwl4965_is_temp_calib_needed(struct iwl4965_priv *priv)
3103 {
3104 int temp_diff;
3105
3106 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
3107 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
3108 return 0;
3109 }
3110
3111 temp_diff = priv->temperature - priv->last_temperature;
3112
3113 /* get absolute value */
3114 if (temp_diff < 0) {
3115 		IWL_DEBUG_POWER("Getting cooler, delta %d\n", temp_diff);
3116 temp_diff = -temp_diff;
3117 } else if (temp_diff == 0)
3118 		IWL_DEBUG_POWER("Same temp\n");
3119 else
3120 		IWL_DEBUG_POWER("Getting warmer, delta %d\n", temp_diff);
3121
3122 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
3123 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
3124 return 0;
3125 }
3126
3127 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
3128
3129 return 1;
3130 }
3131
3132 /* Calculate noise level, based on measurements during network silence just
3133 * before arriving beacon. This measurement can be done only if we know
3134 * exactly when to expect beacons, therefore only when we're associated. */
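/* For example (illustrative readings): per-antenna beacon silence values of
 * 40, 44 and 48 give (40 + 44 + 48) / 3 - 107 = -63 dBm; antennas reporting
 * zero are skipped, and with no active antennas the noise level is set to
 * IWL_NOISE_MEAS_NOT_AVAILABLE instead. */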
3135 static void iwl4965_rx_calc_noise(struct iwl4965_priv *priv)
3136 {
3137 struct statistics_rx_non_phy *rx_info
3138 = &(priv->statistics.rx.general);
3139 int num_active_rx = 0;
3140 int total_silence = 0;
3141 int bcn_silence_a =
3142 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
3143 int bcn_silence_b =
3144 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
3145 int bcn_silence_c =
3146 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
3147
3148 if (bcn_silence_a) {
3149 total_silence += bcn_silence_a;
3150 num_active_rx++;
3151 }
3152 if (bcn_silence_b) {
3153 total_silence += bcn_silence_b;
3154 num_active_rx++;
3155 }
3156 if (bcn_silence_c) {
3157 total_silence += bcn_silence_c;
3158 num_active_rx++;
3159 }
3160
3161 /* Average among active antennas */
3162 if (num_active_rx)
3163 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
3164 else
3165 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3166
3167 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
3168 bcn_silence_a, bcn_silence_b, bcn_silence_c,
3169 priv->last_rx_noise);
3170 }
3171
3172 void iwl4965_hw_rx_statistics(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
3173 {
3174 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3175 int change;
3176 s32 temp;
3177
3178 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
3179 (int)sizeof(priv->statistics), pkt->len);
3180
3181 change = ((priv->statistics.general.temperature !=
3182 pkt->u.stats.general.temperature) ||
3183 ((priv->statistics.flag &
3184 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
3185 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));
3186
3187 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
3188
3189 set_bit(STATUS_STATISTICS, &priv->status);
3190
3191 /* Reschedule the statistics timer to occur in
3192 * REG_RECALIB_PERIOD seconds to ensure we get a
3193 * thermal update even if the uCode doesn't give
3194 * us one */
3195 mod_timer(&priv->statistics_periodic, jiffies +
3196 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
3197
3198 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3199 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
3200 iwl4965_rx_calc_noise(priv);
3201 #ifdef CONFIG_IWL4965_SENSITIVITY
3202 queue_work(priv->workqueue, &priv->sensitivity_work);
3203 #endif
3204 }
3205
3206 /* If the hardware hasn't reported a change in
3207 * temperature then don't bother computing a
3208 * calibrated temperature value */
3209 if (!change)
3210 return;
3211
3212 temp = iwl4965_get_temperature(priv);
3213 if (temp < 0)
3214 return;
3215
3216 if (priv->temperature != temp) {
3217 if (priv->temperature)
3218 IWL_DEBUG_TEMP("Temperature changed "
3219 "from %dC to %dC\n",
3220 KELVIN_TO_CELSIUS(priv->temperature),
3221 KELVIN_TO_CELSIUS(temp));
3222 else
3223 IWL_DEBUG_TEMP("Temperature "
3224 "initialized to %dC\n",
3225 KELVIN_TO_CELSIUS(temp));
3226 }
3227
3228 priv->temperature = temp;
3229 set_bit(STATUS_TEMPERATURE, &priv->status);
3230
3231 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3232 iwl4965_is_temp_calib_needed(priv))
3233 queue_work(priv->workqueue, &priv->txpower_work);
3234 }
3235
3236 static void iwl4965_add_radiotap(struct iwl4965_priv *priv,
3237 struct sk_buff *skb,
3238 struct iwl4965_rx_phy_res *rx_start,
3239 struct ieee80211_rx_status *stats,
3240 u32 ampdu_status)
3241 {
3242 s8 signal = stats->ssi;
3243 s8 noise = 0;
3244 int rate = stats->rate_idx;
3245 u64 tsf = stats->mactime;
3246 __le16 phy_flags_hw = rx_start->phy_flags;
3247 struct iwl4965_rt_rx_hdr {
3248 struct ieee80211_radiotap_header rt_hdr;
3249 __le64 rt_tsf; /* TSF */
3250 u8 rt_flags; /* radiotap packet flags */
3251 u8 rt_rate; /* rate in 500kb/s */
3252 __le16 rt_channelMHz; /* channel in MHz */
3253 __le16 rt_chbitmask; /* channel bitfield */
3254 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
3255 s8 rt_dbmnoise;
3256 u8 rt_antenna; /* antenna number */
3257 } __attribute__ ((packed)) *iwl4965_rt;
3258
3259 /* TODO: We won't have enough headroom for HT frames. Fix it later. */
3260 if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
3261 if (net_ratelimit())
3262 printk(KERN_ERR "not enough headroom [%d] for "
3263 "radiotap head [%zd]\n",
3264 skb_headroom(skb), sizeof(*iwl4965_rt));
3265 return;
3266 }
3267
3268 /* put radiotap header in front of 802.11 header and data */
3269 iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
3270
3271 /* initialise radiotap header */
3272 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3273 iwl4965_rt->rt_hdr.it_pad = 0;
3274
3275 /* total header + data */
3276 put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)),
3277 &iwl4965_rt->rt_hdr.it_len);
3278
3279 /* Indicate all the fields we add to the radiotap header */
3280 put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3281 (1 << IEEE80211_RADIOTAP_FLAGS) |
3282 (1 << IEEE80211_RADIOTAP_RATE) |
3283 (1 << IEEE80211_RADIOTAP_CHANNEL) |
3284 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3285 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3286 (1 << IEEE80211_RADIOTAP_ANTENNA)),
3287 &iwl4965_rt->rt_hdr.it_present);
3288
3289 /* Zero the flags, we'll add to them as we go */
3290 iwl4965_rt->rt_flags = 0;
3291
3292 put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf);
3293
3294 iwl4965_rt->rt_dbmsignal = signal;
3295 iwl4965_rt->rt_dbmnoise = noise;
3296
3297 /* Convert the channel frequency and set the flags */
3298 put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
3299 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
3300 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
3301 IEEE80211_CHAN_5GHZ),
3302 &iwl4965_rt->rt_chbitmask);
3303 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
3304 put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
3305 IEEE80211_CHAN_2GHZ),
3306 &iwl4965_rt->rt_chbitmask);
3307 else /* 802.11g */
3308 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
3309 IEEE80211_CHAN_2GHZ),
3310 &iwl4965_rt->rt_chbitmask);
3311
3312 if (rate == -1)
3313 iwl4965_rt->rt_rate = 0;
3314 else
3315 iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;
3316
3317 /*
3318 * "antenna number"
3319 *
3320 * It seems that the antenna field in the phy flags value
3321 	 * is actually a bitfield. This is undefined by radiotap;
3322 	 * it wants an actual antenna number, but I always get "7"
3323 	 * for most legacy frames I receive, indicating that the
3324 * same frame was received on all three RX chains.
3325 *
3326 * I think this field should be removed in favour of a
3327 * new 802.11n radiotap field "RX chains" that is defined
3328 * as a bitmask.
3329 */
3330 iwl4965_rt->rt_antenna =
3331 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
3332
3333 /* set the preamble flag if appropriate */
3334 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
3335 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3336
3337 stats->flag |= RX_FLAG_RADIOTAP;
3338 }
3339
3340 static void iwl4965_handle_data_packet(struct iwl4965_priv *priv, int is_data,
3341 int include_phy,
3342 struct iwl4965_rx_mem_buffer *rxb,
3343 struct ieee80211_rx_status *stats)
3344 {
3345 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
3346 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3347 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
3348 struct ieee80211_hdr *hdr;
3349 u16 len;
3350 __le32 *rx_end;
3351 unsigned int skblen;
3352 u32 ampdu_status;
3353
3354 if (!include_phy && priv->last_phy_res[0])
3355 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3356
3357 if (!rx_start) {
3358 		IWL_ERROR("MPDU frame without PHY data\n");
3359 return;
3360 }
3361 if (include_phy) {
3362 hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
3363 rx_start->cfg_phy_cnt);
3364
3365 len = le16_to_cpu(rx_start->byte_count);
3366
3367 rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
3368 sizeof(struct iwl4965_rx_phy_res) +
3369 rx_start->cfg_phy_cnt + len);
3370
3371 } else {
3372 struct iwl4965_rx_mpdu_res_start *amsdu =
3373 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3374
3375 hdr = (struct ieee80211_hdr *)(pkt->u.raw +
3376 sizeof(struct iwl4965_rx_mpdu_res_start));
3377 len = le16_to_cpu(amsdu->byte_count);
3378 rx_start->byte_count = amsdu->byte_count;
3379 rx_end = (__le32 *) (((u8 *) hdr) + len);
3380 }
3381 if (len > priv->hw_setting.max_pkt_size || len < 16) {
3382 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
3383 return;
3384 }
3385
3386 ampdu_status = le32_to_cpu(*rx_end);
3387 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
3388
3389 /* start from MAC */
3390 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
3391 skb_put(rxb->skb, len); /* end where data ends */
3392
3393 /* We only process data packets if the interface is open */
3394 if (unlikely(!priv->is_open)) {
3395 IWL_DEBUG_DROP_LIMIT
3396 ("Dropping packet while interface is not open.\n");
3397 return;
3398 }
3399
3400 stats->flag = 0;
3401 hdr = (struct ieee80211_hdr *)rxb->skb->data;
3402
3403 if (iwl4965_param_hwcrypto)
3404 iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats);
3405
3406 if (priv->add_radiotap)
3407 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
3408
3409 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3410 priv->alloc_rxb_skb--;
3411 rxb->skb = NULL;
3412 #ifdef LED
3413 priv->led_packets += len;
3414 iwl4965_setup_activity_timer(priv);
3415 #endif
3416 }
3417
3418 /* Calc max signal level (dBm) among 3 possible receivers */
3419 static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3420 {
3421 /* data from PHY/DSP regarding signal strength, etc.,
3422 * contents are always there, not configurable by host. */
3423 struct iwl4965_rx_non_cfg_phy *ncphy =
3424 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
3425 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
3426 >> IWL_AGC_DB_POS;
3427
3428 u32 valid_antennae =
3429 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
3430 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
3431 u8 max_rssi = 0;
3432 u32 i;
3433
3434 /* Find max rssi among 3 possible receivers.
3435 * These values are measured by the digital signal processor (DSP).
3436 * They should stay fairly constant even as the signal strength varies,
3437 * if the radio's automatic gain control (AGC) is working right.
3438 * AGC value (see below) will provide the "interesting" info. */
3439 for (i = 0; i < 3; i++)
3440 if (valid_antennae & (1 << i))
3441 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
3442
3443 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
3444 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
3445 max_rssi, agc);
3446
3447 /* dBm = max_rssi dB - agc dB - constant.
3448 * Higher AGC (higher radio gain) means lower signal. */
3449 return (max_rssi - agc - IWL_RSSI_OFFSET);
3450 }
3451
3452 #ifdef CONFIG_IWL4965_HT
3453
3454 /* Parsed Information Elements */
3455 struct ieee802_11_elems {
3456 u8 *ds_params;
3457 u8 ds_params_len;
3458 u8 *tim;
3459 u8 tim_len;
3460 u8 *ibss_params;
3461 u8 ibss_params_len;
3462 u8 *erp_info;
3463 u8 erp_info_len;
3464 u8 *ht_cap_param;
3465 u8 ht_cap_param_len;
3466 u8 *ht_extra_param;
3467 u8 ht_extra_param_len;
3468 };
3469
3470 static int parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems)
3471 {
3472 size_t left = len;
3473 u8 *pos = start;
3474 int unknown = 0;
3475
3476 memset(elems, 0, sizeof(*elems));
3477
3478 while (left >= 2) {
3479 u8 id, elen;
3480
3481 id = *pos++;
3482 elen = *pos++;
3483 left -= 2;
3484
3485 if (elen > left)
3486 return -1;
3487
3488 switch (id) {
3489 case WLAN_EID_DS_PARAMS:
3490 elems->ds_params = pos;
3491 elems->ds_params_len = elen;
3492 break;
3493 case WLAN_EID_TIM:
3494 elems->tim = pos;
3495 elems->tim_len = elen;
3496 break;
3497 case WLAN_EID_IBSS_PARAMS:
3498 elems->ibss_params = pos;
3499 elems->ibss_params_len = elen;
3500 break;
3501 case WLAN_EID_ERP_INFO:
3502 elems->erp_info = pos;
3503 elems->erp_info_len = elen;
3504 break;
3505 case WLAN_EID_HT_CAPABILITY:
3506 elems->ht_cap_param = pos;
3507 elems->ht_cap_param_len = elen;
3508 break;
3509 case WLAN_EID_HT_EXTRA_INFO:
3510 elems->ht_extra_param = pos;
3511 elems->ht_extra_param_len = elen;
3512 break;
3513 default:
3514 unknown++;
3515 break;
3516 }
3517
3518 left -= elen;
3519 pos += elen;
3520 }
3521
3522 return 0;
3523 }
3524
3525 void iwl4965_init_ht_hw_capab(struct ieee80211_ht_info *ht_info,
3526 enum ieee80211_band band)
3527 {
3528 ht_info->cap = 0;
3529 memset(ht_info->supp_mcs_set, 0, 16);
3530
3531 ht_info->ht_supported = 1;
3532
3533 if (band == IEEE80211_BAND_5GHZ) {
3534 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
3535 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
3536 ht_info->supp_mcs_set[4] = 0x01;
3537 }
3538 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
3539 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
3540 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
3541 (IWL_MIMO_PS_NONE << 2));
3542 if (iwl4965_param_amsdu_size_8K) {
3543 printk(KERN_DEBUG "iwl4965 in A-MSDU 8K support mode\n");
3544 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
3545 }
3546
3547 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3548 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3549
3550 ht_info->supp_mcs_set[0] = 0xFF;
3551 ht_info->supp_mcs_set[1] = 0xFF;
3552 }
3553 #endif /* CONFIG_IWL4965_HT */
3554
3555 static void iwl4965_sta_modify_ps_wake(struct iwl4965_priv *priv, int sta_id)
3556 {
3557 unsigned long flags;
3558
3559 spin_lock_irqsave(&priv->sta_lock, flags);
3560 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
3561 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3562 priv->stations[sta_id].sta.sta.modify_mask = 0;
3563 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3564 spin_unlock_irqrestore(&priv->sta_lock, flags);
3565
3566 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3567 }
3568
3569 static void iwl4965_update_ps_mode(struct iwl4965_priv *priv, u16 ps_bit, u8 *addr)
3570 {
3571 /* FIXME: need locking over ps_status ??? */
3572 u8 sta_id = iwl4965_hw_find_station(priv, addr);
3573
3574 if (sta_id != IWL_INVALID_STATION) {
3575 u8 sta_awake = priv->stations[sta_id].
3576 ps_status == STA_PS_STATUS_WAKE;
3577
3578 if (sta_awake && ps_bit)
3579 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
3580 else if (!sta_awake && !ps_bit) {
3581 iwl4965_sta_modify_ps_wake(priv, sta_id);
3582 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
3583 }
3584 }
3585 }
3586 #ifdef CONFIG_IWL4965_DEBUG
3587
3588 /**
3589 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
3590 *
3591 * You may hack this function to show different aspects of received frames,
3592 * including selective frame dumps.
3593 * group100 parameter selects whether to show 1 out of 100 good frames.
3594 *
3595 * TODO: This was originally written for 3945, need to audit for
3596 * proper operation with 4965.
3597 */
3598 static void iwl4965_dbg_report_frame(struct iwl4965_priv *priv,
3599 struct iwl4965_rx_packet *pkt,
3600 struct ieee80211_hdr *header, int group100)
3601 {
3602 u32 to_us;
3603 u32 print_summary = 0;
3604 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
3605 u32 hundred = 0;
3606 u32 dataframe = 0;
3607 u16 fc;
3608 u16 seq_ctl;
3609 u16 channel;
3610 u16 phy_flags;
3611 int rate_sym;
3612 u16 length;
3613 	u32 status;
3614 	u32 bcn_tmr;
3615 u32 tsf_low;
3616 u64 tsf;
3617 u8 rssi;
3618 u8 agc;
3619 u16 sig_avg;
3620 u16 noise_diff;
3621 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
3622 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
3623 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
3624 u8 *data = IWL_RX_DATA(pkt);
3625
3626 if (likely(!(iwl4965_debug_level & IWL_DL_RX)))
3627 return;
3628
3629 /* MAC header */
3630 fc = le16_to_cpu(header->frame_control);
3631 seq_ctl = le16_to_cpu(header->seq_ctrl);
3632
3633 /* metadata */
3634 channel = le16_to_cpu(rx_hdr->channel);
3635 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
3636 rate_sym = rx_hdr->rate;
3637 length = le16_to_cpu(rx_hdr->len);
3638
3639 /* end-of-frame status and timestamp */
3640 status = le32_to_cpu(rx_end->status);
3641 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
3642 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
3643 tsf = le64_to_cpu(rx_end->timestamp);
3644
3645 /* signal statistics */
3646 rssi = rx_stats->rssi;
3647 agc = rx_stats->agc;
3648 sig_avg = le16_to_cpu(rx_stats->sig_avg);
3649 noise_diff = le16_to_cpu(rx_stats->noise_diff);
3650
3651 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
3652
3653 /* if data frame is to us and all is good,
3654 * (optionally) print summary for only 1 out of every 100 */
3655 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
3656 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
3657 dataframe = 1;
3658 if (!group100)
3659 print_summary = 1; /* print each frame */
3660 else if (priv->framecnt_to_us < 100) {
3661 priv->framecnt_to_us++;
3662 print_summary = 0;
3663 } else {
3664 priv->framecnt_to_us = 0;
3665 print_summary = 1;
3666 hundred = 1;
3667 }
3668 } else {
3669 /* print summary for all other frames */
3670 print_summary = 1;
3671 }
3672
3673 if (print_summary) {
3674 char *title;
3675 int rate_idx;
3676 u32 bitrate;
3677
3678 if (hundred)
3679 title = "100Frames";
3680 else if (fc & IEEE80211_FCTL_RETRY)
3681 title = "Retry";
3682 else if (ieee80211_is_assoc_response(fc))
3683 title = "AscRsp";
3684 else if (ieee80211_is_reassoc_response(fc))
3685 title = "RasRsp";
3686 else if (ieee80211_is_probe_response(fc)) {
3687 title = "PrbRsp";
3688 print_dump = 1; /* dump frame contents */
3689 } else if (ieee80211_is_beacon(fc)) {
3690 title = "Beacon";
3691 print_dump = 1; /* dump frame contents */
3692 } else if (ieee80211_is_atim(fc))
3693 title = "ATIM";
3694 else if (ieee80211_is_auth(fc))
3695 title = "Auth";
3696 else if (ieee80211_is_deauth(fc))
3697 title = "DeAuth";
3698 else if (ieee80211_is_disassoc(fc))
3699 title = "DisAssoc";
3700 else
3701 title = "Frame";
3702
3703 rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
3704 if (unlikely(rate_idx == -1))
3705 bitrate = 0;
3706 else
3707 bitrate = iwl4965_rates[rate_idx].ieee / 2;
3708
3709 /* print frame summary.
3710 * MAC addresses show just the last byte (for brevity),
3711 * but you can hack it to show more, if you'd like to. */
3712 if (dataframe)
3713 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
3714 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
3715 title, fc, header->addr1[5],
3716 length, rssi, channel, bitrate);
3717 else {
3718 /* src/dst addresses assume managed mode */
3719 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
3720 "src=0x%02x, rssi=%u, tim=%lu usec, "
3721 "phy=0x%02x, chnl=%d\n",
3722 title, fc, header->addr1[5],
3723 header->addr3[5], rssi,
3724 tsf_low - priv->scan_start_tsf,
3725 phy_flags, channel);
3726 }
3727 }
3728 if (print_dump)
3729 iwl4965_print_hex_dump(IWL_DL_RX, data, length);
3730 }
3731 #else
3732 static inline void iwl4965_dbg_report_frame(struct iwl4965_priv *priv,
3733 struct iwl4965_rx_packet *pkt,
3734 struct ieee80211_hdr *header,
3735 int group100)
3736 {
3737 }
3738 #endif
3739
3740
3741 #define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
3742
3743 /* Called for REPLY_4965_RX (legacy ABG frames), or
3744 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
3745 static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv,
3746 struct iwl4965_rx_mem_buffer *rxb)
3747 {
3748 struct ieee80211_hdr *header;
3749 struct ieee80211_rx_status rx_status;
3750 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3751 /* Use phy data (Rx signal strength, etc.) contained within
3752 * this rx packet for legacy frames,
3753 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
3754 int include_phy = (pkt->hdr.cmd == REPLY_4965_RX);
3755 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3756 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3757 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3758 __le32 *rx_end;
3759 unsigned int len = 0;
3760 u16 fc;
3761 u8 network_packet;
3762
3763 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
3764 rx_status.freq = ieee80211chan2mhz(le16_to_cpu(rx_start->channel));
3765 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3766 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
3767 rx_status.rate_idx = iwl4965_hwrate_to_plcp_idx(
3768 le32_to_cpu(rx_start->rate_n_flags));
3769
3770 if (rx_status.band == IEEE80211_BAND_5GHZ)
3771 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
3772
3773 rx_status.antenna = 0;
3774 rx_status.flag = 0;
3775
3776 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
3777 IWL_DEBUG_DROP
3778 ("dsp size out of range [0,20]: "
3779 "%d/n", rx_start->cfg_phy_cnt);
3780 return;
3781 }
3782
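	/* For REPLY_RX_MPDU_CMD (HT) frames the PHY data is not in this
	 * packet; fall back to the copy cached by iwl4965_rx_reply_rx_phy(),
	 * if any */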
3783 if (!include_phy) {
3784 if (priv->last_phy_res[0])
3785 rx_start = (struct iwl4965_rx_phy_res *)
3786 &priv->last_phy_res[1];
3787 else
3788 rx_start = NULL;
3789 }
3790
3791 if (!rx_start) {
3792 		IWL_ERROR("MPDU frame without cached PHY data\n");
3793 return;
3794 }
3795
3796 if (include_phy) {
3797 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
3798 + rx_start->cfg_phy_cnt);
3799
3800 len = le16_to_cpu(rx_start->byte_count);
3801 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
3802 sizeof(struct iwl4965_rx_phy_res) + len);
3803 } else {
3804 struct iwl4965_rx_mpdu_res_start *amsdu =
3805 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3806
3807 header = (void *)(pkt->u.raw +
3808 sizeof(struct iwl4965_rx_mpdu_res_start));
3809 len = le16_to_cpu(amsdu->byte_count);
3810 rx_end = (__le32 *) (pkt->u.raw +
3811 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
3812 }
3813
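	/* The status word at the end of the frame flags CRC and
	 * RX-FIFO-overflow errors; drop bad frames */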
3814 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
3815 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
3816 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
3817 le32_to_cpu(*rx_end));
3818 return;
3819 }
3820
3821 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
3822
3823 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
3824 rx_status.ssi = iwl4965_calc_rssi(rx_start);
3825
3826 /* Meaningful noise values are available only from beacon statistics,
3827 * which are gathered only when associated, and indicate noise
3828 * only for the associated network channel ...
3829 * Ignore these noise values while scanning (other channels) */
3830 if (iwl4965_is_associated(priv) &&
3831 !test_bit(STATUS_SCANNING, &priv->status)) {
3832 rx_status.noise = priv->last_rx_noise;
3833 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi,
3834 rx_status.noise);
3835 } else {
3836 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3837 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0);
3838 }
3839
3840 /* Reset beacon noise level if not associated. */
3841 if (!iwl4965_is_associated(priv))
3842 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3843
3844 /* Set "1" to report good data frames in groups of 100 */
3845 	/* FIXME: need to optimize this call */
3846 iwl4965_dbg_report_frame(priv, pkt, header, 1);
3847
3848 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
3849 rx_status.ssi, rx_status.noise, rx_status.signal,
3850 rx_status.mactime);
3851
3852 network_packet = iwl4965_is_network_packet(priv, header);
3853 if (network_packet) {
3854 priv->last_rx_rssi = rx_status.ssi;
3855 priv->last_beacon_time = priv->ucode_beacon_time;
3856 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
3857 }
3858
3859 fc = le16_to_cpu(header->frame_control);
3860 switch (fc & IEEE80211_FCTL_FTYPE) {
3861 case IEEE80211_FTYPE_MGMT:
3862
3863 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3864 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3865 header->addr2);
3866 switch (fc & IEEE80211_FCTL_STYPE) {
3867 case IEEE80211_STYPE_PROBE_RESP:
3868 case IEEE80211_STYPE_BEACON:
3869 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA &&
3870 !compare_ether_addr(header->addr2, priv->bssid)) ||
3871 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
3872 !compare_ether_addr(header->addr3, priv->bssid))) {
3873 struct ieee80211_mgmt *mgmt =
3874 (struct ieee80211_mgmt *)header;
3875 u64 timestamp =
3876 le64_to_cpu(mgmt->u.beacon.timestamp);
3877
3878 priv->timestamp0 = timestamp & 0xFFFFFFFF;
3879 priv->timestamp1 =
3880 (timestamp >> 32) & 0xFFFFFFFF;
3881 priv->beacon_int = le16_to_cpu(
3882 mgmt->u.beacon.beacon_int);
3883 if (priv->call_post_assoc_from_beacon &&
3884 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
3885 priv->call_post_assoc_from_beacon = 0;
3886 queue_work(priv->workqueue,
3887 &priv->post_associate.work);
3888 }
3889 }
3890 break;
3891
3892 case IEEE80211_STYPE_ACTION:
3893 break;
3894
3895 /*
3896 * TODO: Use the new callback function from
3897 * mac80211 instead of sniffing these packets.
3898 */
3899 case IEEE80211_STYPE_ASSOC_RESP:
3900 case IEEE80211_STYPE_REASSOC_RESP:
3901 if (network_packet) {
3902 #ifdef CONFIG_IWL4965_HT
3903 u8 *pos = NULL;
3904 struct ieee802_11_elems elems;
3905 #endif /*CONFIG_IWL4965_HT */
3906 struct ieee80211_mgmt *mgnt =
3907 (struct ieee80211_mgmt *)header;
3908
3909 /* We have just associated, give some
3910 * time for the 4-way handshake if
3911 * any. Don't start scan too early. */
3912 priv->next_scan_jiffies = jiffies +
3913 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
3914
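				/* The two most-significant bits of the AID
				 * field are set over the air; mask them off
				 * to recover the 14-bit association ID */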
3915 priv->assoc_id = (~((1 << 15) | (1 << 14))
3916 & le16_to_cpu(mgnt->u.assoc_resp.aid));
3917 priv->assoc_capability =
3918 le16_to_cpu(
3919 mgnt->u.assoc_resp.capab_info);
3920 #ifdef CONFIG_IWL4965_HT
3921 pos = mgnt->u.assoc_resp.variable;
3922 if (!parse_elems(pos,
3923 len - (pos - (u8 *) mgnt),
3924 &elems)) {
3925 if (elems.ht_extra_param &&
3926 elems.ht_cap_param)
3927 break;
3928 }
3929 #endif /*CONFIG_IWL4965_HT */
3930 				/* assoc_id of 0 means no association */
3931 if (!priv->assoc_id)
3932 break;
3933 if (priv->beacon_int)
3934 queue_work(priv->workqueue,
3935 &priv->post_associate.work);
3936 else
3937 priv->call_post_assoc_from_beacon = 1;
3938 }
3939
3940 break;
3941
3942 case IEEE80211_STYPE_PROBE_REQ:
3943 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
3944 !iwl4965_is_associated(priv)) {
3945 DECLARE_MAC_BUF(mac1);
3946 DECLARE_MAC_BUF(mac2);
3947 DECLARE_MAC_BUF(mac3);
3948
3949 IWL_DEBUG_DROP("Dropping (non network): "
3950 "%s, %s, %s\n",
3951 print_mac(mac1, header->addr1),
3952 print_mac(mac2, header->addr2),
3953 print_mac(mac3, header->addr3));
3954 return;
3955 }
3956 }
3957 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
3958 break;
3959
3960 case IEEE80211_FTYPE_CTL:
3961 #ifdef CONFIG_IWL4965_HT
3962 switch (fc & IEEE80211_FCTL_STYPE) {
3963 case IEEE80211_STYPE_BACK_REQ:
3964 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
3965 iwl4965_handle_data_packet(priv, 0, include_phy,
3966 rxb, &rx_status);
3967 break;
3968 default:
3969 break;
3970 }
3971 #endif
3972 break;
3973
3974 case IEEE80211_FTYPE_DATA: {
3975 DECLARE_MAC_BUF(mac1);
3976 DECLARE_MAC_BUF(mac2);
3977 DECLARE_MAC_BUF(mac3);
3978
3979 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3980 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3981 header->addr2);
3982
3983 if (unlikely(!network_packet))
3984 IWL_DEBUG_DROP("Dropping (non network): "
3985 "%s, %s, %s\n",
3986 print_mac(mac1, header->addr1),
3987 print_mac(mac2, header->addr2),
3988 print_mac(mac3, header->addr3));
3989 else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
3990 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
3991 print_mac(mac1, header->addr1),
3992 print_mac(mac2, header->addr2),
3993 print_mac(mac3, header->addr3));
3994 else
3995 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
3996 &rx_status);
3997 break;
3998 }
3999 default:
4000 break;
4001
4002 }
4003 }
4004
4005 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4006 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
4007 static void iwl4965_rx_reply_rx_phy(struct iwl4965_priv *priv,
4008 struct iwl4965_rx_mem_buffer *rxb)
4009 {
4010 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4011 priv->last_phy_res[0] = 1;
4012 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4013 sizeof(struct iwl4965_rx_phy_res));
4014 }
4015
4016 static void iwl4965_rx_missed_beacon_notif(struct iwl4965_priv *priv,
4017 struct iwl4965_rx_mem_buffer *rxb)
4018
4019 {
4020 #ifdef CONFIG_IWL4965_SENSITIVITY
4021 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4022 struct iwl4965_missed_beacon_notif *missed_beacon;
4023
4024 missed_beacon = &pkt->u.missed_beacon;
4025 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
4026 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
4027 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
4028 le32_to_cpu(missed_beacon->total_missed_becons),
4029 le32_to_cpu(missed_beacon->num_recvd_beacons),
4030 le32_to_cpu(missed_beacon->num_expected_beacons));
4031 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
4032 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
4033 queue_work(priv->workqueue, &priv->sensitivity_work);
4034 }
4035 #endif /*CONFIG_IWL4965_SENSITIVITY*/
4036 }
4037
4038 #ifdef CONFIG_IWL4965_HT
4039
4040 /**
4041 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4042 */
4043 static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv,
4044 int sta_id, int tid)
4045 {
4046 unsigned long flags;
4047
4048 /* Remove "disable" flag, to enable Tx for this TID */
4049 spin_lock_irqsave(&priv->sta_lock, flags);
4050 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4051 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4052 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4053 spin_unlock_irqrestore(&priv->sta_lock, flags);
4054
4055 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4056 }
4057
4058 /**
4059 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4060 *
4061 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4062 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4063 */
4064 static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4065 struct iwl4965_ht_agg *agg,
4066 struct iwl4965_compressed_ba_resp*
4067 ba_resp)
4068
4069 {
4070 int i, sh, ack;
4071 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
4072 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4073 u64 bitmap;
4074 int successes = 0;
4075 struct ieee80211_tx_status *tx_status;
4076
4077 if (unlikely(!agg->wait_for_ba)) {
4078 IWL_ERROR("Received BA when not expected\n");
4079 return -EINVAL;
4080 }
4081
4082 /* Mark that the expected block-ack response arrived */
4083 agg->wait_for_ba = 0;
4084 	IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, seq_ctl);
4085
4086 /* Calculate shift to align block-ack bits with our Tx window bits */
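	/* (seq_ctl >> 4 drops the 4-bit fragment-number field,
	 *  leaving the 12-bit sequence number) */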
4087 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
4088 	if (sh < 0) /* shouldn't normally happen: indices are inconsistent */
4089 sh += 0x100;
4090
4091 /* don't use 64-bit values for now */
4092 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
4093
4094 if (agg->frame_count > (64 - sh)) {
4095 		IWL_DEBUG_TX_REPLY("more frames than bitmap size\n");
4096 return -1;
4097 }
4098
4099 /* check for success or failure according to the
4100 * transmitted bitmap and block-ack bitmap */
4101 bitmap &= agg->bitmap;
4102
4103 /* For each frame attempted in aggregation,
4104 * update driver's record of tx frame's status. */
4105 for (i = 0; i < agg->frame_count ; i++) {
4106 ack = bitmap & (1 << i);
4107 successes += !!ack;
4108 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
4109 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
4110 agg->start_idx + i);
4111 }
4112
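	/* Report the block-ack outcome to mac80211 through the status of the
	 * window's first frame; ampdu_ack_map/ampdu_ack_len feed the
	 * rate-scaling algorithm */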
4113 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
4114 tx_status->flags = IEEE80211_TX_STATUS_ACK;
4115 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU;
4116 tx_status->ampdu_ack_map = successes;
4117 tx_status->ampdu_ack_len = agg->frame_count;
4118 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags,
4119 &tx_status->control);
4120
4121 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
4122
4123 return 0;
4124 }
4125
4126 /**
4127 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4128 */
4129 static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv,
4130 u16 txq_id)
4131 {
4132 /* Simply stop the queue, but don't change any configuration;
4133 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4134 iwl4965_write_prph(priv,
4135 KDR_SCD_QUEUE_STATUS_BITS(txq_id),
4136 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4137 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4138 }
4139
4140 /**
4141  * iwl4965_tx_queue_agg_disable - Disable aggregation for the given Tx queue;
4142  * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID, and priv->lock must be held by the caller
4143 */
4144 static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
4145 u16 ssn_idx, u8 tx_fifo)
4146 {
4147 int ret = 0;
4148
4149 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4150 IWL_WARNING("queue number too small: %d, must be > %d\n",
4151 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4152 return -EINVAL;
4153 }
4154
4155 ret = iwl4965_grab_nic_access(priv);
4156 if (ret)
4157 return ret;
4158
4159 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4160
4161 iwl4965_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4162
4163 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4164 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4165 /* supposes that ssn_idx is valid (!= 0xFFF) */
4166 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4167
4168 iwl4965_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4169 iwl4965_txq_ctx_deactivate(priv, txq_id);
4170 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4171
4172 iwl4965_release_nic_access(priv);
4173
4174 return 0;
4175 }
4176
4177 int iwl4965_check_empty_hw_queue(struct iwl4965_priv *priv, int sta_id,
4178 u8 tid, int txq_id)
4179 {
4180 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4181 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4182 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4183
4184 switch (priv->stations[sta_id].tid[tid].agg.state) {
4185 case IWL_EMPTYING_HW_QUEUE_DELBA:
4186 		/* We are reclaiming the last packet of the
4187 		 * aggregated HW queue */
4188 if (txq_id == tid_data->agg.txq_id &&
4189 q->read_ptr == q->write_ptr) {
4190 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4191 int tx_fifo = default_tid_to_tx_fifo[tid];
4192 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4193 iwl4965_tx_queue_agg_disable(priv, txq_id,
4194 ssn, tx_fifo);
4195 tid_data->agg.state = IWL_AGG_OFF;
4196 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4197 }
4198 break;
4199 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4200 /* We are reclaiming the last packet of the queue */
4201 if (tid_data->tfds_in_queue == 0) {
4202 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4203 tid_data->agg.state = IWL_AGG_ON;
4204 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4205 }
4206 break;
4207 }
4208 return 0;
4209 }
4210
4211 /**
4212 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
4213 * @index -- current index
4214 * @n_bd -- total number of entries in queue (s/b power of 2)
4215 */
4216 static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
4217 {
4218 return (index == 0) ? n_bd - 1 : index - 1;
4219 }
4220
4221 /**
4222 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
4223 *
4224 * Handles block-acknowledge notification from device, which reports success
4225 * of frames sent via aggregation.
4226 */
4227 static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4228 struct iwl4965_rx_mem_buffer *rxb)
4229 {
4230 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4231 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
4232 int index;
4233 struct iwl4965_tx_queue *txq = NULL;
4234 struct iwl4965_ht_agg *agg;
4235 DECLARE_MAC_BUF(mac);
4236
4237 /* "flow" corresponds to Tx queue */
4238 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4239
4240 /* "ssn" is start of block-ack Tx window, corresponds to index
4241 * (in Tx queue's circular buffer) of first TFD/frame in window */
4242 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4243
4244 if (scd_flow >= ARRAY_SIZE(priv->txq)) {
4245 		IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
4246 return;
4247 }
4248
4249 txq = &priv->txq[scd_flow];
4250 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4251
4252 /* Find index just before block-ack window */
4253 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4254
4255 /* TODO: Need to get this copy more safely - now good for debug */
4256
4257 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
4258 "sta_id = %d\n",
4259 agg->wait_for_ba,
4260 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
4261 ba_resp->sta_id);
4262 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
4263 "%d, scd_ssn = %d\n",
4264 ba_resp->tid,
4265 			   le16_to_cpu(ba_resp->seq_ctl),
4266 			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
4267 			   scd_flow,
4268 			   ba_resp_scd_ssn);
4269 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
4270 agg->start_idx,
4271 (unsigned long long)agg->bitmap);
4272
4273 /* Update driver's record of ACK vs. not for each frame in window */
4274 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
4275
4276 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4277 * block-ack window (we assume that they've been successfully
4278 * transmitted ... if not, it's too late anyway). */
4279 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4280 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
4281 priv->stations[ba_resp->sta_id].
4282 tid[ba_resp->tid].tfds_in_queue -= freed;
4283 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
4284 priv->mac80211_registered &&
4285 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4286 ieee80211_wake_queue(priv->hw, scd_flow);
4287 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
4288 ba_resp->tid, scd_flow);
4289 }
4290 }
4291
4292 /**
4293 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
4294 */
4295 static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid,
4296 u16 txq_id)
4297 {
4298 u32 tbl_dw_addr;
4299 u32 tbl_dw;
4300 u16 scd_q2ratid;
4301
4302 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4303
4304 tbl_dw_addr = priv->scd_base_addr +
4305 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4306
4307 tbl_dw = iwl4965_read_targ_mem(priv, tbl_dw_addr);
4308
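	/* Each 32-bit table entry packs two 16-bit RA/TID mappings:
	 * odd-numbered queues use the upper halfword, even-numbered
	 * queues the lower halfword */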
4309 if (txq_id & 0x1)
4310 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
4311 else
4312 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
4313
4314 iwl4965_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
4315
4316 return 0;
4317 }
4318
4319
4320 /**
4321 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4322 *
4323 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID,
4324 * i.e. it must be one of the higher queues used for aggregation
4325 */
4326 static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4327 int tx_fifo, int sta_id, int tid,
4328 u16 ssn_idx)
4329 {
4330 unsigned long flags;
4331 int rc;
4332 u16 ra_tid;
4333
4334 if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
4335 IWL_WARNING("queue number too small: %d, must be > %d\n",
4336 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4337
4338 ra_tid = BUILD_RAxTID(sta_id, tid);
4339
4340 /* Modify device's station table to Tx this TID */
4341 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
4342
4343 spin_lock_irqsave(&priv->lock, flags);
4344 rc = iwl4965_grab_nic_access(priv);
4345 if (rc) {
4346 spin_unlock_irqrestore(&priv->lock, flags);
4347 return rc;
4348 }
4349
4350 /* Stop this Tx queue before configuring it */
4351 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4352
4353 /* Map receiver-address / traffic-ID to this queue */
4354 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
4355
4356 /* Set this queue as a chain-building queue */
4357 iwl4965_set_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4358
4359 /* Place first TFD at index corresponding to start sequence number.
4360 * Assumes that ssn_idx is valid (!= 0xFFF) */
4361 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4362 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4363 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4364
4365 /* Set up Tx window size and frame limit for this queue */
4366 iwl4965_write_targ_mem(priv,
4367 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4368 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4369 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4370
4371 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
4372 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4373 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4374 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4375
4376 iwl4965_set_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4377
4378 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
4379 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
4380
4381 iwl4965_release_nic_access(priv);
4382 spin_unlock_irqrestore(&priv->lock, flags);
4383
4384 return 0;
4385 }
4386
4387 #endif /* CONFIG_IWL4965_HT */
4388
4389 /**
4390 * iwl4965_add_station - Initialize a station's hardware rate table
4391 *
4392 * The uCode's station table contains a table of fallback rates
4393 * for automatic fallback during transmission.
4394 *
4395 * NOTE: This sets up a default set of values. These will be replaced later
4396 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4397 * rc80211_simple.
4398 *
4399 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4400 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4401 * which requires station table entry to exist).
4402 */
4403 void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
4404 {
4405 int i, r;
4406 struct iwl4965_link_quality_cmd link_cmd = {
4407 .reserved1 = 0,
4408 };
4409 u16 rate_flags;
4410
4411 /* Set up the rate scaling to start at selected rate, fall back
4412 * all the way down to 1M in IEEE order, and then spin on 1M */
4413 if (is_ap)
4414 r = IWL_RATE_54M_INDEX;
4415 else if (priv->band == IEEE80211_BAND_5GHZ)
4416 r = IWL_RATE_6M_INDEX;
4417 else
4418 r = IWL_RATE_1M_INDEX;
4419
4420 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4421 rate_flags = 0;
4422 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4423 rate_flags |= RATE_MCS_CCK_MSK;
4424
4425 /* Use Tx antenna B only */
4426 rate_flags |= RATE_MCS_ANT_B_MSK;
4427 rate_flags &= ~RATE_MCS_ANT_A_MSK;
4428
4429 link_cmd.rs_table[i].rate_n_flags =
4430 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4431 r = iwl4965_get_prev_ieee_rate(r);
4432 }
4433
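	/* Antenna masks: single-stream Tx uses antenna B only (0x2),
	 * dual-stream Tx may use both antennas (0x3) */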
4434 link_cmd.general_params.single_stream_ant_msk = 2;
4435 link_cmd.general_params.dual_stream_ant_msk = 3;
4436 link_cmd.agg_params.agg_dis_start_th = 3;
4437 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4438
4439 /* Update the rate scaling for control frame Tx to AP */
4440 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_setting.bcast_sta_id;
4441
4442 iwl4965_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD, sizeof(link_cmd),
4443 &link_cmd);
4444 }
4445
4446 #ifdef CONFIG_IWL4965_HT
4447
4448 static u8 iwl4965_is_channel_extension(struct iwl4965_priv *priv,
4449 enum ieee80211_band band,
4450 u16 channel, u8 extension_chan_offset)
4451 {
4452 const struct iwl4965_channel_info *ch_info;
4453
4454 ch_info = iwl4965_get_channel_info(priv, band, channel);
4455 if (!is_channel_valid(ch_info))
4456 return 0;
4457
4458 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
4459 return 0;
4460
4461 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4462 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4463 return 1;
4464
4465 return 0;
4466 }
4467
4468 static u8 iwl4965_is_fat_tx_allowed(struct iwl4965_priv *priv,
4469 struct ieee80211_ht_info *sta_ht_inf)
4470 {
4471 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
4472
4473 if ((!iwl_ht_conf->is_ht) ||
4474 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
4475 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
4476 return 0;
4477
4478 if (sta_ht_inf) {
4479 if ((!sta_ht_inf->ht_supported) ||
4480 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
4481 return 0;
4482 }
4483
4484 return (iwl4965_is_channel_extension(priv, priv->band,
4485 iwl_ht_conf->control_channel,
4486 iwl_ht_conf->extension_chan_offset));
4487 }
4488
4489 void iwl4965_set_rxon_ht(struct iwl4965_priv *priv, struct iwl_ht_info *ht_info)
4490 {
4491 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
4492 u32 val;
4493
4494 if (!ht_info->is_ht)
4495 return;
4496
4497 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
4498 if (iwl4965_is_fat_tx_allowed(priv, NULL))
4499 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4500 else
4501 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4502 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4503
4504 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4505 		IWL_DEBUG_ASSOC("control channel differs from current channel: %d %d\n",
4506 le16_to_cpu(rxon->channel),
4507 ht_info->control_channel);
4508 rxon->channel = cpu_to_le16(ht_info->control_channel);
4509 return;
4510 }
4511
4512 /* Note: control channel is opposite of extension channel */
4513 switch (ht_info->extension_chan_offset) {
4514 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
4515 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
4516 break;
4517 case IWL_EXT_CHANNEL_OFFSET_BELOW:
4518 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
4519 break;
4520 case IWL_EXT_CHANNEL_OFFSET_NONE:
4521 default:
4522 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4523 break;
4524 }
4525
4526 val = ht_info->ht_protection;
4527
4528 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4529
4530 iwl4965_set_rxon_chain(priv);
4531
4532 IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4533 "rxon flags 0x%X operation mode :0x%X "
4534 "extension channel offset 0x%x "
4535 "control chan %d\n",
4536 ht_info->supp_mcs_set[0], ht_info->supp_mcs_set[1],
4537 le32_to_cpu(rxon->flags), ht_info->ht_protection,
4538 ht_info->extension_chan_offset,
4539 ht_info->control_channel);
4540 return;
4541 }
4542
4543 void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index,
4544 struct ieee80211_ht_info *sta_ht_inf)
4545 {
4546 __le32 sta_flags;
4547 u8 mimo_ps_mode;
4548
4549 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
4550 goto done;
4551
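	/* Extract the peer's 2-bit MIMO (SM) power-save mode from its
	 * HT capability info */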
4552 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
4553
4554 sta_flags = priv->stations[index].sta.station_flags;
4555
4556 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
4557
4558 switch (mimo_ps_mode) {
4559 case WLAN_HT_CAP_MIMO_PS_STATIC:
4560 sta_flags |= STA_FLG_MIMO_DIS_MSK;
4561 break;
4562 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
4563 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
4564 break;
4565 case WLAN_HT_CAP_MIMO_PS_DISABLED:
4566 break;
4567 default:
4568 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
4569 break;
4570 }
4571
4572 sta_flags |= cpu_to_le32(
4573 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
4574
4575 sta_flags |= cpu_to_le32(
4576 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
4577
4578 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
4579 sta_flags |= STA_FLG_FAT_EN_MSK;
4580 else
4581 sta_flags &= ~STA_FLG_FAT_EN_MSK;
4582
4583 priv->stations[index].sta.station_flags = sta_flags;
4584 done:
4585 return;
4586 }
4587
4588 static void iwl4965_sta_modify_add_ba_tid(struct iwl4965_priv *priv,
4589 int sta_id, int tid, u16 ssn)
4590 {
4591 unsigned long flags;
4592
4593 spin_lock_irqsave(&priv->sta_lock, flags);
4594 priv->stations[sta_id].sta.station_flags_msk = 0;
4595 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
4596 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
4597 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
4598 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4599 spin_unlock_irqrestore(&priv->sta_lock, flags);
4600
4601 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4602 }
4603
4604 static void iwl4965_sta_modify_del_ba_tid(struct iwl4965_priv *priv,
4605 int sta_id, int tid)
4606 {
4607 unsigned long flags;
4608
4609 spin_lock_irqsave(&priv->sta_lock, flags);
4610 priv->stations[sta_id].sta.station_flags_msk = 0;
4611 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
4612 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
4613 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4614 spin_unlock_irqrestore(&priv->sta_lock, flags);
4615
4616 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4617 }
4618
4619 /*
4620 * Find first available (lowest unused) Tx Queue, mark it "active".
4621 * Called only when finding queue for aggregation.
4622 * Should never return anything < 7, because they should already
4623 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4624 */
4625 static int iwl4965_txq_ctx_activate_free(struct iwl4965_priv *priv)
4626 {
4627 int txq_id;
4628
4629 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
4630 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4631 return txq_id;
4632 return -1;
4633 }
4634
4635 static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da,
4636 u16 tid, u16 *start_seq_num)
4637 {
4638 struct iwl4965_priv *priv = hw->priv;
4639 int sta_id;
4640 int tx_fifo;
4641 int txq_id;
4642 int ssn = -1;
4643 int ret = 0;
4644 unsigned long flags;
4645 struct iwl4965_tid_data *tid_data;
4646 DECLARE_MAC_BUF(mac);
4647
4648 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4649 tx_fifo = default_tid_to_tx_fifo[tid];
4650 else
4651 return -EINVAL;
4652
4653 IWL_WARNING("%s on da = %s tid = %d\n",
4654 __func__, print_mac(mac, da), tid);
4655
4656 sta_id = iwl4965_hw_find_station(priv, da);
4657 if (sta_id == IWL_INVALID_STATION)
4658 return -ENXIO;
4659
4660 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
4661 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
4662 return -ENXIO;
4663 }
4664
4665 txq_id = iwl4965_txq_ctx_activate_free(priv);
4666 if (txq_id == -1)
4667 return -ENXIO;
4668
4669 spin_lock_irqsave(&priv->sta_lock, flags);
4670 tid_data = &priv->stations[sta_id].tid[tid];
4671 ssn = SEQ_TO_SN(tid_data->seq_number);
4672 tid_data->agg.txq_id = txq_id;
4673 spin_unlock_irqrestore(&priv->sta_lock, flags);
4674
4675 *start_seq_num = ssn;
4676 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
4677 sta_id, tid, ssn);
4678 if (ret)
4679 return ret;
4680
4681 ret = 0;
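	/* If no frames are pending for this TID, the aggregation session can
	 * start right away; otherwise wait for the queue to drain
	 * (IWL_EMPTYING_HW_QUEUE_ADDBA state) */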
4682 if (tid_data->tfds_in_queue == 0) {
4683 printk(KERN_ERR "HW queue is empty\n");
4684 tid_data->agg.state = IWL_AGG_ON;
4685 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4686 } else {
4687 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4688 tid_data->tfds_in_queue);
4689 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
4690 }
4691 return ret;
4692 }
4693
4694 static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da,
4695 u16 tid)
4696 {
4697
4698 struct iwl4965_priv *priv = hw->priv;
4699 int tx_fifo_id, txq_id, sta_id, ssn = -1;
4700 struct iwl4965_tid_data *tid_data;
4701 int ret, write_ptr, read_ptr;
4702 unsigned long flags;
4703 DECLARE_MAC_BUF(mac);
4704
4705 if (!da) {
4706 IWL_ERROR("da = NULL\n");
4707 return -EINVAL;
4708 }
4709
4710 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4711 tx_fifo_id = default_tid_to_tx_fifo[tid];
4712 else
4713 return -EINVAL;
4714
4715 sta_id = iwl4965_hw_find_station(priv, da);
4716
4717 if (sta_id == IWL_INVALID_STATION)
4718 return -ENXIO;
4719
4720 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
4721 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
4722
4723 tid_data = &priv->stations[sta_id].tid[tid];
4724 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
4725 txq_id = tid_data->agg.txq_id;
4726 write_ptr = priv->txq[txq_id].q.write_ptr;
4727 read_ptr = priv->txq[txq_id].q.read_ptr;
4728
4729 /* The queue is not empty */
4730 if (write_ptr != read_ptr) {
4731 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
4732 priv->stations[sta_id].tid[tid].agg.state =
4733 IWL_EMPTYING_HW_QUEUE_DELBA;
4734 return 0;
4735 }
4736
4737 	IWL_DEBUG_HT("HW queue empty\n");
4738 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
4739
4740 spin_lock_irqsave(&priv->lock, flags);
4741 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
4742 spin_unlock_irqrestore(&priv->lock, flags);
4743
4744 if (ret)
4745 return ret;
4746
4747 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid);
4748
4749 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n",
4750 print_mac(mac, da), tid);
4751
4752 return 0;
4753 }
4754
4755 int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4756 enum ieee80211_ampdu_mlme_action action,
4757 const u8 *addr, u16 tid, u16 *ssn)
4758 {
4759 struct iwl4965_priv *priv = hw->priv;
4760 int sta_id;
4761 DECLARE_MAC_BUF(mac);
4762
4763 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ",
4764 print_mac(mac, addr), tid);
4765 sta_id = iwl4965_hw_find_station(priv, addr);
4766 switch (action) {
4767 case IEEE80211_AMPDU_RX_START:
4768 IWL_DEBUG_HT("start Rx\n");
4769 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn);
4770 break;
4771 case IEEE80211_AMPDU_RX_STOP:
4772 IWL_DEBUG_HT("stop Rx\n");
4773 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid);
4774 break;
4775 case IEEE80211_AMPDU_TX_START:
4776 IWL_DEBUG_HT("start Tx\n");
4777 return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn);
4778 case IEEE80211_AMPDU_TX_STOP:
4779 IWL_DEBUG_HT("stop Tx\n");
4780 return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid);
4781 default:
4782 		IWL_DEBUG_HT("unknown A-MPDU action\n");
4783 		return -EINVAL;
4785 }
4786 return 0;
4787 }
4788
4789 #endif /* CONFIG_IWL4965_HT */
4790
4791 /* Set up 4965-specific Rx frame reply handlers */
4792 void iwl4965_hw_rx_handler_setup(struct iwl4965_priv *priv)
4793 {
4794 /* Legacy Rx frames */
4795 priv->rx_handlers[REPLY_4965_RX] = iwl4965_rx_reply_rx;
4796
4797 /* High-throughput (HT) Rx frames */
4798 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
4799 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
4800
4801 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
4802 iwl4965_rx_missed_beacon_notif;
4803
4804 #ifdef CONFIG_IWL4965_HT
4805 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
4806 #endif /* CONFIG_IWL4965_HT */
4807 }
4808
4809 void iwl4965_hw_setup_deferred_work(struct iwl4965_priv *priv)
4810 {
4811 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4812 INIT_WORK(&priv->statistics_work, iwl4965_bg_statistics_work);
4813 #ifdef CONFIG_IWL4965_SENSITIVITY
4814 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4815 #endif
4816 init_timer(&priv->statistics_periodic);
4817 priv->statistics_periodic.data = (unsigned long)priv;
4818 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
4819 }
4820
4821 void iwl4965_hw_cancel_deferred_work(struct iwl4965_priv *priv)
4822 {
4823 del_timer_sync(&priv->statistics_periodic);
4824
4825 cancel_delayed_work(&priv->init_alive_start);
4826 }
4827
4828 static struct iwl_lib_ops iwl4965_lib = {
4829 .eeprom_ops = {
4830 .verify_signature = iwlcore_eeprom_verify_signature,
4831 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
4832 .release_semaphore = iwlcore_eeprom_release_semaphore,
4833 },
4834 };
4835
4836 static struct iwl_ops iwl4965_ops = {
4837 .lib = &iwl4965_lib,
4838 };
4839
4840 static struct iwl_cfg iwl4965_agn_cfg = {
4841 .name = "4965AGN",
4842 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
4843 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
4844 .ops = &iwl4965_ops,
4845 };
4846
4847 struct pci_device_id iwl4965_hw_card_ids[] = {
4848 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
4849 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
4850 {0}
4851 };
4852
4853 MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);