1 /******************************************************************************
3 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
34 #include <net/mac80211.h>
36 #include "iwl-eeprom.h"
40 #include "iwl-commands.h"
41 #include "iwl-debug.h"
42 #include "iwl-power.h"
45 * Setting power level allows the card to go to sleep when not busy.
47 * We calculate a sleep command based on the required latency, which
48 * we get from mac80211. In order to handle thermal throttling, we can
49 * also use pre-defined power levels.
53 * For now, keep using power level 1 instead of automatically
56 bool no_sleep_autoadjust
= true;
57 module_param(no_sleep_autoadjust
, bool, S_IRUGO
);
58 MODULE_PARM_DESC(no_sleep_autoadjust
,
59 "don't automatically adjust sleep level "
60 "according to maximum network latency");
63 * This defines the old power levels. They are still used by default
64 * (level 1) and for thermal throttle (levels 3 through 5)
67 struct iwl_power_vec_entry
{
68 struct iwl_powertable_cmd cmd
;
/* DTIM-period boundaries used to pick one of the three sleep tables below. */
#define IWL_DTIM_RANGE_0_MAX	2
#define IWL_DTIM_RANGE_1_MAX	10

#define NOSLP cpu_to_le16(0), 0, 0
#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
#define TU_TO_USEC 1024
#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
/* Build a 5-entry little-endian sleep-interval vector. */
#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
				     cpu_to_le32(X1), \
				     cpu_to_le32(X2), \
				     cpu_to_le32(X3), \
				     cpu_to_le32(X4)}
84 /* default power management (not Tx power) table values */
85 /* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
86 static const struct iwl_power_vec_entry range_0
[IWL_POWER_NUM
] = {
87 {{SLP
, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
88 {{SLP
, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
89 {{SLP
, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
90 {{SLP
, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
91 {{SLP
, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
95 /* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
96 static const struct iwl_power_vec_entry range_1
[IWL_POWER_NUM
] = {
97 {{SLP
, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
98 {{SLP
, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
99 {{SLP
, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
100 {{SLP
, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
101 {{SLP
, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2}
104 /* for DTIM period > IWL_DTIM_RANGE_1_MAX */
105 static const struct iwl_power_vec_entry range_2
[IWL_POWER_NUM
] = {
106 {{SLP
, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
107 {{SLP
, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
108 {{SLP
, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
109 {{SLP
, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
110 {{SLP
, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
113 static void iwl_static_sleep_cmd(struct iwl_priv
*priv
,
114 struct iwl_powertable_cmd
*cmd
,
115 enum iwl_power_level lvl
, int period
)
117 const struct iwl_power_vec_entry
*table
;
122 if (period
< IWL_DTIM_RANGE_1_MAX
)
124 if (period
< IWL_DTIM_RANGE_0_MAX
)
127 BUG_ON(lvl
< 0 || lvl
>= IWL_POWER_NUM
);
129 *cmd
= table
[lvl
].cmd
;
135 skip
= !!table
[lvl
].no_dtim
;
139 __le32 slp_itrvl
= cmd
->sleep_interval
[IWL_POWER_VEC_SIZE
- 1];
140 max_sleep
= le32_to_cpu(slp_itrvl
);
141 if (max_sleep
== 0xFF)
142 max_sleep
= period
* (skip
+ 1);
143 else if (max_sleep
> period
)
144 max_sleep
= (le32_to_cpu(slp_itrvl
) / period
) * period
;
145 cmd
->flags
|= IWL_POWER_SLEEP_OVER_DTIM_MSK
;
148 cmd
->flags
&= ~IWL_POWER_SLEEP_OVER_DTIM_MSK
;
151 for (i
= 0; i
< IWL_POWER_VEC_SIZE
; i
++)
152 if (le32_to_cpu(cmd
->sleep_interval
[i
]) > max_sleep
)
153 cmd
->sleep_interval
[i
] = cpu_to_le32(max_sleep
);
155 if (priv
->power_data
.pci_pm
)
156 cmd
->flags
|= IWL_POWER_PCI_PM_MSK
;
158 cmd
->flags
&= ~IWL_POWER_PCI_PM_MSK
;
160 IWL_DEBUG_POWER(priv
, "Sleep command for index %d\n", lvl
+ 1);
163 /* default Thermal Throttling transaction table
164 * Current state | Throttling Down | Throttling Up
165 *=============================================================================
166 * Condition Nxt State Condition Nxt State Condition Nxt State
167 *-----------------------------------------------------------------------------
168 * IWL_TI_0 T >= 115 CT_KILL 115>T>=105 TI_1 N/A N/A
169 * IWL_TI_1 T >= 115 CT_KILL 115>T>=110 TI_2 T<=95 TI_0
170 * IWL_TI_2 T >= 115 CT_KILL T<=100 TI_1
171 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
172 *=============================================================================
174 static const struct iwl_tt_trans tt_range_0
[IWL_TI_STATE_MAX
- 1] = {
175 {IWL_TI_0
, IWL_ABSOLUTE_ZERO
, 104},
176 {IWL_TI_1
, 105, CT_KILL_THRESHOLD
},
177 {IWL_TI_CT_KILL
, CT_KILL_THRESHOLD
+ 1, IWL_ABSOLUTE_MAX
}
179 static const struct iwl_tt_trans tt_range_1
[IWL_TI_STATE_MAX
- 1] = {
180 {IWL_TI_0
, IWL_ABSOLUTE_ZERO
, 95},
181 {IWL_TI_2
, 110, CT_KILL_THRESHOLD
},
182 {IWL_TI_CT_KILL
, CT_KILL_THRESHOLD
+ 1, IWL_ABSOLUTE_MAX
}
184 static const struct iwl_tt_trans tt_range_2
[IWL_TI_STATE_MAX
- 1] = {
185 {IWL_TI_1
, IWL_ABSOLUTE_ZERO
, 100},
186 {IWL_TI_CT_KILL
, CT_KILL_THRESHOLD
+ 1, IWL_ABSOLUTE_MAX
},
187 {IWL_TI_CT_KILL
, CT_KILL_THRESHOLD
+ 1, IWL_ABSOLUTE_MAX
}
189 static const struct iwl_tt_trans tt_range_3
[IWL_TI_STATE_MAX
- 1] = {
190 {IWL_TI_0
, IWL_ABSOLUTE_ZERO
, CT_KILL_EXIT_THRESHOLD
},
191 {IWL_TI_CT_KILL
, CT_KILL_EXIT_THRESHOLD
+ 1, IWL_ABSOLUTE_MAX
},
192 {IWL_TI_CT_KILL
, CT_KILL_EXIT_THRESHOLD
+ 1, IWL_ABSOLUTE_MAX
}
195 /* Advance Thermal Throttling default restriction table */
196 static const struct iwl_tt_restriction restriction_range
[IWL_TI_STATE_MAX
] = {
197 {IWL_ANT_OK_MULTI
, IWL_ANT_OK_MULTI
, true },
198 {IWL_ANT_OK_SINGLE
, IWL_ANT_OK_MULTI
, true },
199 {IWL_ANT_OK_SINGLE
, IWL_ANT_OK_SINGLE
, false },
200 {IWL_ANT_OK_NONE
, IWL_ANT_OK_NONE
, false }
204 static void iwl_power_sleep_cam_cmd(struct iwl_priv
*priv
,
205 struct iwl_powertable_cmd
*cmd
)
207 memset(cmd
, 0, sizeof(*cmd
));
209 if (priv
->power_data
.pci_pm
)
210 cmd
->flags
|= IWL_POWER_PCI_PM_MSK
;
212 IWL_DEBUG_POWER(priv
, "Sleep command for CAM\n");
215 static void iwl_power_fill_sleep_cmd(struct iwl_priv
*priv
,
216 struct iwl_powertable_cmd
*cmd
,
217 int dynps_ms
, int wakeup_period
)
220 * These are the original power level 3 sleep successions. The
221 * device may behave better with such succession and was also
222 * only tested with that. Just like the original sleep commands,
223 * also adjust the succession here to the wakeup_period below.
224 * The ranges are the same as for the sleep commands, 0-2, 3-9
225 * and >10, which is selected based on the DTIM interval for
226 * the sleep index but here we use the wakeup period since that
227 * is what we need to do for the latency requirements.
229 static const u8 slp_succ_r0
[IWL_POWER_VEC_SIZE
] = { 2, 2, 2, 2, 2 };
230 static const u8 slp_succ_r1
[IWL_POWER_VEC_SIZE
] = { 2, 4, 6, 7, 9 };
231 static const u8 slp_succ_r2
[IWL_POWER_VEC_SIZE
] = { 2, 7, 9, 9, 0xFF };
232 const u8
*slp_succ
= slp_succ_r0
;
235 if (wakeup_period
> IWL_DTIM_RANGE_0_MAX
)
236 slp_succ
= slp_succ_r1
;
237 if (wakeup_period
> IWL_DTIM_RANGE_1_MAX
)
238 slp_succ
= slp_succ_r2
;
240 memset(cmd
, 0, sizeof(*cmd
));
242 cmd
->flags
= IWL_POWER_DRIVER_ALLOW_SLEEP_MSK
|
243 IWL_POWER_FAST_PD
; /* no use seeing frames for others */
245 if (priv
->power_data
.pci_pm
)
246 cmd
->flags
|= IWL_POWER_PCI_PM_MSK
;
248 cmd
->rx_data_timeout
= cpu_to_le32(1000 * dynps_ms
);
249 cmd
->tx_data_timeout
= cpu_to_le32(1000 * dynps_ms
);
251 for (i
= 0; i
< IWL_POWER_VEC_SIZE
; i
++)
252 cmd
->sleep_interval
[i
] =
253 cpu_to_le32(min_t(int, slp_succ
[i
], wakeup_period
));
255 IWL_DEBUG_POWER(priv
, "Automatic sleep command\n");
258 static int iwl_set_power(struct iwl_priv
*priv
, struct iwl_powertable_cmd
*cmd
)
260 IWL_DEBUG_POWER(priv
, "Sending power/sleep command\n");
261 IWL_DEBUG_POWER(priv
, "Flags value = 0x%08X\n", cmd
->flags
);
262 IWL_DEBUG_POWER(priv
, "Tx timeout = %u\n", le32_to_cpu(cmd
->tx_data_timeout
));
263 IWL_DEBUG_POWER(priv
, "Rx timeout = %u\n", le32_to_cpu(cmd
->rx_data_timeout
));
264 IWL_DEBUG_POWER(priv
, "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
265 le32_to_cpu(cmd
->sleep_interval
[0]),
266 le32_to_cpu(cmd
->sleep_interval
[1]),
267 le32_to_cpu(cmd
->sleep_interval
[2]),
268 le32_to_cpu(cmd
->sleep_interval
[3]),
269 le32_to_cpu(cmd
->sleep_interval
[4]));
271 return iwl_send_cmd_pdu(priv
, POWER_TABLE_CMD
,
272 sizeof(struct iwl_powertable_cmd
), cmd
);
276 int iwl_power_update_mode(struct iwl_priv
*priv
, bool force
)
279 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
280 bool enabled
= (priv
->iw_mode
== NL80211_IFTYPE_STATION
) &&
281 (priv
->hw
->conf
.flags
& IEEE80211_CONF_PS
);
283 struct iwl_powertable_cmd cmd
;
286 /* Don't update the RX chain when chain noise calibration is running */
287 update_chains
= priv
->chain_noise_data
.state
== IWL_CHAIN_NOISE_DONE
||
288 priv
->chain_noise_data
.state
== IWL_CHAIN_NOISE_ALIVE
;
291 dtimper
= priv
->vif
->bss_conf
.dtim_period
;
295 if (priv
->cfg
->broken_powersave
)
296 iwl_power_sleep_cam_cmd(priv
, &cmd
);
297 else if (priv
->cfg
->supports_idle
&&
298 priv
->hw
->conf
.flags
& IEEE80211_CONF_IDLE
)
299 iwl_static_sleep_cmd(priv
, &cmd
, IWL_POWER_INDEX_5
, 20);
300 else if (tt
->state
>= IWL_TI_1
)
301 iwl_static_sleep_cmd(priv
, &cmd
, tt
->tt_power_mode
, dtimper
);
303 iwl_power_sleep_cam_cmd(priv
, &cmd
);
304 else if (priv
->power_data
.debug_sleep_level_override
>= 0)
305 iwl_static_sleep_cmd(priv
, &cmd
,
306 priv
->power_data
.debug_sleep_level_override
,
308 else if (no_sleep_autoadjust
)
309 iwl_static_sleep_cmd(priv
, &cmd
, IWL_POWER_INDEX_1
, dtimper
);
311 iwl_power_fill_sleep_cmd(priv
, &cmd
,
312 priv
->hw
->conf
.dynamic_ps_timeout
,
313 priv
->hw
->conf
.max_sleep_period
);
315 if (iwl_is_ready_rf(priv
) &&
316 (memcmp(&priv
->power_data
.sleep_cmd
, &cmd
, sizeof(cmd
)) || force
)) {
317 if (cmd
.flags
& IWL_POWER_DRIVER_ALLOW_SLEEP_MSK
)
318 set_bit(STATUS_POWER_PMI
, &priv
->status
);
320 ret
= iwl_set_power(priv
, &cmd
);
322 if (!(cmd
.flags
& IWL_POWER_DRIVER_ALLOW_SLEEP_MSK
))
323 clear_bit(STATUS_POWER_PMI
, &priv
->status
);
325 if (priv
->cfg
->ops
->lib
->update_chain_flags
&&
327 priv
->cfg
->ops
->lib
->update_chain_flags(priv
);
328 else if (priv
->cfg
->ops
->lib
->update_chain_flags
)
329 IWL_DEBUG_POWER(priv
,
330 "Cannot update the power, chain noise "
331 "calibration running: %d\n",
332 priv
->chain_noise_data
.state
);
333 memcpy(&priv
->power_data
.sleep_cmd
, &cmd
, sizeof(cmd
));
335 IWL_ERR(priv
, "set power fail, ret = %d", ret
);
340 EXPORT_SYMBOL(iwl_power_update_mode
);
342 bool iwl_ht_enabled(struct iwl_priv
*priv
)
344 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
345 struct iwl_tt_restriction
*restriction
;
347 if (!priv
->thermal_throttle
.advanced_tt
)
349 restriction
= tt
->restriction
+ tt
->state
;
350 return restriction
->is_ht
;
352 EXPORT_SYMBOL(iwl_ht_enabled
);
354 enum iwl_antenna_ok
iwl_tx_ant_restriction(struct iwl_priv
*priv
)
356 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
357 struct iwl_tt_restriction
*restriction
;
359 if (!priv
->thermal_throttle
.advanced_tt
)
360 return IWL_ANT_OK_MULTI
;
361 restriction
= tt
->restriction
+ tt
->state
;
362 return restriction
->tx_stream
;
364 EXPORT_SYMBOL(iwl_tx_ant_restriction
);
366 enum iwl_antenna_ok
iwl_rx_ant_restriction(struct iwl_priv
*priv
)
368 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
369 struct iwl_tt_restriction
*restriction
;
371 if (!priv
->thermal_throttle
.advanced_tt
)
372 return IWL_ANT_OK_MULTI
;
373 restriction
= tt
->restriction
+ tt
->state
;
374 return restriction
->rx_stream
;
377 #define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
380 * toggle the bit to wake up uCode and check the temperature
381 * if the temperature is below CT, uCode will stay awake and send card
382 * state notification with CT_KILL bit clear to inform Thermal Throttling
383 * Management to change state. Otherwise, uCode will go back to sleep
384 * without doing anything, driver should continue the 5 seconds timer
385 * to wake up uCode for temperature check until temperature drop below CT
387 static void iwl_tt_check_exit_ct_kill(unsigned long data
)
389 struct iwl_priv
*priv
= (struct iwl_priv
*)data
;
390 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
393 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
396 if (tt
->state
== IWL_TI_CT_KILL
) {
397 if (priv
->thermal_throttle
.ct_kill_toggle
) {
398 iwl_write32(priv
, CSR_UCODE_DRV_GP1_CLR
,
399 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT
);
400 priv
->thermal_throttle
.ct_kill_toggle
= false;
402 iwl_write32(priv
, CSR_UCODE_DRV_GP1_SET
,
403 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT
);
404 priv
->thermal_throttle
.ct_kill_toggle
= true;
406 iwl_read32(priv
, CSR_UCODE_DRV_GP1
);
407 spin_lock_irqsave(&priv
->reg_lock
, flags
);
408 if (!iwl_grab_nic_access(priv
))
409 iwl_release_nic_access(priv
);
410 spin_unlock_irqrestore(&priv
->reg_lock
, flags
);
412 /* Reschedule the ct_kill timer to occur in
413 * CT_KILL_EXIT_DURATION seconds to ensure we get a
415 mod_timer(&priv
->thermal_throttle
.ct_kill_exit_tm
, jiffies
+
416 CT_KILL_EXIT_DURATION
* HZ
);
420 static void iwl_perform_ct_kill_task(struct iwl_priv
*priv
,
424 IWL_DEBUG_POWER(priv
, "Stop all queues\n");
425 if (priv
->mac80211_registered
)
426 ieee80211_stop_queues(priv
->hw
);
427 IWL_DEBUG_POWER(priv
,
428 "Schedule 5 seconds CT_KILL Timer\n");
429 mod_timer(&priv
->thermal_throttle
.ct_kill_exit_tm
, jiffies
+
430 CT_KILL_EXIT_DURATION
* HZ
);
432 IWL_DEBUG_POWER(priv
, "Wake all queues\n");
433 if (priv
->mac80211_registered
)
434 ieee80211_wake_queues(priv
->hw
);
438 #define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY)
439 #define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100)
440 #define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90)
443 * Legacy thermal throttling
444 * 1) Avoid NIC destruction due to high temperatures
445 * Chip will identify dangerously high temperatures that can
446 * harm the device and will power down
447 * 2) Avoid the NIC power down due to high temperature
448 * Throttle early enough to lower the power consumption before
449 * drastic steps are needed
451 static void iwl_legacy_tt_handler(struct iwl_priv
*priv
, s32 temp
)
453 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
454 enum iwl_tt_state old_state
;
456 #ifdef CONFIG_IWLWIFI_DEBUG
457 if ((tt
->tt_previous_temp
) &&
458 (temp
> tt
->tt_previous_temp
) &&
459 ((temp
- tt
->tt_previous_temp
) >
460 IWL_TT_INCREASE_MARGIN
)) {
461 IWL_DEBUG_POWER(priv
,
462 "Temperature increase %d degree Celsius\n",
463 (temp
- tt
->tt_previous_temp
));
466 old_state
= tt
->state
;
468 if (temp
>= IWL_MINIMAL_POWER_THRESHOLD
)
469 tt
->state
= IWL_TI_CT_KILL
;
470 else if (temp
>= IWL_REDUCED_PERFORMANCE_THRESHOLD_2
)
471 tt
->state
= IWL_TI_2
;
472 else if (temp
>= IWL_REDUCED_PERFORMANCE_THRESHOLD_1
)
473 tt
->state
= IWL_TI_1
;
475 tt
->state
= IWL_TI_0
;
477 #ifdef CONFIG_IWLWIFI_DEBUG
478 tt
->tt_previous_temp
= temp
;
480 if (tt
->state
!= old_state
) {
484 * When the system is ready to go back to IWL_TI_0
485 * we only have to call iwl_power_update_mode() to
490 tt
->tt_power_mode
= IWL_POWER_INDEX_3
;
493 tt
->tt_power_mode
= IWL_POWER_INDEX_4
;
496 tt
->tt_power_mode
= IWL_POWER_INDEX_5
;
499 mutex_lock(&priv
->mutex
);
500 if (iwl_power_update_mode(priv
, true)) {
501 /* TT state not updated
502 * try again during next temperature read
504 tt
->state
= old_state
;
505 IWL_ERR(priv
, "Cannot update power mode, "
506 "TT state not updated\n");
508 if (tt
->state
== IWL_TI_CT_KILL
)
509 iwl_perform_ct_kill_task(priv
, true);
510 else if (old_state
== IWL_TI_CT_KILL
&&
511 tt
->state
!= IWL_TI_CT_KILL
)
512 iwl_perform_ct_kill_task(priv
, false);
513 IWL_DEBUG_POWER(priv
, "Temperature state changed %u\n",
515 IWL_DEBUG_POWER(priv
, "Power Index change to %u\n",
518 mutex_unlock(&priv
->mutex
);
523 * Advance thermal throttling
524 * 1) Avoid NIC destruction due to high temperatures
525 * Chip will identify dangerously high temperatures that can
526 * harm the device and will power down
527 * 2) Avoid the NIC power down due to high temperature
528 * Throttle early enough to lower the power consumption before
529 * drastic steps are needed
530 * Actions include relaxing the power down sleep thresholds and
531 * decreasing the number of TX streams
532 * 3) Avoid throughput performance impact as much as possible
534 *=============================================================================
535 * Condition Nxt State Condition Nxt State Condition Nxt State
536 *-----------------------------------------------------------------------------
537 * IWL_TI_0 T >= 115 CT_KILL 115>T>=105 TI_1 N/A N/A
538 * IWL_TI_1 T >= 115 CT_KILL 115>T>=110 TI_2 T<=95 TI_0
539 * IWL_TI_2 T >= 115 CT_KILL T<=100 TI_1
540 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
541 *=============================================================================
543 static void iwl_advance_tt_handler(struct iwl_priv
*priv
, s32 temp
)
545 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
547 bool changed
= false;
548 enum iwl_tt_state old_state
;
549 struct iwl_tt_trans
*transaction
;
551 old_state
= tt
->state
;
552 for (i
= 0; i
< IWL_TI_STATE_MAX
- 1; i
++) {
553 /* based on the current TT state,
554 * find the curresponding transaction table
555 * each table has (IWL_TI_STATE_MAX - 1) entries
556 * tt->transaction + ((old_state * (IWL_TI_STATE_MAX - 1))
557 * will advance to the correct table.
558 * then based on the current temperature
559 * find the next state need to transaction to
560 * go through all the possible (IWL_TI_STATE_MAX - 1) entries
561 * in the current table to see if transaction is needed
563 transaction
= tt
->transaction
+
564 ((old_state
* (IWL_TI_STATE_MAX
- 1)) + i
);
565 if (temp
>= transaction
->tt_low
&&
566 temp
<= transaction
->tt_high
) {
567 #ifdef CONFIG_IWLWIFI_DEBUG
568 if ((tt
->tt_previous_temp
) &&
569 (temp
> tt
->tt_previous_temp
) &&
570 ((temp
- tt
->tt_previous_temp
) >
571 IWL_TT_INCREASE_MARGIN
)) {
572 IWL_DEBUG_POWER(priv
,
573 "Temperature increase %d "
575 (temp
- tt
->tt_previous_temp
));
577 tt
->tt_previous_temp
= temp
;
580 transaction
->next_state
) {
583 transaction
->next_state
;
589 struct iwl_rxon_cmd
*rxon
= &priv
->staging_rxon
;
591 if (tt
->state
>= IWL_TI_1
) {
592 /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
593 tt
->tt_power_mode
= IWL_POWER_INDEX_5
;
594 if (!iwl_ht_enabled(priv
))
596 rxon
->flags
&= ~(RXON_FLG_CHANNEL_MODE_MSK
|
597 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
|
598 RXON_FLG_HT40_PROT_MSK
|
599 RXON_FLG_HT_PROT_MSK
);
601 /* check HT capability and set
602 * according to the system HT capability
603 * in case get disabled before */
604 iwl_set_rxon_ht(priv
, &priv
->current_ht_config
);
609 * restore system power setting -- it will be
610 * recalculated automatically.
613 /* check HT capability and set
614 * according to the system HT capability
615 * in case get disabled before */
616 iwl_set_rxon_ht(priv
, &priv
->current_ht_config
);
618 mutex_lock(&priv
->mutex
);
619 if (iwl_power_update_mode(priv
, true)) {
620 /* TT state not updated
621 * try again during next temperature read
623 IWL_ERR(priv
, "Cannot update power mode, "
624 "TT state not updated\n");
625 tt
->state
= old_state
;
627 IWL_DEBUG_POWER(priv
,
628 "Thermal Throttling to new state: %u\n",
630 if (old_state
!= IWL_TI_CT_KILL
&&
631 tt
->state
== IWL_TI_CT_KILL
) {
632 IWL_DEBUG_POWER(priv
, "Enter IWL_TI_CT_KILL\n");
633 iwl_perform_ct_kill_task(priv
, true);
635 } else if (old_state
== IWL_TI_CT_KILL
&&
636 tt
->state
!= IWL_TI_CT_KILL
) {
637 IWL_DEBUG_POWER(priv
, "Exit IWL_TI_CT_KILL\n");
638 iwl_perform_ct_kill_task(priv
, false);
641 mutex_unlock(&priv
->mutex
);
645 /* Card State Notification indicated reach critical temperature
646 * if PSP not enable, no Thermal Throttling function will be performed
647 * just set the GP1 bit to acknowledge the event
648 * otherwise, go into IWL_TI_CT_KILL state
649 * since Card State Notification will not provide any temperature reading
651 * so just pass the CT_KILL temperature to iwl_legacy_tt_handler()
653 * pass CT_KILL_THRESHOLD+1 to make sure move into IWL_TI_CT_KILL state
655 static void iwl_bg_ct_enter(struct work_struct
*work
)
657 struct iwl_priv
*priv
= container_of(work
, struct iwl_priv
, ct_enter
);
658 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
660 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
663 if (!iwl_is_ready(priv
))
666 if (tt
->state
!= IWL_TI_CT_KILL
) {
667 IWL_ERR(priv
, "Device reached critical temperature "
668 "- ucode going to sleep!\n");
669 if (!priv
->thermal_throttle
.advanced_tt
)
670 iwl_legacy_tt_handler(priv
,
671 IWL_MINIMAL_POWER_THRESHOLD
);
673 iwl_advance_tt_handler(priv
,
674 CT_KILL_THRESHOLD
+ 1);
678 /* Card State Notification indicated out of critical temperature
679 * since Card State Notification will not provide any temperature reading
680 * so pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2 temperature
681 * to iwl_legacy_tt_handler() to get out of IWL_CT_KILL state
683 static void iwl_bg_ct_exit(struct work_struct
*work
)
685 struct iwl_priv
*priv
= container_of(work
, struct iwl_priv
, ct_exit
);
686 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
688 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
691 if (!iwl_is_ready(priv
))
694 /* stop ct_kill_exit_tm timer */
695 del_timer_sync(&priv
->thermal_throttle
.ct_kill_exit_tm
);
697 if (tt
->state
== IWL_TI_CT_KILL
) {
699 "Device temperature below critical"
701 if (!priv
->thermal_throttle
.advanced_tt
)
702 iwl_legacy_tt_handler(priv
,
703 IWL_REDUCED_PERFORMANCE_THRESHOLD_2
);
705 iwl_advance_tt_handler(priv
, CT_KILL_EXIT_THRESHOLD
);
709 void iwl_tt_enter_ct_kill(struct iwl_priv
*priv
)
711 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
714 IWL_DEBUG_POWER(priv
, "Queueing critical temperature enter.\n");
715 queue_work(priv
->workqueue
, &priv
->ct_enter
);
717 EXPORT_SYMBOL(iwl_tt_enter_ct_kill
);
719 void iwl_tt_exit_ct_kill(struct iwl_priv
*priv
)
721 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
724 IWL_DEBUG_POWER(priv
, "Queueing critical temperature exit.\n");
725 queue_work(priv
->workqueue
, &priv
->ct_exit
);
727 EXPORT_SYMBOL(iwl_tt_exit_ct_kill
);
729 static void iwl_bg_tt_work(struct work_struct
*work
)
731 struct iwl_priv
*priv
= container_of(work
, struct iwl_priv
, tt_work
);
732 s32 temp
= priv
->temperature
; /* degrees CELSIUS except 4965 */
734 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
737 if ((priv
->hw_rev
& CSR_HW_REV_TYPE_MSK
) == CSR_HW_REV_TYPE_4965
)
738 temp
= KELVIN_TO_CELSIUS(priv
->temperature
);
740 if (!priv
->thermal_throttle
.advanced_tt
)
741 iwl_legacy_tt_handler(priv
, temp
);
743 iwl_advance_tt_handler(priv
, temp
);
746 void iwl_tt_handler(struct iwl_priv
*priv
)
748 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
751 IWL_DEBUG_POWER(priv
, "Queueing thermal throttling work.\n");
752 queue_work(priv
->workqueue
, &priv
->tt_work
);
754 EXPORT_SYMBOL(iwl_tt_handler
);
756 /* Thermal throttling initialization
757 * For advance thermal throttling:
758 * Initialize Thermal Index and temperature threshold table
759 * Initialize thermal throttling restriction table
761 void iwl_tt_initialize(struct iwl_priv
*priv
)
763 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
764 int size
= sizeof(struct iwl_tt_trans
) * (IWL_TI_STATE_MAX
- 1);
765 struct iwl_tt_trans
*transaction
;
767 IWL_DEBUG_POWER(priv
, "Initialize Thermal Throttling \n");
769 memset(tt
, 0, sizeof(struct iwl_tt_mgmt
));
771 tt
->state
= IWL_TI_0
;
772 init_timer(&priv
->thermal_throttle
.ct_kill_exit_tm
);
773 priv
->thermal_throttle
.ct_kill_exit_tm
.data
= (unsigned long)priv
;
774 priv
->thermal_throttle
.ct_kill_exit_tm
.function
= iwl_tt_check_exit_ct_kill
;
776 /* setup deferred ct kill work */
777 INIT_WORK(&priv
->tt_work
, iwl_bg_tt_work
);
778 INIT_WORK(&priv
->ct_enter
, iwl_bg_ct_enter
);
779 INIT_WORK(&priv
->ct_exit
, iwl_bg_ct_exit
);
781 switch (priv
->hw_rev
& CSR_HW_REV_TYPE_MSK
) {
782 case CSR_HW_REV_TYPE_6x00
:
783 case CSR_HW_REV_TYPE_6x50
:
784 IWL_DEBUG_POWER(priv
, "Advanced Thermal Throttling\n");
785 tt
->restriction
= kzalloc(sizeof(struct iwl_tt_restriction
) *
786 IWL_TI_STATE_MAX
, GFP_KERNEL
);
787 tt
->transaction
= kzalloc(sizeof(struct iwl_tt_trans
) *
788 IWL_TI_STATE_MAX
* (IWL_TI_STATE_MAX
- 1),
790 if (!tt
->restriction
|| !tt
->transaction
) {
791 IWL_ERR(priv
, "Fallback to Legacy Throttling\n");
792 priv
->thermal_throttle
.advanced_tt
= false;
793 kfree(tt
->restriction
);
794 tt
->restriction
= NULL
;
795 kfree(tt
->transaction
);
796 tt
->transaction
= NULL
;
798 transaction
= tt
->transaction
+
799 (IWL_TI_0
* (IWL_TI_STATE_MAX
- 1));
800 memcpy(transaction
, &tt_range_0
[0], size
);
801 transaction
= tt
->transaction
+
802 (IWL_TI_1
* (IWL_TI_STATE_MAX
- 1));
803 memcpy(transaction
, &tt_range_1
[0], size
);
804 transaction
= tt
->transaction
+
805 (IWL_TI_2
* (IWL_TI_STATE_MAX
- 1));
806 memcpy(transaction
, &tt_range_2
[0], size
);
807 transaction
= tt
->transaction
+
808 (IWL_TI_CT_KILL
* (IWL_TI_STATE_MAX
- 1));
809 memcpy(transaction
, &tt_range_3
[0], size
);
810 size
= sizeof(struct iwl_tt_restriction
) *
812 memcpy(tt
->restriction
,
813 &restriction_range
[0], size
);
814 priv
->thermal_throttle
.advanced_tt
= true;
818 IWL_DEBUG_POWER(priv
, "Legacy Thermal Throttling\n");
819 priv
->thermal_throttle
.advanced_tt
= false;
823 EXPORT_SYMBOL(iwl_tt_initialize
);
825 /* cleanup thermal throttling management related memory and timer */
826 void iwl_tt_exit(struct iwl_priv
*priv
)
828 struct iwl_tt_mgmt
*tt
= &priv
->thermal_throttle
;
830 /* stop ct_kill_exit_tm timer if activated */
831 del_timer_sync(&priv
->thermal_throttle
.ct_kill_exit_tm
);
832 cancel_work_sync(&priv
->tt_work
);
833 cancel_work_sync(&priv
->ct_enter
);
834 cancel_work_sync(&priv
->ct_exit
);
836 if (priv
->thermal_throttle
.advanced_tt
) {
837 /* free advance thermal throttling memory */
838 kfree(tt
->restriction
);
839 tt
->restriction
= NULL
;
840 kfree(tt
->transaction
);
841 tt
->transaction
= NULL
;
844 EXPORT_SYMBOL(iwl_tt_exit
);
846 /* initialize to default */
847 void iwl_power_initialize(struct iwl_priv
*priv
)
849 u16 lctl
= iwl_pcie_link_ctl(priv
);
851 priv
->power_data
.pci_pm
= !(lctl
& PCI_CFG_LINK_CTRL_VAL_L0S_EN
);
853 priv
->power_data
.debug_sleep_level_override
= -1;
855 memset(&priv
->power_data
.sleep_cmd
, 0,
856 sizeof(priv
->power_data
.sleep_cmd
));
858 EXPORT_SYMBOL(iwl_power_initialize
);