drivers/net/wireless/ath/ath9k/ar9003_wow.c
/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/export.h>
#include "ath9k.h"
#include "reg.h"
#include "reg_wow.h"
#include "hw-ops.h"

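/*
 * Put the MAC to sleep for WoW: enable power save, disable Rx and wait
 * for Rx DMA to stop, clear AR_DC_TSF2_ENABLE when no generic timer is
 * using TSF2 (AR9462/9565/9485), then request wake on interrupt only.
 */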
static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);

	/* set rx disable bit */
	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
		ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
			REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
		return;
	}

	if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
		if (!REG_READ(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL))
			REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
	} else if (AR_SREV_9485(ah)) {
		if (!(REG_READ(ah, AR_NDP2_TIMER_MODE) &
		      AR_GEN_TIMERS2_MODE_ENABLE_MASK))
			REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
	}

	REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
}

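/*
 * Build the keep-alive frame the hardware transmits on its own while the
 * host sleeps: program the transmit descriptor words and the frame
 * contents (a data frame from the STA to the current BSSID) into the WoW
 * keep-alive transmit buffer.
 */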
static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
	u32 ctl[13] = {0};
	u32 data_word[KAL_NUM_DATA_WORDS];
	u8 i;
	u32 wow_ka_data_word0;

	memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
	memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);

	/* set the transmit buffer */
	ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
	ctl[1] = 0;
	ctl[4] = 0;
	ctl[7] = (ah->txchainmask) << 2;
	ctl[2] = 0xf << 16; /* tx_tries 0 */

	if (IS_CHAN_2GHZ(ah->curchan))
		ctl[3] = 0x1b;	/* CCK_1M */
	else
		ctl[3] = 0xb;	/* OFDM_6M */

	for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
		REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);

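	/* After the loop i == KAL_NUM_DESC_WORDS, so this writes one
	 * further descriptor word from ctl[] (zero-initialized above). */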
	REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);

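	/* 802.11 header packed little-endian into 32-bit words: frame
	 * control and duration, then addr1 = BSSID, addr2 = STA MAC,
	 * addr3 = BSSID. */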
	data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
		       (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
	data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
		       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
	data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
		       (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
	data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
		       (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
	data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
		       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
	data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);

	if (AR_SREV_9462_20(ah)) {
		/* AR9462 2.0 has an extra descriptor word (time based
		 * discard) compared to other chips */
		REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
		wow_ka_data_word0 = AR_WOW_TXBUF(13);
	} else {
		wow_ka_data_word0 = AR_WOW_TXBUF(12);
	}

	for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
		REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
}

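/*
 * Program one user-supplied wakeup pattern and its mask into the pattern
 * match table, record the corresponding event bit in the driver's wakeup
 * mask and store the pattern length in the matching AR_WOW_LENGTH
 * register. Returns -ENOSPC when all pattern slots are in use.
 */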
int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
			       u8 *user_mask, int pattern_count,
			       int pattern_len)
{
	int i;
	u32 pattern_val, mask_val;
	u32 set, clr;

	if (pattern_count >= ah->wow.max_patterns)
		return -ENOSPC;

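	/* The first MAX_NUM_PATTERN_LEGACY patterns are enabled through
	 * AR_WOW_PATTERN, the remaining ones through AR_MAC_PCU_WOW4. */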
	if (pattern_count < MAX_NUM_PATTERN_LEGACY)
		REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
	else
		REG_SET_BIT(ah, AR_MAC_PCU_WOW4, BIT(pattern_count - 8));

	for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
		memcpy(&pattern_val, user_pattern, 4);
		REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
			  pattern_val);
		user_pattern += 4;
	}

	for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
		memcpy(&mask_val, user_mask, 4);
		REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
		user_mask += 4;
	}

	if (pattern_count < MAX_NUM_PATTERN_LEGACY)
		ah->wow.wow_event_mask |=
			BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
	else
		ah->wow.wow_event_mask2 |=
			BIT((pattern_count - 8) + AR_WOW_PAT_FOUND_SHIFT);

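	/* Each AR_WOW_LENGTHx register packs the lengths of four patterns. */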
	if (pattern_count < 4) {
		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
		      AR_WOW_LEN1_SHIFT(pattern_count);
		clr = AR_WOW_LENGTH1_MASK(pattern_count);
		REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
	} else if (pattern_count < 8) {
		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
		      AR_WOW_LEN2_SHIFT(pattern_count);
		clr = AR_WOW_LENGTH2_MASK(pattern_count);
		REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
	} else if (pattern_count < 12) {
		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
		      AR_WOW_LEN3_SHIFT(pattern_count);
		clr = AR_WOW_LENGTH3_MASK(pattern_count);
		REG_RMW(ah, AR_WOW_LENGTH3, set, clr);
	} else if (pattern_count < MAX_NUM_PATTERN) {
		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
		      AR_WOW_LEN4_SHIFT(pattern_count);
		clr = AR_WOW_LENGTH4_MASK(pattern_count);
		REG_RMW(ah, AR_WOW_LENGTH4, set, clr);
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);

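/*
 * Called on resume: read AR_WOW_PATTERN to find out why the chip woke us
 * up, translate the enabled events into AH_WOW_* status flags, then clear
 * the WoW events and restore the registers changed for WoW sleep.
 */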
u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
{
	u32 wow_status = 0;
	u32 val = 0, rval;

	/*
	 * read the WoW status register to know
	 * the wakeup reason
	 */
	rval = REG_READ(ah, AR_WOW_PATTERN);
	val = AR_WOW_STATUS(rval);

	/*
	 * mask only the WoW events that we have enabled. Sometimes
	 * we have spurious WoW events from the AR_WOW_PATTERN
	 * register. This mask will clean it up.
	 */

	val &= ah->wow.wow_event_mask;

	if (val) {
		if (val & AR_WOW_MAGIC_PAT_FOUND)
			wow_status |= AH_WOW_MAGIC_PATTERN_EN;
		if (AR_WOW_PATTERN_FOUND(val))
			wow_status |= AH_WOW_USER_PATTERN_EN;
		if (val & AR_WOW_KEEP_ALIVE_FAIL)
			wow_status |= AH_WOW_LINK_CHANGE;
		if (val & AR_WOW_BEACON_FAIL)
			wow_status |= AH_WOW_BEACON_MISS;
	}

	/*
	 * set and clear WOW_PME_CLEAR registers for the chip to
	 * generate next wow signal.
	 * disable D3 before accessing other registers ?
	 */

	/* do we need to check the bit value 0x01000000 (7-10) ?? */
	REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
		AR_PMCTRL_PWR_STATE_D1D3);

	/*
	 * clear all events
	 */
	REG_WRITE(ah, AR_WOW_PATTERN,
		  AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));

	/*
	 * restore the beacon threshold to init value
	 */
	REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

	/*
	 * Restore the way the PCI-E reset, Power-On-Reset and external
	 * PCIE_POR_SHORT pins are tied to their original values.
	 * Just before WoW sleep we untied the PCI-E reset from the chip's
	 * Power-On-Reset so that a PCI-E reset from the bus would not
	 * reset the chip.
	 */
	if (ah->is_pciexpress)
		ath9k_hw_configpcipowersave(ah, false);

	ah->wow.wow_event_mask = 0;

	return wow_status;
}
EXPORT_SYMBOL(ath9k_hw_wow_wakeup);

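/*
 * Adjust the reset tie-off bits in AR_WA before WoW sleep; only needed
 * for PCI Express devices.
 */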
static void ath9k_hw_wow_set_arwr_reg(struct ath_hw *ah)
{
	u32 wa_reg;

	if (!ah->is_pciexpress)
		return;

	/*
	 * We need to untie the internal POR (power-on-reset)
	 * to the external PCI-E reset. We also need to tie
	 * the PCI-E Phy reset to the PCI-E reset.
	 */
	wa_reg = REG_READ(ah, AR_WA);
	wa_reg &= ~AR_WA_UNTIE_RESET_EN;
	wa_reg |= AR_WA_RESET_EN;
	wa_reg |= AR_WA_POR_SHORT;

	REG_WRITE(ah, AR_WA, wa_reg);
}

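/*
 * Arm the chip for Wake-on-Wireless before suspend: configure PME and the
 * PCI power-management bits, program the keep-alive frame and timeouts,
 * enable the requested wakeup sources (user patterns, magic packet,
 * beacon miss, keep-alive/link failure) and finally put the MAC into WoW
 * sleep.
 */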
void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
{
	u32 wow_event_mask;
	u32 keep_alive, magic_pattern, host_pm_ctrl;

	wow_event_mask = ah->wow.wow_event_mask;

	/*
	 * AR_PMCTRL_HOST_PME_EN - Override PME enable in configuration
	 * space and allow MAC to generate WoW anyway.
	 *
	 * AR_PMCTRL_PWR_PM_CTRL_ENA - ???
	 *
	 * AR_PMCTRL_AUX_PWR_DET - PCI core SYS_AUX_PWR_DET signal,
	 * needs to be set for WoW in PCI mode.
	 *
	 * AR_PMCTRL_WOW_PME_CLR - WoW Clear Signal going to the MAC.
	 *
	 * Set the power states appropriately and enable PME.
	 *
	 * Set and clear WOW_PME_CLEAR for the chip
	 * to generate next wow signal.
	 */
	REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_HOST_PME_EN |
		    AR_PMCTRL_PWR_PM_CTRL_ENA |
		    AR_PMCTRL_AUX_PWR_DET |
		    AR_PMCTRL_WOW_PME_CLR);
	REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR);

	/*
	 * Random Backoff.
	 *
	 * 31:28 in AR_WOW_PATTERN : Indicates the number of bits used in the
	 * contention window. For value N,
	 * the random backoff will be selected between
	 * 0 and (2 ^ N) - 1.
	 */
	REG_SET_BIT(ah, AR_WOW_PATTERN,
		    AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF));

	/*
	 * AIFS time, Slot time, Keep Alive count.
	 */
	REG_SET_BIT(ah, AR_WOW_COUNT, AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
		    AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
		    AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT));
	/*
	 * Beacon timeout.
	 */
	if (pattern_enable & AH_WOW_BEACON_MISS)
		REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO);
	else
		REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO_MAX);

	/*
	 * Keep alive timeout in ms.
	 */
	if (!pattern_enable)
		REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, AR_WOW_KEEP_ALIVE_NEVER);
	else
		REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, KAL_TIMEOUT * 32);

	/*
	 * Keep alive delay in us.
	 */
	REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, KAL_DELAY * 1000);

	/*
	 * Create keep alive pattern to respond to beacons.
	 */
	ath9k_wow_create_keep_alive_pattern(ah);

	/*
	 * Configure keep alive register.
	 */
	keep_alive = REG_READ(ah, AR_WOW_KEEP_ALIVE);

	/* Send keep alive timeouts anyway */
	keep_alive &= ~AR_WOW_KEEP_ALIVE_AUTO_DIS;

	if (pattern_enable & AH_WOW_LINK_CHANGE) {
		keep_alive &= ~AR_WOW_KEEP_ALIVE_FAIL_DIS;
		wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
	} else {
		keep_alive |= AR_WOW_KEEP_ALIVE_FAIL_DIS;
	}

	REG_WRITE(ah, AR_WOW_KEEP_ALIVE, keep_alive);

	/*
	 * We are relying on a bmiss failure, ensure we have
	 * enough threshold to prevent false positives.
	 */
	REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
		      AR_WOW_BMISSTHRESHOLD);

	if (pattern_enable & AH_WOW_BEACON_MISS) {
		wow_event_mask |= AR_WOW_BEACON_FAIL;
		REG_SET_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
	} else {
		REG_CLR_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
	}

	/*
	 * Enable the magic packet registers.
	 */
	magic_pattern = REG_READ(ah, AR_WOW_PATTERN);
	magic_pattern |= AR_WOW_MAC_INTR_EN;

	if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
		magic_pattern |= AR_WOW_MAGIC_EN;
		wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
	} else {
		magic_pattern &= ~AR_WOW_MAGIC_EN;
	}

	REG_WRITE(ah, AR_WOW_PATTERN, magic_pattern);

	/*
	 * Enable pattern matching for packets which are less
	 * than 256 bytes.
	 */
	REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
		  AR_WOW_PATTERN_SUPPORTED);

	/*
	 * Set the power states appropriately and enable PME.
	 */
	host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL);
	host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3 |
			AR_PMCTRL_HOST_PME_EN |
			AR_PMCTRL_PWR_PM_CTRL_ENA;
	host_pm_ctrl &= ~AR_PCIE_PM_CTRL_ENA;

	if (AR_SREV_9462(ah)) {
		/*
		 * This is needed to prevent the chip from waking up
		 * the host within 3-4 seconds on certain platforms/BIOSes.
		 */
		host_pm_ctrl &= ~AR_PMCTRL_PWR_STATE_D1D3;
		host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3_REAL;
	}

	REG_WRITE(ah, AR_PCIE_PM_CTRL, host_pm_ctrl);

	/*
	 * Enable sequence number generation when asleep.
	 */
	REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);

	/* To bring down WOW power low margin */
	REG_SET_BIT(ah, AR_PCIE_PHY_REG3, BIT(13));

	ath9k_hw_wow_set_arwr_reg(ah);

	/* HW WoW */
	REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5));

	ath9k_hw_set_powermode_wow_sleep(ah);
	ah->wow.wow_event_mask = wow_event_mask;
}
EXPORT_SYMBOL(ath9k_hw_wow_enable);