drivers/net/wireless/iwlwifi/iwl-io.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/delay.h>
#include <linux/device.h>

#include "iwl-io.h"
#include "iwl-csr.h"
#include "iwl-debug.h"

#define IWL_POLL_INTERVAL 10	/* microseconds */

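/* Lockless bit helpers; callers are expected to hold trans->reg_lock. */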
static inline void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
	iwl_write32(trans, reg, iwl_read32(trans, reg) | mask);
}

static inline void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
	iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask);
}

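/* Locked variants: take trans->reg_lock around the read-modify-write. */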
void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&trans->reg_lock, flags);
	__iwl_set_bit(trans, reg, mask);
	spin_unlock_irqrestore(&trans->reg_lock, flags);
}

void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&trans->reg_lock, flags);
	__iwl_clear_bit(trans, reg, mask);
	spin_unlock_irqrestore(&trans->reg_lock, flags);
}

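/*
 * Poll until (addr & mask) matches (bits & mask). Returns the time waited
 * in microseconds on success, or -ETIMEDOUT once 'timeout' microseconds
 * have elapsed.
 */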
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
		 u32 bits, u32 mask, int timeout)
{
	int t = 0;

	do {
		if ((iwl_read32(trans, addr) & mask) == (bits & mask))
			return t;
		udelay(IWL_POLL_INTERVAL);
		t += IWL_POLL_INTERVAL;
	} while (t < timeout);

	return -ETIMEDOUT;
}

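/*
 * Wake the NIC for register access and wait until it reports that its
 * MAC clock is ready. Must be called with trans->reg_lock held; on
 * timeout an NMI is forced and -EIO is returned.
 */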
int iwl_grab_nic_access_silent(struct iwl_trans *trans)
{
	int ret;

	lockdep_assert_held(&trans->reg_lock);

	/* this bit wakes up the NIC */
	__iwl_set_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored. We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (ret < 0) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return -EIO;
	}

	return 0;
}

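/*
 * Like iwl_grab_nic_access_silent(), but warns (once) if waking the NIC
 * times out. Returns true on success, false otherwise.
 */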
bool iwl_grab_nic_access(struct iwl_trans *trans)
{
	int ret = iwl_grab_nic_access_silent(trans);
	if (unlikely(ret)) {
		u32 val = iwl_read32(trans, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for hardware access "
			     "(CSR_GP_CNTRL 0x%08x)\n", val);
		return false;
	}

	return true;
}

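/* Drop the MAC_ACCESS_REQ bit so the NIC may go back to sleep. */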
void iwl_release_nic_access(struct iwl_trans *trans)
{
	lockdep_assert_held(&trans->reg_lock);
	__iwl_clear_bit(trans, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
}

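/*
 * Illustrative sketch (not called anywhere in this file): a caller that
 * needs a burst of register accesses can wake the NIC once and keep it
 * awake for the whole sequence, as suggested in the comment in
 * iwl_grab_nic_access_silent():
 *
 *	unsigned long flags;
 *	u32 a, b;
 *
 *	spin_lock_irqsave(&trans->reg_lock, flags);
 *	if (likely(iwl_grab_nic_access(trans))) {
 *		a = iwl_read32(trans, CSR_GP_CNTRL);
 *		b = iwl_read32(trans, CSR_RESET);
 *		iwl_release_nic_access(trans);
 *	}
 *	spin_unlock_irqrestore(&trans->reg_lock, flags);
 */

/*
 * Direct-access helpers: each call wakes the NIC, performs a single 32-bit
 * register access, and lets the NIC sleep again.
 */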
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
	u32 value;
	unsigned long flags;

	spin_lock_irqsave(&trans->reg_lock, flags);
	iwl_grab_nic_access(trans);
	value = iwl_read32(trans, reg);
	iwl_release_nic_access(trans);
	spin_unlock_irqrestore(&trans->reg_lock, flags);

	return value;
}

void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&trans->reg_lock, flags);
	if (likely(iwl_grab_nic_access(trans))) {
		iwl_write32(trans, reg, value);
		iwl_release_nic_access(trans);
	}
	spin_unlock_irqrestore(&trans->reg_lock, flags);
}

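/*
 * Like iwl_poll_bit(), but uses iwl_read_direct32(), so the NIC is woken
 * for every poll iteration.
 */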
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
			int timeout)
{
	int t = 0;

	do {
		if ((iwl_read_direct32(trans, addr) & mask) == mask)
			return t;
		udelay(IWL_POLL_INTERVAL);
		t += IWL_POLL_INTERVAL;
	} while (t < timeout);

	return -ETIMEDOUT;
}

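/*
 * Periphery (PRPH) registers are reached indirectly through the
 * HBUS_TARG_PRPH_* address/data register pairs. These helpers assume the
 * caller already holds trans->reg_lock and has grabbed NIC access.
 */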
static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
	return iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static inline void __iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
	iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
		    ((addr & 0x0000FFFF) | (3 << 24)));
	iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

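/* Locked PRPH accessors: wake the NIC around a single periphery access. */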
u32 iwl_read_prph(struct iwl_trans *trans, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&trans->reg_lock, flags);
	iwl_grab_nic_access(trans);
	val = __iwl_read_prph(trans, reg);
	iwl_release_nic_access(trans);
	spin_unlock_irqrestore(&trans->reg_lock, flags);
	return val;
}

void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&trans->reg_lock, flags);
	if (likely(iwl_grab_nic_access(trans))) {
		__iwl_write_prph(trans, addr, val);
		iwl_release_nic_access(trans);
	}
	spin_unlock_irqrestore(&trans->reg_lock, flags);
}

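/*
 * Read-modify-write helpers for PRPH registers; each takes reg_lock and
 * keeps the NIC awake for the whole read/write sequence.
 */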
void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&trans->reg_lock, flags);
	if (likely(iwl_grab_nic_access(trans))) {
		__iwl_write_prph(trans, reg,
				 __iwl_read_prph(trans, reg) | mask);
		iwl_release_nic_access(trans);
	}
	spin_unlock_irqrestore(&trans->reg_lock, flags);
}

void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
			    u32 bits, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&trans->reg_lock, flags);
	if (likely(iwl_grab_nic_access(trans))) {
		__iwl_write_prph(trans, reg,
				 (__iwl_read_prph(trans, reg) & mask) | bits);
		iwl_release_nic_access(trans);
	}
	spin_unlock_irqrestore(&trans->reg_lock, flags);
}

void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&trans->reg_lock, flags);
	if (likely(iwl_grab_nic_access(trans))) {
		val = __iwl_read_prph(trans, reg);
		__iwl_write_prph(trans, reg, (val & ~mask));
		iwl_release_nic_access(trans);
	}
	spin_unlock_irqrestore(&trans->reg_lock, flags);
}

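/*
 * Read 'words' 32-bit words of device (target) memory into 'buf'. The
 * address register is written once; the data port then steps through
 * consecutive words.
 */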
void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
			      void *buf, int words)
{
	unsigned long flags;
	int offs;
	u32 *vals = buf;

	spin_lock_irqsave(&trans->reg_lock, flags);
	if (likely(iwl_grab_nic_access(trans))) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < words; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_release_nic_access(trans);
	}
	spin_unlock_irqrestore(&trans->reg_lock, flags);
}

u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	_iwl_read_targ_mem_words(trans, addr, &value, 1);

	return value;
}

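/*
 * Write 'words' 32-bit words from 'buf' to device (target) memory.
 * Returns 0 on success, or -EBUSY if the NIC could not be woken.
 */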
int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
			      void *buf, int words)
{
	unsigned long flags;
	int offs, result = 0;
	u32 *vals = buf;

	spin_lock_irqsave(&trans->reg_lock, flags);
	if (likely(iwl_grab_nic_access(trans))) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < words; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
		iwl_release_nic_access(trans);
	} else
		result = -EBUSY;
	spin_unlock_irqrestore(&trans->reg_lock, flags);

	return result;
}

int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
{
	return _iwl_write_targ_mem_words(trans, addr, &val, 1);
}