iwlegacy: remove unnecessary read of PCI_CAP_ID_EXP
[deliverable/linux.git] / drivers / net / wireless / rtlwifi / pci.c
CommitLineData
0c817338
LF
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "core.h"
31#include "wifi.h"
32#include "pci.h"
33#include "base.h"
34#include "ps.h"
c7cfe38e 35#include "efuse.h"
0c817338
LF
36
/* PCI bridge vendor IDs probed for ASPM quirks, indexed by the
 * PCI_BRIDGE_VENDOR_* constants (order must match that enum). */
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
	INTEL_VENDOR_ID,
	ATI_VENDOR_ID,
	AMD_VENDOR_ID,
	SIS_VENDOR_ID
};

/* Map mac80211 access-category index (0=VO .. 3=BK, per
 * skb_get_queue_mapping()) to the hardware TX queue number. */
static const u8 ac_to_hwq[] = {
	VO_QUEUE,
	VI_QUEUE,
	BE_QUEUE,
	BK_QUEUE
};
50
d3bb1429 51static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
c7cfe38e
C
52 struct sk_buff *skb)
53{
54 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
d3bb1429 55 __le16 fc = rtl_get_fc(skb);
c7cfe38e
C
56 u8 queue_index = skb_get_queue_mapping(skb);
57
58 if (unlikely(ieee80211_is_beacon(fc)))
59 return BEACON_QUEUE;
60 if (ieee80211_is_mgmt(fc))
61 return MGNT_QUEUE;
62 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
63 if (ieee80211_is_nullfunc(fc))
64 return HIGH_QUEUE;
65
66 return ac_to_hwq[queue_index];
67}
68
0c817338
LF
69/* Update PCI dependent default settings*/
70static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
71{
72 struct rtl_priv *rtlpriv = rtl_priv(hw);
73 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
74 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
75 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
76 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
c7cfe38e 77 u8 init_aspm;
0c817338
LF
78
79 ppsc->reg_rfps_level = 0;
7ea47240 80 ppsc->support_aspm = 0;
0c817338
LF
81
82 /*Update PCI ASPM setting */
83 ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
84 switch (rtlpci->const_pci_aspm) {
85 case 0:
86 /*No ASPM */
87 break;
88
89 case 1:
90 /*ASPM dynamically enabled/disable. */
91 ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
92 break;
93
94 case 2:
95 /*ASPM with Clock Req dynamically enabled/disable. */
96 ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
97 RT_RF_OFF_LEVL_CLK_REQ);
98 break;
99
100 case 3:
101 /*
102 * Always enable ASPM and Clock Req
103 * from initialization to halt.
104 * */
105 ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
106 ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
107 RT_RF_OFF_LEVL_CLK_REQ);
108 break;
109
110 case 4:
111 /*
112 * Always enable ASPM without Clock Req
113 * from initialization to halt.
114 * */
115 ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
116 RT_RF_OFF_LEVL_CLK_REQ);
117 ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
118 break;
119 }
120
121 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
122
123 /*Update Radio OFF setting */
124 switch (rtlpci->const_hwsw_rfoff_d3) {
125 case 1:
126 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
127 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
128 break;
129
130 case 2:
131 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
132 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
133 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
134 break;
135
136 case 3:
137 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
138 break;
139 }
140
141 /*Set HW definition to determine if it supports ASPM. */
142 switch (rtlpci->const_support_pciaspm) {
c7cfe38e
C
143 case 0:{
144 /*Not support ASPM. */
145 bool support_aspm = false;
146 ppsc->support_aspm = support_aspm;
147 break;
148 }
149 case 1:{
150 /*Support ASPM. */
151 bool support_aspm = true;
152 bool support_backdoor = true;
153 ppsc->support_aspm = support_aspm;
154
155 /*if (priv->oem_id == RT_CID_TOSHIBA &&
156 !priv->ndis_adapter.amd_l1_patch)
157 support_backdoor = false; */
158
159 ppsc->support_backdoor = support_backdoor;
160
161 break;
162 }
0c817338
LF
163 case 2:
164 /*ASPM value set by chipset. */
c7cfe38e
C
165 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
166 bool support_aspm = true;
167 ppsc->support_aspm = support_aspm;
168 }
0c817338
LF
169 break;
170 default:
171 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
172 ("switch case not process\n"));
173 break;
174 }
c7cfe38e
C
175
176 /* toshiba aspm issue, toshiba will set aspm selfly
177 * so we should not set aspm in driver */
178 pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
179 if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
180 init_aspm == 0x43)
181 ppsc->support_aspm = false;
182}
183
0c817338
LF
184static bool _rtl_pci_platform_switch_device_pci_aspm(
185 struct ieee80211_hw *hw,
186 u8 value)
187{
188 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
c7cfe38e
C
189 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
190
191 if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
192 value |= 0x40;
0c817338 193
0c817338
LF
194 pci_write_config_byte(rtlpci->pdev, 0x80, value);
195
32473284 196 return false;
0c817338
LF
197}
198
199/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
200static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
201{
202 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
c7cfe38e 203 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
0c817338 204
0c817338 205 pci_write_config_byte(rtlpci->pdev, 0x81, value);
0c817338 206
c7cfe38e
C
207 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
208 udelay(100);
209
32473284 210 return true;
0c817338
LF
211}
212
/*
 * Disable device ASPM, then disable the PCI bridge's ASPM.
 * Mirrors rtl_pci_enable_aspm(); no-op unless ASPM is supported and the
 * upstream bridge vendor was recognized at probe time.
 */
static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
	/* Retrieve original configuration settings. */
	u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
	u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
				    pcibridge_linkctrlreg;
	u16 aspmlevel = 0;
	u8 tmp_u1b = 0;

	if (!ppsc->support_aspm)
		return;

	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 ("PCI(Bridge) UNKNOWN.\n"));
		return;
	}

	/* Turn off clock-request first if it was latched on. */
	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
		RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
		_rtl_pci_switch_clk_req(hw, 0x0);
	}

	/* for promising device will in L0 state after an I/O. */
	pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);

	/* Clear the ASPM-enable bits (L0s/L1) in both the cached device
	 * link-control value and the bridge's. */
	aspmlevel |= BIT(0) | BIT(1);
	linkctrl_reg &= ~aspmlevel;
	pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));

	_rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
	udelay(50);

	/*4 Disable Pci Bridge ASPM via the raw config-port helpers. */
	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
				     pcicfg_addrport + (num4bytes << 2));
	rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);

	udelay(50);
}
263
/*
 * Enable RTL8192SE ASPM & enable PCI bridge ASPM for power saving.
 * We should follow the sequence: enable on the RTL8192SE first, then
 * enable PCI bridge ASPM, or the system will show a bluescreen.
 */
static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_busnum = pcipriv->ndis_adapter.pcibridge_busnum;
	u8 pcibridge_devnum = pcipriv->ndis_adapter.pcibridge_devnum;
	u8 pcibridge_funcnum = pcipriv->ndis_adapter.pcibridge_funcnum;
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
	u16 aspmlevel;
	u8 u_pcibridge_aspmsetting;
	u8 u_device_aspmsetting;

	if (!ppsc->support_aspm)
		return;

	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 ("PCI(Bridge) UNKNOWN.\n"));
		return;
	}

	/*4 Enable Pci Bridge ASPM */
	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
				     pcicfg_addrport + (num4bytes << 2));

	u_pcibridge_aspmsetting =
	    pcipriv->ndis_adapter.pcibridge_linkctrlreg |
	    rtlpci->const_hostpci_aspm_setting;

	/* On Intel bridges, clear bit 0 of the bridge setting. */
	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
		u_pcibridge_aspmsetting &= ~BIT(0);

	rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 ("PlatformEnableASPM():PciBridge busnumber[%x], "
		  "DevNumbe[%x], funcnumber[%x], Write reg[%x] = %x\n",
		  pcibridge_busnum, pcibridge_devnum, pcibridge_funcnum,
		  (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
		  u_pcibridge_aspmsetting));

	udelay(50);

	/*Get ASPM level (with/without Clock Req) */
	aspmlevel = rtlpci->const_devicepci_aspm_setting;
	u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;

	u_device_aspmsetting |= aspmlevel;

	_rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);

	/* Latch clock-request on when the power-save level asks for it. */
	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
		_rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
					     RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
		RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
	}
	udelay(100);
}
335
/*
 * Probe whether the AMD bridge "L1 patch" is in effect: write 0xA0 to
 * bridge config offset 0xE0, read it back, and if it sticks test BIT(23)
 * of the dword at offset 0xE4. Returns true when that bit is set.
 * NOTE(review): these registers are vendor-specific; their meaning is
 * inferred only from this usage — confirm against AMD chipset docs.
 */
static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
{
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;

	bool status = false;
	u8 offset_e0;
	unsigned offset_e4;

	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
				     pcicfg_addrport + 0xE0);
	rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);

	/* Re-select the address and read back the byte we just wrote. */
	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
				     pcicfg_addrport + 0xE0);
	rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);

	if (offset_e0 == 0xA0) {
		rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
					     pcicfg_addrport + 0xE4);
		rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
		if (offset_e4 & BIT(23))
			status = true;
	}

	return status;
}
363
d3bb1429 364static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
0c817338
LF
365{
366 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
367 u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
368 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
369 u8 linkctrl_reg;
c7cfe38e 370 u8 num4bbytes;
0c817338 371
c7cfe38e 372 num4bbytes = (capabilityoffset + 0x10) / 4;
0c817338
LF
373
374 /*Read Link Control Register */
375 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
c7cfe38e 376 pcicfg_addrport + (num4bbytes << 2));
0c817338
LF
377 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
378
379 pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
380}
381
382static void rtl_pci_parse_configuration(struct pci_dev *pdev,
383 struct ieee80211_hw *hw)
384{
385 struct rtl_priv *rtlpriv = rtl_priv(hw);
386 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
387
388 u8 tmp;
389 int pos;
390 u8 linkctrl_reg;
391
392 /*Link Control Register */
393 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
394 pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
395 pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
396
397 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
398 ("Link Control Register =%x\n",
399 pcipriv->ndis_adapter.linkctrl_reg));
400
401 pci_read_config_byte(pdev, 0x98, &tmp);
402 tmp |= BIT(4);
403 pci_write_config_byte(pdev, 0x98, tmp);
404
405 tmp = 0x17;
406 pci_write_config_byte(pdev, 0x70f, tmp);
407}
408
c7cfe38e 409static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
0c817338
LF
410{
411 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
412
413 _rtl_pci_update_default_setting(hw);
414
415 if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
416 /*Always enable ASPM & Clock Req. */
417 rtl_pci_enable_aspm(hw);
418 RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
419 }
420
421}
422
0c817338
LF
423static void _rtl_pci_io_handler_init(struct device *dev,
424 struct ieee80211_hw *hw)
425{
426 struct rtl_priv *rtlpriv = rtl_priv(hw);
427
428 rtlpriv->io.dev = dev;
429
430 rtlpriv->io.write8_async = pci_write8_async;
431 rtlpriv->io.write16_async = pci_write16_async;
432 rtlpriv->io.write32_async = pci_write32_async;
433
434 rtlpriv->io.read8_sync = pci_read8_sync;
435 rtlpriv->io.read16_sync = pci_read16_sync;
436 rtlpriv->io.read32_sync = pci_read32_sync;
437
438}
439
/* Counterpart of _rtl_pci_io_handler_init(); nothing to release today,
 * kept as a hook for symmetry with the init path. */
static void _rtl_pci_io_handler_release(struct ieee80211_hw *hw)
{
}
443
c7cfe38e
C
/*
 * Fill @tcb_desc->empkt_len[]/empkt_num with the on-air lengths of up to
 * five pending A-MPDU frames queued on TID @tid, so the early-mode header
 * of @skb can describe the packets that follow it. Walks the software
 * wait queue under waitq_lock. Always returns true.
 */
static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
		struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 additionlen = FCS_LEN;
	struct sk_buff *next_skb;

	/* Per-frame overhead: here open is 4, wep/tkip is 8, aes is 12. */
	if (info->control.hw_key)
		additionlen += info->control.hw_key->icv_len;

	/* The most skb num is 6 */
	tcb_desc->empkt_num = 0;
	spin_lock_bh(&rtlpriv->locks.waitq_lock);
	skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
		struct ieee80211_tx_info *next_info;

		next_info = IEEE80211_SKB_CB(next_skb);
		/* Stop at the first non-AMPDU frame. */
		if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
			tcb_desc->empkt_len[tcb_desc->empkt_num] =
				next_skb->len + additionlen;
			tcb_desc->empkt_num++;
		} else {
			break;
		}

		if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
				      next_skb))
			break;

		/* Cap at 5 described packets (indices 0..4). */
		if (tcb_desc->empkt_num >= 5)
			break;
	}
	spin_unlock_bh(&rtlpriv->locks.waitq_lock);

	return true;
}
482
/* just for early mode now */
static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct sk_buff *skb = NULL;
	struct ieee80211_tx_info *info = NULL;
	int tid; /* should be int */

	if (!rtlpriv->rtlhal.earlymode_enable)
		return;

	/* we just use early mode for BE/BK/VI/VO queues */
	for (tid = 7; tid >= 0; tid--) {
		u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
		struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
		while (!mac->act_scanning &&
		       rtlpriv->psc.rfpwr_state == ERFON) {
			struct rtl_tcb_desc tcb_desc;
			memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));

			/* Dequeue only while the hw ring has more than
			 * 5 free descriptors; otherwise move on. */
			spin_lock_bh(&rtlpriv->locks.waitq_lock);
			if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
			    (ring->entries - skb_queue_len(&ring->queue) > 5)) {
				skb = skb_dequeue(&mac->skb_waitq[tid]);
			} else {
				spin_unlock_bh(&rtlpriv->locks.waitq_lock);
				break;
			}
			spin_unlock_bh(&rtlpriv->locks.waitq_lock);

			/* Some macaddr can't do early mode, like
			 * multicast/broadcast/no_qos data. */
			info = IEEE80211_SKB_CB(skb);
			if (info->flags & IEEE80211_TX_CTL_AMPDU)
				_rtl_update_earlymode_info(hw, skb,
							   &tcb_desc, tid);

			rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
		}
	}
}
526
527
0c817338
LF
/*
 * Reclaim completed TX descriptors on ring @prio: unmap the DMA buffer,
 * report TX status back to mac80211, and wake the mac80211 queue when
 * the ring drains low. Stops at the first descriptor still owned by hw.
 */
static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb;
		struct ieee80211_tx_info *info;
		__le16 fc;
		u8 tid;

		u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
							  HW_DESC_OWN);

		/*
		 * A beacon packet will only use the first descriptor by
		 * default, and its own bit may not be cleared by the
		 * hardware — so stop as soon as hw still owns one.
		 */
		if (own)
			return;
		ring->idx = (ring->idx + 1) % ring->entries;

		skb = __skb_dequeue(&ring->queue);
		pci_unmap_single(rtlpci->pdev,
				 rtlpriv->cfg->ops->
				 get_desc((u8 *) entry, true,
					  HW_DESC_TXBUFF_ADDR),
				 skb->len, PCI_DMA_TODEVICE);

		/* remove early mode header */
		if (rtlpriv->rtlhal.earlymode_enable)
			skb_pull(skb, EM_HDR_LEN);

		RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
			 ("new ring->idx:%d, "
			  "free: skb_queue_len:%d, free: seq:%x\n",
			  ring->idx,
			  skb_queue_len(&ring->queue),
			  *(u16 *) (skb->data + 22)));

		/* Command-queue frames are internal; no status to report. */
		if (prio == TXCMD_QUEUE) {
			dev_kfree_skb(skb);
			goto tx_status_ok;

		}

		/* for sw LPS: just after a NULL skb with PM bit goes out,
		 * the AP knows we are asleep, so track the in-AP power
		 * state and don't let rf sleep at the wrong moment */
		fc = rtl_get_fc(skb);
		if (ieee80211_is_nullfunc(fc)) {
			if (ieee80211_has_pm(fc)) {
				rtlpriv->mac80211.offchan_delay = true;
				rtlpriv->psc.state_inap = 1;
			} else {
				rtlpriv->psc.state_inap = 0;
			}
		}

		/* update tid tx pkt num */
		tid = rtl_get_tid(skb);
		if (tid <= 7)
			rtlpriv->link_info.tidtx_inperiod[tid]++;

		info = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(info);

		info->flags |= IEEE80211_TX_STAT_ACK;
		/*info->status.rates[0].count = 1; */

		ieee80211_tx_status_irqsafe(hw, skb);

		/* Ring nearly drained: let mac80211 feed us again. */
		if ((ring->entries - skb_queue_len(&ring->queue))
				== 2) {

			RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
				 ("more desc left, wake"
				  "skb_queue@%d,ring->idx = %d,"
				  "skb_queue_len = 0x%d\n",
				  prio, ring->idx,
				  skb_queue_len(&ring->queue)));

			ieee80211_wake_queue(hw,
					     skb_get_queue_mapping
					     (skb));
		}
tx_status_ok:
		skb = NULL;
	}

	/* Traffic is flowing — schedule leaving power-save state. */
	if (((rtlpriv->link_info.num_rx_inperiod +
	      rtlpriv->link_info.num_tx_inperiod) > 8) ||
	    (rtlpriv->link_info.num_rx_inperiod > 2)) {
		tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
	}
}
628
fd854772
MM
629static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
630 struct ieee80211_rx_status rx_status)
631{
632 struct rtl_priv *rtlpriv = rtl_priv(hw);
633 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
634 __le16 fc = rtl_get_fc(skb);
635 bool unicast = false;
636 struct sk_buff *uskb = NULL;
637 u8 *pdata;
638
639
640 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
641
642 if (is_broadcast_ether_addr(hdr->addr1)) {
643 ;/*TODO*/
644 } else if (is_multicast_ether_addr(hdr->addr1)) {
645 ;/*TODO*/
646 } else {
647 unicast = true;
648 rtlpriv->stats.rxbytesunicast += skb->len;
649 }
650
651 rtl_is_special_data(hw, skb, false);
652
653 if (ieee80211_is_data(fc)) {
654 rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
655
656 if (unicast)
657 rtlpriv->link_info.num_rx_inperiod++;
658 }
659
660 /* for sw lps */
661 rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
662 rtl_recognize_peer(hw, (void *)skb->data, skb->len);
663 if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
664 (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) &&
665 (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)))
666 return;
667
668 if (unlikely(!rtl_action_proc(hw, skb, false)))
669 return;
670
671 uskb = dev_alloc_skb(skb->len + 128);
672 memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status));
673 pdata = (u8 *)skb_put(uskb, skb->len);
674 memcpy(pdata, skb->data, skb->len);
675
676 ieee80211_rx_irqsafe(hw, uskb);
677}
678
0c817338
LF
/*
 * Drain the RX MPDU ring. For each descriptor released by hardware:
 * deliver the frame, swap in a freshly allocated skb, then re-arm the
 * descriptor (buffer address, length, EOR on the last slot, OWN bit).
 * On CRC/hw error or allocation failure the old skb is kept and the
 * descriptor is simply re-armed (the `done:` path).
 */
static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;

	struct ieee80211_rx_status rx_status = { 0 };
	unsigned int count = rtlpci->rxringcount;
	u8 own;
	u8 tmp_one;
	u32 bufferaddress;

	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};
	int index = rtlpci->rx_ring[rx_queue_idx].idx;

	/*RX NORMAL PKT */
	while (count--) {
		/*rx descriptor */
		struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
				index];
		/*rx pkt */
		struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
				index];
		struct sk_buff *new_skb = NULL;

		own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
						       false, HW_DESC_OWN);

		/*wait data to be filled by hardware */
		if (own)
			break;

		rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
						 &rx_status,
						 (u8 *) pdesc, skb);

		/* Bad frame: keep the old buffer, just re-arm the desc. */
		if (stats.crc || stats.hwerror)
			goto done;

		new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
		if (unlikely(!new_skb)) {
			RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
				 DBG_DMESG,
				 ("can't alloc skb for rx\n"));
			goto done;
		}

		/* The mapped DMA handle lives in skb->cb (set at map time). */
		pci_unmap_single(rtlpci->pdev,
				 *((dma_addr_t *) skb->cb),
				 rtlpci->rxbuffersize,
				 PCI_DMA_FROMDEVICE);

		skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, false,
							 HW_DESC_RXPKT_LEN));
		skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift);

		/*
		 * NOTICE This can not be used for mac80211:
		 * stripping the FCS is done in mac80211 code;
		 * if done here, sec DHCP will fail.
		 * skb_trim(skb, skb->len - 4);
		 */

		_rtl_receive_one(hw, skb, rx_status);

		/* Traffic is flowing — schedule leaving power save. */
		if (((rtlpriv->link_info.num_rx_inperiod +
		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
		    (rtlpriv->link_info.num_rx_inperiod > 2)) {
			tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
		}

		/* Replace the consumed buffer with the fresh one. */
		dev_kfree_skb_any(skb);
		skb = new_skb;

		rtlpci->rx_ring[rx_queue_idx].rx_buf[index] = skb;
		*((dma_addr_t *) skb->cb) =
		    pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
				   rtlpci->rxbuffersize,
				   PCI_DMA_FROMDEVICE);

done:
		/* Re-arm this descriptor and return ownership to hw. */
		bufferaddress = (*((dma_addr_t *)skb->cb));
		tmp_one = 1;
		rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
					    HW_DESC_RXBUFF_ADDR,
					    (u8 *)&bufferaddress);
		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
					    HW_DESC_RXPKT_LEN,
					    (u8 *)&rtlpci->rxbuffersize);

		if (index == rtlpci->rxringcount - 1)
			rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
						    HW_DESC_RXERO,
						    (u8 *)&tmp_one);

		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
					    (u8 *)&tmp_one);

		index = (index + 1) % rtlpci->rxringcount;
	}

	rtlpci->rx_ring[rx_queue_idx].idx = index;
}
786
0c817338
LF
/*
 * Top-half interrupt handler. Reads the interrupt status, services
 * beacon/TX/RX causes inline under irq_th_lock, and defers early-mode
 * TX draining to the irq tasklet. Always reports IRQ_HANDLED.
 */
static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *hw = dev_id;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u32 inta = 0;
	u32 intb = 0;

	if (rtlpci->irq_enabled == 0)
		return IRQ_HANDLED;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	/*read ISR: 4/8bytes */
	rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);

	/*Shared IRQ or HW disappeared */
	if (!inta || inta == 0xffff)
		goto done;

	/*<1> beacon related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("beacon ok interrupt!\n"));
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("beacon err interrupt!\n"));
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("beacon interrupt!\n"));
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("prepare beacon for interrupt!\n"));
		tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
	}

	/*<3> Tx related */
	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("IMR_TXFOVW!\n"));

	if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("Manage ok interrupt!\n"));
		_rtl_pci_tx_isr(hw, MGNT_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("HIGH_QUEUE ok interrupt!\n"));
		_rtl_pci_tx_isr(hw, HIGH_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("BK Tx OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, BK_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("BE TX OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, BE_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("VI TX OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, VI_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("Vo TX OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, VO_QUEUE);
	}

	/* The 8192SE has an extra command queue to reclaim. */
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
		if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
			rtlpriv->link_info.num_tx_inperiod++;

			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
				 ("CMD TX OK interrupt!\n"));
			_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
		}
	}

	/*<2> Rx related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
		_rtl_pci_rx_interrupt(hw);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 ("rx descriptor unavailable!\n"));
		_rtl_pci_rx_interrupt(hw);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
		_rtl_pci_rx_interrupt(hw);
	}

	/* Early-mode TX frames are drained from tasklet context. */
	if (rtlpriv->rtlhal.earlymode_enable)
		tasklet_schedule(&rtlpriv->works.irq_tasklet);

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	return IRQ_HANDLED;

done:
	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	return IRQ_HANDLED;
}
916
/* IRQ bottom half: drain the early-mode TX wait queue outside hard-irq
 * context (the only work deferred to this tasklet today). */
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
{
	_rtl_pci_tx_chk_waitq(hw);
}
921
67fc6052
MM
/* Tasklet wrapper around rtl_lps_leave(). NOTE(review): the tasklet is
 * named "ips_leave" but the callee is the LPS exit path — confirm the
 * naming against ps.c before renaming either side. */
static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
{
	rtl_lps_leave(hw);
}
926
0c817338
LF
/*
 * Build the next beacon: free the previously queued beacon skb, fetch a
 * fresh one from mac80211, fill descriptor 0 of the beacon ring, and
 * hand ownership to the hardware. Scheduled from the BcnInt interrupt.
 */
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl8192_tx_ring *ring = NULL;
	struct ieee80211_hdr *hdr = NULL;
	struct ieee80211_tx_info *info = NULL;
	struct sk_buff *pskb = NULL;
	struct rtl_tx_desc *pdesc = NULL;
	struct rtl_tcb_desc tcb_desc;
	u8 temp_one = 1;

	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
	ring = &rtlpci->tx_ring[BEACON_QUEUE];
	/* Drop the stale beacon still sitting in the ring, if any. */
	pskb = __skb_dequeue(&ring->queue);
	if (pskb)
		kfree_skb(pskb);

	/*NB: the beacon data buffer must be 32-bit aligned. */
	pskb = ieee80211_beacon_get(hw, mac->vif);
	if (pskb == NULL)
		return;
	hdr = rtl_get_hdr(pskb);
	info = IEEE80211_SKB_CB(pskb);
	/* Beacons always use descriptor 0 of the beacon ring. */
	pdesc = &ring->desc[0];
	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
					info, pskb, BEACON_QUEUE, &tcb_desc);

	__skb_queue_tail(&ring->queue, pskb);

	rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
				    (u8 *)&temp_one);

	return;
}
963
964static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
965{
966 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
967 u8 i;
968
969 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
970 rtlpci->txringcount[i] = RT_TXDESC_NUM;
971
972 /*
973 *we just alloc 2 desc for beacon queue,
974 *because we just need first desc in hw beacon.
975 */
976 rtlpci->txringcount[BEACON_QUEUE] = 2;
977
978 /*
979 *BE queue need more descriptor for performance
980 *consideration or, No more tx desc will happen,
981 *and may cause mac80211 mem leakage.
982 */
983 rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
984
985 rtlpci->rxbuffersize = 9100; /*2048/1024; */
986 rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
987}
988
/*
 * One-time driver-state initialization at probe: wire hw/pdev pointers,
 * set TX/RX ring defaults, seed mac80211/AMPDU/QoS defaults, and create
 * the three driver tasklets.
 */
static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
		struct pci_dev *pdev)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	rtlpci->up_first_time = true;
	rtlpci->being_init_adapter = false;

	rtlhal->hw = hw;
	rtlpci->pdev = pdev;

	/*Tx/Rx related var */
	_rtl_pci_init_trx_var(hw);

	/*IBSS*/ mac->beacon_interval = 100;

	/*AMPDU*/
	mac->min_space_cfg = 0;
	mac->max_mss_density = 0;
	/*set sane AMPDU defaults */
	mac->current_ampdu_density = 7;
	mac->current_ampdu_factor = 3;

	/*QOS*/
	rtlpci->acm_method = eAcmWay2_SW;

	/* Tasklets: early-mode TX drain, beacon refill, power-save exit. */
	tasklet_init(&rtlpriv->works.irq_tasklet,
		     (void (*)(unsigned long))_rtl_pci_irq_tasklet,
		     (unsigned long)hw);
	tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
		     (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
		     (unsigned long)hw);
	tasklet_init(&rtlpriv->works.ips_leave_tasklet,
		     (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
		     (unsigned long)hw);
}
1029
/*
 * Allocate the TX descriptor ring for queue @prio and chain the
 * descriptors into a circle via HW_DESC_TX_NEXTDESC_ADDR.
 * Returns 0 on success, -ENOMEM on allocation failure (or when the
 * returned address fails the low-byte check — presumably a 256-byte
 * alignment requirement of the hardware; inferred from the 0xFF mask).
 */
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
		unsigned int prio, unsigned int entries)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_tx_desc *ring;
	dma_addr_t dma;
	u32 nextdescaddress;
	int i;

	ring = pci_alloc_consistent(rtlpci->pdev,
				    sizeof(*ring) * entries, &dma);

	if (!ring || (unsigned long)ring & 0xFF) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Cannot allocate TX ring (prio = %d)\n", prio));
		return -ENOMEM;
	}

	memset(ring, 0, sizeof(*ring) * entries);
	rtlpci->tx_ring[prio].desc = ring;
	rtlpci->tx_ring[prio].dma = dma;
	rtlpci->tx_ring[prio].idx = 0;
	rtlpci->tx_ring[prio].entries = entries;
	skb_queue_head_init(&rtlpci->tx_ring[prio].queue);

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 ("queue:%d, ring_addr:%p\n", prio, ring));

	/* Link each descriptor to the next; the last wraps to the first. */
	for (i = 0; i < entries; i++) {
		nextdescaddress = (u32) dma +
				  ((i + 1) % entries) *
				  sizeof(*ring);

		rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
					    true, HW_DESC_TX_NEXTDESC_ADDR,
					    (u8 *)&nextdescaddress);
	}

	return 0;
}
1071
1072static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1073{
1074 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1075 struct rtl_priv *rtlpriv = rtl_priv(hw);
1076 struct rtl_rx_desc *entry = NULL;
1077 int i, rx_queue_idx;
1078 u8 tmp_one = 1;
1079
1080 /*
1081 *rx_queue_idx 0:RX_MPDU_QUEUE
1082 *rx_queue_idx 1:RX_CMD_QUEUE
1083 */
1084 for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1085 rx_queue_idx++) {
1086 rtlpci->rx_ring[rx_queue_idx].desc =
1087 pci_alloc_consistent(rtlpci->pdev,
1088 sizeof(*rtlpci->rx_ring[rx_queue_idx].
1089 desc) * rtlpci->rxringcount,
1090 &rtlpci->rx_ring[rx_queue_idx].dma);
1091
1092 if (!rtlpci->rx_ring[rx_queue_idx].desc ||
1093 (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
1094 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1095 ("Cannot allocate RX ring\n"));
1096 return -ENOMEM;
1097 }
1098
1099 memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
1100 sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
1101 rtlpci->rxringcount);
1102
1103 rtlpci->rx_ring[rx_queue_idx].idx = 0;
1104
0019a2c9
LF
1105 /* If amsdu_8k is disabled, set buffersize to 4096. This
1106 * change will reduce memory fragmentation.
1107 */
1108 if (rtlpci->rxbuffersize > 4096 &&
1109 rtlpriv->rtlhal.disable_amsdu_8k)
1110 rtlpci->rxbuffersize = 4096;
1111
0c817338
LF
1112 for (i = 0; i < rtlpci->rxringcount; i++) {
1113 struct sk_buff *skb =
1114 dev_alloc_skb(rtlpci->rxbuffersize);
1115 u32 bufferaddress;
0c817338
LF
1116 if (!skb)
1117 return 0;
bdc4bf65 1118 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
0c817338
LF
1119
1120 /*skb->dev = dev; */
1121
1122 rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;
1123
1124 /*
1125 *just set skb->cb to mapping addr
1126 *for pci_unmap_single use
1127 */
1128 *((dma_addr_t *) skb->cb) =
1129 pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
1130 rtlpci->rxbuffersize,
1131 PCI_DMA_FROMDEVICE);
1132
d3bb1429 1133 bufferaddress = (*((dma_addr_t *)skb->cb));
0c817338
LF
1134 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1135 HW_DESC_RXBUFF_ADDR,
1136 (u8 *)&bufferaddress);
1137 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1138 HW_DESC_RXPKT_LEN,
1139 (u8 *)&rtlpci->
1140 rxbuffersize);
1141 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1142 HW_DESC_RXOWN,
1143 (u8 *)&tmp_one);
1144 }
1145
1146 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1147 HW_DESC_RXERO, (u8 *)&tmp_one);
1148 }
1149 return 0;
1150}
1151
1152static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1153 unsigned int prio)
1154{
1155 struct rtl_priv *rtlpriv = rtl_priv(hw);
1156 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1157 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
1158
1159 while (skb_queue_len(&ring->queue)) {
1160 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
1161 struct sk_buff *skb = __skb_dequeue(&ring->queue);
1162
1163 pci_unmap_single(rtlpci->pdev,
d3bb1429 1164 rtlpriv->cfg->
0c817338 1165 ops->get_desc((u8 *) entry, true,
d3bb1429 1166 HW_DESC_TXBUFF_ADDR),
0c817338
LF
1167 skb->len, PCI_DMA_TODEVICE);
1168 kfree_skb(skb);
1169 ring->idx = (ring->idx + 1) % ring->entries;
1170 }
1171
1172 pci_free_consistent(rtlpci->pdev,
1173 sizeof(*ring->desc) * ring->entries,
1174 ring->desc, ring->dma);
1175 ring->desc = NULL;
1176}
1177
1178static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
1179{
1180 int i, rx_queue_idx;
1181
1182 /*rx_queue_idx 0:RX_MPDU_QUEUE */
1183 /*rx_queue_idx 1:RX_CMD_QUEUE */
1184 for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1185 rx_queue_idx++) {
1186 for (i = 0; i < rtlpci->rxringcount; i++) {
1187 struct sk_buff *skb =
1188 rtlpci->rx_ring[rx_queue_idx].rx_buf[i];
1189 if (!skb)
1190 continue;
1191
1192 pci_unmap_single(rtlpci->pdev,
1193 *((dma_addr_t *) skb->cb),
1194 rtlpci->rxbuffersize,
1195 PCI_DMA_FROMDEVICE);
1196 kfree_skb(skb);
1197 }
1198
1199 pci_free_consistent(rtlpci->pdev,
1200 sizeof(*rtlpci->rx_ring[rx_queue_idx].
1201 desc) * rtlpci->rxringcount,
1202 rtlpci->rx_ring[rx_queue_idx].desc,
1203 rtlpci->rx_ring[rx_queue_idx].dma);
1204 rtlpci->rx_ring[rx_queue_idx].desc = NULL;
1205 }
1206}
1207
1208static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
1209{
1210 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1211 int ret;
1212 int i;
1213
1214 ret = _rtl_pci_init_rx_ring(hw);
1215 if (ret)
1216 return ret;
1217
1218 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1219 ret = _rtl_pci_init_tx_ring(hw, i,
1220 rtlpci->txringcount[i]);
1221 if (ret)
1222 goto err_free_rings;
1223 }
1224
1225 return 0;
1226
1227err_free_rings:
1228 _rtl_pci_free_rx_ring(rtlpci);
1229
1230 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1231 if (rtlpci->tx_ring[i].desc)
1232 _rtl_pci_free_tx_ring(hw, i);
1233
1234 return 1;
1235}
1236
1237static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
1238{
1239 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1240 u32 i;
1241
1242 /*free rx rings */
1243 _rtl_pci_free_rx_ring(rtlpci);
1244
1245 /*free tx rings */
1246 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1247 _rtl_pci_free_tx_ring(hw, i);
1248
1249 return 0;
1250}
1251
/*
 * Reset all rings to their initial state without freeing them:
 * every RX descriptor is handed back to the hardware (OWN bit set)
 * and any frame still pending on a TX ring is unmapped and freed.
 * Called before (re)starting the adapter.  Always returns 0.
 */
int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int i, rx_queue_idx;
	unsigned long flags;
	u8 tmp_one = 1;

	/*rx_queue_idx 0:RX_MPDU_QUEUE */
	/*rx_queue_idx 1:RX_CMD_QUEUE */
	for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
	     rx_queue_idx++) {
		/*
		 *force the rx_ring[RX_MPDU_QUEUE/
		 *RX_CMD_QUEUE].idx to the first one
		 */
		if (rtlpci->rx_ring[rx_queue_idx].desc) {
			struct rtl_rx_desc *entry = NULL;

			for (i = 0; i < rtlpci->rxringcount; i++) {
				entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
				/* give the descriptor back to the NIC */
				rtlpriv->cfg->ops->set_desc((u8 *) entry,
							    false,
							    HW_DESC_RXOWN,
							    (u8 *)&tmp_one);
			}
			rtlpci->rx_ring[rx_queue_idx].idx = 0;
		}
	}

	/*
	 *after reset, release previous pending packet,
	 *and force the tx idx to the first one
	 */
	/* irq_th_lock keeps the interrupt TX-done path away while the
	 * rings are rewound */
	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
		if (rtlpci->tx_ring[i].desc) {
			struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];

			while (skb_queue_len(&ring->queue)) {
				struct rtl_tx_desc *entry =
				    &ring->desc[ring->idx];
				struct sk_buff *skb =
				    __skb_dequeue(&ring->queue);

				pci_unmap_single(rtlpci->pdev,
						 rtlpriv->cfg->ops->
						 get_desc((u8 *)
						 entry,
						 true,
						 HW_DESC_TXBUFF_ADDR),
						 skb->len, PCI_DMA_TODEVICE);
				kfree_skb(skb);
				ring->idx = (ring->idx + 1) % ring->entries;
			}
			ring->idx = 0;
		}
	}

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	return 0;
}
1315
c7cfe38e
C
1316static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1317 struct sk_buff *skb)
0c817338 1318{
c7cfe38e
C
1319 struct rtl_priv *rtlpriv = rtl_priv(hw);
1320 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1321 struct ieee80211_sta *sta = info->control.sta;
1322 struct rtl_sta_info *sta_entry = NULL;
1323 u8 tid = rtl_get_tid(skb);
1324
1325 if (!sta)
1326 return false;
1327 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1328
1329 if (!rtlpriv->rtlhal.earlymode_enable)
1330 return false;
1331 if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
1332 return false;
1333 if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
1334 return false;
1335 if (tid > 7)
1336 return false;
1337
1338 /* maybe every tid should be checked */
1339 if (!rtlpriv->link_info.higher_busytxtraffic[tid])
1340 return false;
1341
1342 spin_lock_bh(&rtlpriv->locks.waitq_lock);
1343 skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
1344 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
0c817338 1345
c7cfe38e 1346 return true;
0c817338
LF
1347}
1348
/*
 * Transmit one frame: pick the hardware queue, claim the next free
 * descriptor, fill it via the chip-specific fill_tx_desc op, hand
 * ownership to the NIC and kick TX polling.  Returns 0 on success;
 * returns skb->len (non-zero) when the ring is full and the frame was
 * not queued.
 */
static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
		struct rtl_tcb_desc *ptcb_desc)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_sta_info *sta_entry = NULL;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct rtl8192_tx_ring *ring;
	struct rtl_tx_desc *pdesc;
	u8 idx;
	u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
	unsigned long flags;
	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
	__le16 fc = rtl_get_fc(skb);
	u8 *pda_addr = hdr->addr1;
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	/*ssn */
	u8 tid = 0;
	u16 seq_number = 0;
	u8 own;
	u8 temp_one = 1;

	/* auth frames mean we are associating: make sure the NIC is
	 * out of inactive power save first */
	if (ieee80211_is_auth(fc)) {
		RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
		rtl_ips_nic_on(hw);
	}

	/* in SW power save, advertise PM on outgoing data frames */
	if (rtlpriv->psc.sw_ps_enabled) {
		if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
			!ieee80211_has_pm(fc))
			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
	}

	rtl_action_proc(hw, skb, true);

	if (is_multicast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesmulticast += skb->len;
	else if (is_broadcast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesbroadcast += skb->len;
	else
		rtlpriv->stats.txbytesunicast += skb->len;

	/* ring manipulation below races with the TX-done interrupt
	 * path, hence irq_th_lock */
	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	ring = &rtlpci->tx_ring[hw_queue];
	if (hw_queue != BEACON_QUEUE)
		idx = (ring->idx + skb_queue_len(&ring->queue)) %
				ring->entries;
	else
		idx = 0;	/* the beacon queue reuses descriptor 0 */

	pdesc = &ring->desc[idx];
	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
			true, HW_DESC_OWN);

	/* OWN still set means the NIC has not released this slot yet */
	if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 ("No more TX desc@%d, ring->idx = %d,"
			  "idx = %d, skb_queue_len = 0x%d\n",
			  hw_queue, ring->idx, idx,
			  skb_queue_len(&ring->queue)));

		spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
		return skb->len;
	}

	/* track per-TID sequence numbers for QoS data */
	if (ieee80211_is_data_qos(fc)) {
		tid = rtl_get_tid(skb);
		if (sta) {
			sta_entry = (struct rtl_sta_info *)sta->drv_priv;
			seq_number = (le16_to_cpu(hdr->seq_ctrl) &
				      IEEE80211_SCTL_SEQ) >> 4;
			seq_number += 1;

			if (!ieee80211_has_morefrags(hdr->frame_control))
				sta_entry->tids[tid].seq_number = seq_number;
		}
	}

	if (ieee80211_is_data(fc))
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);

	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
			info, skb, hw_queue, ptcb_desc);

	__skb_queue_tail(&ring->queue, skb);

	/* flip OWN last: from here the descriptor belongs to the NIC */
	rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
				    HW_DESC_OWN, (u8 *)&temp_one);


	/* throttle mac80211 before the ring actually overflows */
	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
	    hw_queue != BEACON_QUEUE) {

		RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
			 ("less desc left, stop skb_queue@%d, "
			  "ring->idx = %d,"
			  "idx = %d, skb_queue_len = 0x%d\n",
			   hw_queue, ring->idx, idx,
			   skb_queue_len(&ring->queue)));

		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
	}

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	rtlpriv->cfg->ops->tx_polling(hw, hw_queue);

	return 0;
}
1458
c7cfe38e
C
1459static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
1460{
1461 struct rtl_priv *rtlpriv = rtl_priv(hw);
1462 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1463 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1464 u16 i = 0;
1465 int queue_id;
1466 struct rtl8192_tx_ring *ring;
1467
1468 for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
1469 u32 queue_len;
1470 ring = &pcipriv->dev.tx_ring[queue_id];
1471 queue_len = skb_queue_len(&ring->queue);
1472 if (queue_len == 0 || queue_id == BEACON_QUEUE ||
1473 queue_id == TXCMD_QUEUE) {
1474 queue_id--;
1475 continue;
1476 } else {
1477 msleep(20);
1478 i++;
1479 }
1480
1481 /* we just wait 1s for all queues */
1482 if (rtlpriv->psc.rfpwr_state == ERFOFF ||
1483 is_hal_stop(rtlhal) || i >= 200)
1484 return;
1485 }
1486}
1487
/*
 * Tear down the PCI transport software state: free all TX/RX rings,
 * wait out a possibly-running interrupt handler, kill the tasklets
 * and destroy the driver workqueue.  Counterpart of rtl_pci_init().
 */
static void rtl_pci_deinit(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	_rtl_pci_deinit_trx_ring(hw);

	/* make sure no ISR is mid-flight before killing the tasklets */
	synchronize_irq(rtlpci->pdev->irq);
	tasklet_kill(&rtlpriv->works.irq_tasklet);
	tasklet_kill(&rtlpriv->works.ips_leave_tasklet);

	flush_workqueue(rtlpriv->works.rtl_wq);
	destroy_workqueue(rtlpriv->works.rtl_wq);

}
1503
d3bb1429 1504static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
0c817338
LF
1505{
1506 struct rtl_priv *rtlpriv = rtl_priv(hw);
1507 int err;
1508
1509 _rtl_pci_init_struct(hw, pdev);
1510
1511 err = _rtl_pci_init_trx_ring(hw);
1512 if (err) {
1513 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1514 ("tx ring initialization failed"));
1515 return err;
1516 }
1517
1518 return 1;
1519}
1520
/*
 * mac80211 "start" hook for PCI adapters: rewind the rings, bring the
 * hardware up via the chip-specific hw_init, enable interrupts and
 * mark the HAL started.  Returns 0 on success or the hw_init errno.
 */
static int rtl_pci_start(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));

	int err;

	rtl_pci_reset_trx_ring(hw);

	rtlpci->driver_is_goingto_unload = false;
	err = rtlpriv->cfg->ops->hw_init(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Failed to config hardware!\n"));
		return err;
	}

	rtlpriv->cfg->ops->enable_interrupt(hw);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));

	rtl_init_rx_config(hw);

	/*should after adapter start and interrupt enable. */
	set_hal_start(rtlhal);

	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

	rtlpci->up_first_time = false;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("OK\n"));
	return 0;
}
1555
/*
 * mac80211 "stop" hook: mark the HAL stopped, disable interrupts,
 * wait (up to ~100 ms) for an in-progress RF state change to finish,
 * then power the hardware down and re-enable ASPM.
 */
static void rtl_pci_stop(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u8 RFInProgressTimeOut = 0;

	/*
	 *should before disable interrrupt&adapter
	 *and will do it immediately.
	 */
	set_hal_stop(rtlhal);

	rtlpriv->cfg->ops->disable_interrupt(hw);
	tasklet_kill(&rtlpriv->works.ips_leave_tasklet);

	/* drop the lock while waiting so the RF-change path can take
	 * it and finish; re-acquire before each check */
	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	while (ppsc->rfchange_inprogress) {
		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
		if (RFInProgressTimeOut > 100) {
			spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
			break;
		}
		mdelay(1);
		RFInProgressTimeOut++;
		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	}
	/* claim the RF-change slot ourselves for the shutdown */
	ppsc->rfchange_inprogress = true;
	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

	rtlpci->driver_is_goingto_unload = true;
	rtlpriv->cfg->ops->hw_disable(hw);
	rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);

	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	ppsc->rfchange_inprogress = false;
	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

	rtl_pci_enable_aspm(hw);
}
1598
1599static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1600 struct ieee80211_hw *hw)
1601{
1602 struct rtl_priv *rtlpriv = rtl_priv(hw);
1603 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1604 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1605 struct pci_dev *bridge_pdev = pdev->bus->self;
1606 u16 venderid;
1607 u16 deviceid;
c7cfe38e 1608 u8 revisionid;
0c817338
LF
1609 u16 irqline;
1610 u8 tmp;
1611
fc7707a4 1612 pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
0c817338
LF
1613 venderid = pdev->vendor;
1614 deviceid = pdev->device;
c7cfe38e 1615 pci_read_config_byte(pdev, 0x8, &revisionid);
0c817338
LF
1616 pci_read_config_word(pdev, 0x3C, &irqline);
1617
fa7ccfb1
LF
1618 /* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
1619 * r8192e_pci, and RTL8192SE, which uses this driver. If the
1620 * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
1621 * the correct driver is r8192e_pci, thus this routine should
1622 * return false.
1623 */
1624 if (deviceid == RTL_PCI_8192SE_DID &&
1625 revisionid == RTL_PCI_REVISION_ID_8192PCIE)
1626 return false;
1627
0c817338
LF
1628 if (deviceid == RTL_PCI_8192_DID ||
1629 deviceid == RTL_PCI_0044_DID ||
1630 deviceid == RTL_PCI_0047_DID ||
1631 deviceid == RTL_PCI_8192SE_DID ||
1632 deviceid == RTL_PCI_8174_DID ||
1633 deviceid == RTL_PCI_8173_DID ||
1634 deviceid == RTL_PCI_8172_DID ||
1635 deviceid == RTL_PCI_8171_DID) {
c7cfe38e 1636 switch (revisionid) {
0c817338
LF
1637 case RTL_PCI_REVISION_ID_8192PCIE:
1638 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1639 ("8192 PCI-E is found - "
1640 "vid/did=%x/%x\n", venderid, deviceid));
1641 rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
1642 break;
1643 case RTL_PCI_REVISION_ID_8192SE:
1644 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1645 ("8192SE is found - "
1646 "vid/did=%x/%x\n", venderid, deviceid));
1647 rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1648 break;
1649 default:
1650 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1651 ("Err: Unknown device - "
1652 "vid/did=%x/%x\n", venderid, deviceid));
1653 rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1654 break;
1655
1656 }
1657 } else if (deviceid == RTL_PCI_8192CET_DID ||
1658 deviceid == RTL_PCI_8192CE_DID ||
1659 deviceid == RTL_PCI_8191CE_DID ||
1660 deviceid == RTL_PCI_8188CE_DID) {
1661 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
1662 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1663 ("8192C PCI-E is found - "
1664 "vid/did=%x/%x\n", venderid, deviceid));
c7cfe38e
C
1665 } else if (deviceid == RTL_PCI_8192DE_DID ||
1666 deviceid == RTL_PCI_8192DE_DID2) {
1667 rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
1668 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1669 ("8192D PCI-E is found - "
1670 "vid/did=%x/%x\n", venderid, deviceid));
0c817338
LF
1671 } else {
1672 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1673 ("Err: Unknown device -"
1674 " vid/did=%x/%x\n", venderid, deviceid));
1675
1676 rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
1677 }
1678
c7cfe38e
C
1679 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
1680 if (revisionid == 0 || revisionid == 1) {
1681 if (revisionid == 0) {
1682 RT_TRACE(rtlpriv, COMP_INIT,
1683 DBG_LOUD, ("Find 92DE MAC0.\n"));
1684 rtlhal->interfaceindex = 0;
1685 } else if (revisionid == 1) {
1686 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1687 ("Find 92DE MAC1.\n"));
1688 rtlhal->interfaceindex = 1;
1689 }
1690 } else {
1691 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1692 ("Unknown device - "
1693 "VendorID/DeviceID=%x/%x, Revision=%x\n",
1694 venderid, deviceid, revisionid));
1695 rtlhal->interfaceindex = 0;
1696 }
1697 }
0c817338
LF
1698 /*find bus info */
1699 pcipriv->ndis_adapter.busnumber = pdev->bus->number;
1700 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
1701 pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
1702
1703 /*find bridge info */
1704 pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
1705 for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
1706 if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
1707 pcipriv->ndis_adapter.pcibridge_vendor = tmp;
1708 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1709 ("Pci Bridge Vendor is found index: %d\n",
1710 tmp));
1711 break;
1712 }
1713 }
1714
1715 if (pcipriv->ndis_adapter.pcibridge_vendor !=
1716 PCI_BRIDGE_VENDOR_UNKNOWN) {
1717 pcipriv->ndis_adapter.pcibridge_busnum =
1718 bridge_pdev->bus->number;
1719 pcipriv->ndis_adapter.pcibridge_devnum =
1720 PCI_SLOT(bridge_pdev->devfn);
1721 pcipriv->ndis_adapter.pcibridge_funcnum =
1722 PCI_FUNC(bridge_pdev->devfn);
0c817338
LF
1723 pcipriv->ndis_adapter.pcicfg_addrport =
1724 (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
1725 (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
1726 (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
c7cfe38e
C
1727 pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
1728 pci_pcie_cap(bridge_pdev);
0c817338
LF
1729 pcipriv->ndis_adapter.num4bytes =
1730 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
1731
1732 rtl_pci_get_linkcontrol_field(hw);
1733
1734 if (pcipriv->ndis_adapter.pcibridge_vendor ==
1735 PCI_BRIDGE_VENDOR_AMD) {
1736 pcipriv->ndis_adapter.amd_l1_patch =
1737 rtl_pci_get_amd_l1_patch(hw);
1738 }
1739 }
1740
1741 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1742 ("pcidev busnumber:devnumber:funcnumber:"
1743 "vendor:link_ctl %d:%d:%d:%x:%x\n",
1744 pcipriv->ndis_adapter.busnumber,
1745 pcipriv->ndis_adapter.devnumber,
1746 pcipriv->ndis_adapter.funcnumber,
1747 pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
1748
1749 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1750 ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
1751 "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
1752 pcipriv->ndis_adapter.pcibridge_busnum,
1753 pcipriv->ndis_adapter.pcibridge_devnum,
1754 pcipriv->ndis_adapter.pcibridge_funcnum,
1755 pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
1756 pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
1757 pcipriv->ndis_adapter.pcibridge_linkctrlreg,
1758 pcipriv->ndis_adapter.amd_l1_patch));
1759
1760 rtl_pci_parse_configuration(pdev, hw);
1761
1762 return true;
1763}
1764
1765int __devinit rtl_pci_probe(struct pci_dev *pdev,
1766 const struct pci_device_id *id)
1767{
1768 struct ieee80211_hw *hw = NULL;
1769
1770 struct rtl_priv *rtlpriv = NULL;
1771 struct rtl_pci_priv *pcipriv = NULL;
1772 struct rtl_pci *rtlpci;
1773 unsigned long pmem_start, pmem_len, pmem_flags;
1774 int err;
1775
1776 err = pci_enable_device(pdev);
1777 if (err) {
1778 RT_ASSERT(false,
1779 ("%s : Cannot enable new PCI device\n",
1780 pci_name(pdev)));
1781 return err;
1782 }
1783
1784 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
1785 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1786 RT_ASSERT(false, ("Unable to obtain 32bit DMA "
1787 "for consistent allocations\n"));
1788 pci_disable_device(pdev);
1789 return -ENOMEM;
1790 }
1791 }
1792
1793 pci_set_master(pdev);
1794
1795 hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
1796 sizeof(struct rtl_priv), &rtl_ops);
1797 if (!hw) {
1798 RT_ASSERT(false,
1799 ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
1800 err = -ENOMEM;
1801 goto fail1;
1802 }
1803
1804 SET_IEEE80211_DEV(hw, &pdev->dev);
1805 pci_set_drvdata(pdev, hw);
1806
1807 rtlpriv = hw->priv;
1808 pcipriv = (void *)rtlpriv->priv;
1809 pcipriv->dev.pdev = pdev;
1810
c7cfe38e
C
1811 /* init cfg & intf_ops */
1812 rtlpriv->rtlhal.interface = INTF_PCI;
1813 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
1814 rtlpriv->intf_ops = &rtl_pci_ops;
1815
0c817338
LF
1816 /*
1817 *init dbgp flags before all
1818 *other functions, because we will
1819 *use it in other funtions like
1820 *RT_TRACE/RT_PRINT/RTL_PRINT_DATA
1821 *you can not use these macro
1822 *before this
1823 */
1824 rtl_dbgp_flag_init(hw);
1825
1826 /* MEM map */
1827 err = pci_request_regions(pdev, KBUILD_MODNAME);
1828 if (err) {
1829 RT_ASSERT(false, ("Can't obtain PCI resources\n"));
1830 return err;
1831 }
1832
c7cfe38e
C
1833 pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
1834 pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
1835 pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
0c817338
LF
1836
1837 /*shared mem start */
1838 rtlpriv->io.pci_mem_start =
c7cfe38e
C
1839 (unsigned long)pci_iomap(pdev,
1840 rtlpriv->cfg->bar_id, pmem_len);
0c817338
LF
1841 if (rtlpriv->io.pci_mem_start == 0) {
1842 RT_ASSERT(false, ("Can't map PCI mem\n"));
1843 goto fail2;
1844 }
1845
1846 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1847 ("mem mapped space: start: 0x%08lx len:%08lx "
1848 "flags:%08lx, after map:0x%08lx\n",
1849 pmem_start, pmem_len, pmem_flags,
1850 rtlpriv->io.pci_mem_start));
1851
1852 /* Disable Clk Request */
1853 pci_write_config_byte(pdev, 0x81, 0);
1854 /* leave D3 mode */
1855 pci_write_config_byte(pdev, 0x44, 0);
1856 pci_write_config_byte(pdev, 0x04, 0x06);
1857 pci_write_config_byte(pdev, 0x04, 0x07);
1858
0c817338 1859 /* find adapter */
fa7ccfb1
LF
1860 if (!_rtl_pci_find_adapter(pdev, hw))
1861 goto fail3;
0c817338
LF
1862
1863 /* Init IO handler */
1864 _rtl_pci_io_handler_init(&pdev->dev, hw);
1865
1866 /*like read eeprom and so on */
1867 rtlpriv->cfg->ops->read_eeprom_info(hw);
1868
1869 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
1870 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1871 ("Can't init_sw_vars.\n"));
1872 goto fail3;
1873 }
1874
1875 rtlpriv->cfg->ops->init_sw_leds(hw);
1876
1877 /*aspm */
1878 rtl_pci_init_aspm(hw);
1879
1880 /* Init mac80211 sw */
1881 err = rtl_init_core(hw);
1882 if (err) {
1883 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1884 ("Can't allocate sw for mac80211.\n"));
1885 goto fail3;
1886 }
1887
1888 /* Init PCI sw */
1889 err = !rtl_pci_init(hw, pdev);
1890 if (err) {
1891 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1892 ("Failed to init PCI.\n"));
1893 goto fail3;
1894 }
1895
1896 err = ieee80211_register_hw(hw);
1897 if (err) {
1898 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1899 ("Can't register mac80211 hw.\n"));
1900 goto fail3;
1901 } else {
1902 rtlpriv->mac80211.mac80211_registered = 1;
1903 }
1904
1905 err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
1906 if (err) {
1907 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1908 ("failed to create sysfs device attributes\n"));
1909 goto fail3;
1910 }
1911
1912 /*init rfkill */
1913 rtl_init_rfkill(hw);
1914
1915 rtlpci = rtl_pcidev(pcipriv);
1916 err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
1917 IRQF_SHARED, KBUILD_MODNAME, hw);
1918 if (err) {
1919 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1920 ("%s: failed to register IRQ handler\n",
1921 wiphy_name(hw->wiphy)));
1922 goto fail3;
1923 } else {
1924 rtlpci->irq_alloc = 1;
1925 }
1926
1927 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
1928 return 0;
1929
1930fail3:
1931 pci_set_drvdata(pdev, NULL);
1932 rtl_deinit_core(hw);
1933 _rtl_pci_io_handler_release(hw);
1934 ieee80211_free_hw(hw);
1935
1936 if (rtlpriv->io.pci_mem_start != 0)
62e63975 1937 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
0c817338
LF
1938
1939fail2:
1940 pci_release_regions(pdev);
1941
1942fail1:
1943
1944 pci_disable_device(pdev);
1945
1946 return -ENODEV;
1947
1948}
1949EXPORT_SYMBOL(rtl_pci_probe);
1950
/*
 * PCI remove: unregister from mac80211 (which stops the interface),
 * tear down rings/rfkill/core state, release the IRQ and the mapped
 * BAR, and disable the device.  Mirrors rtl_pci_probe() in reverse.
 */
void rtl_pci_disconnect(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
	struct rtl_mac *rtlmac = rtl_mac(rtlpriv);

	clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);

	sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);

	/*ieee80211_unregister_hw will call ops_stop */
	if (rtlmac->mac80211_registered == 1) {
		ieee80211_unregister_hw(hw);
		rtlmac->mac80211_registered = 0;
	} else {
		/* registration never happened; stop the adapter by hand */
		rtl_deinit_deferred_work(hw);
		rtlpriv->intf_ops->adapter_stop(hw);
	}

	/*deinit rfkill */
	rtl_deinit_rfkill(hw);

	rtl_pci_deinit(hw);
	rtl_deinit_core(hw);
	_rtl_pci_io_handler_release(hw);
	rtlpriv->cfg->ops->deinit_sw_vars(hw);

	if (rtlpci->irq_alloc) {
		free_irq(rtlpci->pdev->irq, hw);
		rtlpci->irq_alloc = 0;
	}

	if (rtlpriv->io.pci_mem_start != 0) {
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
		pci_release_regions(pdev);
	}

	pci_disable_device(pdev);

	rtl_pci_disable_aspm(hw);

	pci_set_drvdata(pdev, NULL);

	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_pci_disconnect);
1999
/***************************************
kernel pci power state define:
PCI_D0 ((pci_power_t __force) 0)
PCI_D1 ((pci_power_t __force) 1)
PCI_D2 ((pci_power_t __force) 2)
PCI_D3hot ((pci_power_t __force) 3)
PCI_D3cold ((pci_power_t __force) 4)
PCI_UNKNOWN ((pci_power_t __force) 5)

This function is called when the system
goes into suspend. mac80211 will call
rtl_mac_stop() from its own suspend
function first, so there is no need to
call hw_disable here.
****************************************/
int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* mac80211 has already stopped the interface (see the comment
	 * above this function), so only chip-level suspend work and
	 * rfkill teardown remain before cutting power. */
	rtlpriv->cfg->ops->hw_suspend(hw);
	rtl_deinit_rfkill(hw);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
EXPORT_SYMBOL(rtl_pci_suspend);
2029
/*
 * PCI resume: restore power state and config space, then let the
 * chip-specific hw_resume bring the hardware back and re-arm rfkill.
 * Returns 0 on success or the pci_enable_device() errno.
 */
int rtl_pci_resume(struct pci_dev *pdev)
{
	int ret;
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	pci_set_power_state(pdev, PCI_D0);
	ret = pci_enable_device(pdev);
	if (ret) {
		RT_ASSERT(false, ("ERR: <======\n"));
		return ret;
	}

	pci_restore_state(pdev);

	rtlpriv->cfg->ops->hw_resume(hw);
	rtl_init_rfkill(hw);
	return 0;
}
EXPORT_SYMBOL(rtl_pci_resume);
2050
/*
 * PCI transport hooks shared by all rtlwifi PCI drivers; the core
 * dispatches through rtlpriv->intf_ops (set up in rtl_pci_probe()).
 */
struct rtl_intf_ops rtl_pci_ops = {
	.read_efuse_byte = read_efuse_byte,
	.adapter_start = rtl_pci_start,
	.adapter_stop = rtl_pci_stop,
	.adapter_tx = rtl_pci_tx,
	.flush = rtl_pci_flush,
	.reset_trx_ring = rtl_pci_reset_trx_ring,
	.waitq_insert = rtl_pci_tx_chk_waitq_insert,

	.disable_aspm = rtl_pci_disable_aspm,
	.enable_aspm = rtl_pci_enable_aspm,
};
This page took 0.176789 seconds and 5 git commands to generate.