rtlwifi: rtl8192c: rtl8192ce: rtl8192cu: rtl8192de: rtl8723ae: Add changes required...
[deliverable/linux.git] / drivers / net / wireless / rtlwifi / pci.c
1 /******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30 #include "wifi.h"
31 #include "core.h"
32 #include "pci.h"
33 #include "base.h"
34 #include "ps.h"
35 #include "efuse.h"
36 #include <linux/export.h>
37 #include <linux/kmemleak.h>
38
/* PCI bridge vendor IDs this driver knows how to tune ASPM for,
 * indexed by the PCI_BRIDGE_VENDOR_* enum values. */
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
	PCI_VENDOR_ID_INTEL,
	PCI_VENDOR_ID_ATI,
	PCI_VENDOR_ID_AMD,
	PCI_VENDOR_ID_SI
};
45
/* Map a mac80211 AC queue index (0..3) to the corresponding hardware
 * tx queue: 0 -> VO, 1 -> VI, 2 -> BE, 3 -> BK. */
static const u8 ac_to_hwq[] = {
	VO_QUEUE,
	VI_QUEUE,
	BE_QUEUE,
	BK_QUEUE
};
52
53 static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
54 struct sk_buff *skb)
55 {
56 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
57 __le16 fc = rtl_get_fc(skb);
58 u8 queue_index = skb_get_queue_mapping(skb);
59
60 if (unlikely(ieee80211_is_beacon(fc)))
61 return BEACON_QUEUE;
62 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
63 return MGNT_QUEUE;
64 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
65 if (ieee80211_is_nullfunc(fc))
66 return HIGH_QUEUE;
67
68 return ac_to_hwq[queue_index];
69 }
70
/* Update PCI dependent default settings: translate the per-chip
 * defaults in rtlpci->const_* into the power-save level bits of
 * ppsc->reg_rfps_level and the ASPM support flags, then apply a
 * Toshiba/RTL8192SE quirk read back from config space. */
static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	u8 init_aspm;

	/* Start from a clean slate; the switches below only add bits. */
	ppsc->reg_rfps_level = 0;
	ppsc->support_aspm = false;

	/*Update PCI ASPM setting */
	ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
	switch (rtlpci->const_pci_aspm) {
	case 0:
		/*No ASPM */
		break;

	case 1:
		/*ASPM dynamically enabled/disable. */
		ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
		break;

	case 2:
		/*ASPM with Clock Req dynamically enabled/disable. */
		ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
					 RT_RF_OFF_LEVL_CLK_REQ);
		break;

	case 3:
		/*
		 * Always enable ASPM and Clock Req
		 * from initialization to halt.
		 * */
		ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
		ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
					 RT_RF_OFF_LEVL_CLK_REQ);
		break;

	case 4:
		/*
		 * Always enable ASPM without Clock Req
		 * from initialization to halt.
		 * */
		ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
					  RT_RF_OFF_LEVL_CLK_REQ);
		ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
		break;
	}

	/* Halting the NIC on radio-off is always allowed. */
	ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;

	/*Update Radio OFF setting */
	switch (rtlpci->const_hwsw_rfoff_d3) {
	case 1:
		if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
			ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
		break;

	case 2:
		if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
			ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
		ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
		break;

	case 3:
		/* Put the device into PCI D3 on radio-off. */
		ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
		break;
	}

	/*Set HW definition to determine if it supports ASPM. */
	switch (rtlpci->const_support_pciaspm) {
	case 0:{
			/*Not support ASPM. */
			bool support_aspm = false;
			ppsc->support_aspm = support_aspm;
			break;
		}
	case 1:{
			/*Support ASPM. */
			bool support_aspm = true;
			bool support_backdoor = true;
			ppsc->support_aspm = support_aspm;

			/*if (priv->oem_id == RT_CID_TOSHIBA &&
			   !priv->ndis_adapter.amd_l1_patch)
			   support_backdoor = false; */

			ppsc->support_backdoor = support_backdoor;

			break;
		}
	case 2:
		/*ASPM value set by chipset - only Intel bridges are
		 *trusted to have it configured correctly. */
		if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
			bool support_aspm = true;
			ppsc->support_aspm = support_aspm;
		}
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "switch case not processed\n");
		break;
	}

	/* toshiba aspm issue, toshiba will set aspm selfly
	 * so we should not set aspm in driver */
	pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
	if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
	    init_aspm == 0x43)
		ppsc->support_aspm = false;
}
185
186 static bool _rtl_pci_platform_switch_device_pci_aspm(
187 struct ieee80211_hw *hw,
188 u8 value)
189 {
190 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
191 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
192
193 if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
194 value |= 0x40;
195
196 pci_write_config_byte(rtlpci->pdev, 0x80, value);
197
198 return false;
199 }
200
201 /*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
202 static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
203 {
204 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
205 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
206
207 pci_write_config_byte(rtlpci->pdev, 0x81, value);
208
209 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
210 udelay(100);
211 }
212
/* Disable device ASPM first, then the PCI bridge's ASPM.  Bails out
 * when ASPM is unsupported or the bridge vendor is unknown.  Also
 * turns off clock request if it was enabled. */
static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	/* Offset (in dwords) of the bridge's link control register. */
	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
	/*Retrieve original configuration settings. */
	u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
	u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
				    pcibridge_linkctrlreg;
	u16 aspmlevel = 0;
	u8 tmp_u1b = 0;

	if (!ppsc->support_aspm)
		return;

	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "PCI(Bridge) UNKNOWN\n");

		return;
	}

	/* Drop clock request before touching the ASPM bits. */
	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
		RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
		_rtl_pci_switch_clk_req(hw, 0x0);
	}

	/*for promising device will in L0 state after an I/O. */
	pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);

	/*Set corresponding value: clear the L0s/L1 enable bits. */
	aspmlevel |= BIT(0) | BIT(1);
	linkctrl_reg &= ~aspmlevel;
	pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));

	_rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
	udelay(50);

	/*4 Disable Pci Bridge ASPM */
	pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
			      pcibridge_linkctrlreg);

	udelay(50);
}
261
/*
 *Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
 *power saving We should follow the sequence to enable
 *RTL8192SE first then enable Pci Bridge ASPM
 *or the system will show bluescreen.
 */
static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	/* Offset (in dwords) of the bridge's link control register. */
	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
	u16 aspmlevel;
	u8 u_pcibridge_aspmsetting;
	u8 u_device_aspmsetting;

	if (!ppsc->support_aspm)
		return;

	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "PCI(Bridge) UNKNOWN\n");
		return;
	}

	/*4 Enable Pci Bridge ASPM */

	u_pcibridge_aspmsetting =
	    pcipriv->ndis_adapter.pcibridge_linkctrlreg |
	    rtlpci->const_hostpci_aspm_setting;

	/* Intel bridges: keep L0s (bit 0) disabled. */
	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
		u_pcibridge_aspmsetting &= ~BIT(0);

	pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
			      u_pcibridge_aspmsetting);

	/* NOTE(review): the offset logged here assumes the PCIe
	 * capability header is 4-byte aligned so that
	 * pciehdr_offset + 0x10 == num4bytes << 2 - confirm. */
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "PlatformEnableASPM(): Write reg[%x] = %x\n",
		 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
		 u_pcibridge_aspmsetting);

	udelay(50);

	/*Get ASPM level (with/without Clock Req) */
	aspmlevel = rtlpci->const_devicepci_aspm_setting;
	u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;

	/*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
	/*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */

	u_device_aspmsetting |= aspmlevel;

	_rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);

	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
		_rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
					     RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
		RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
	}
	udelay(100);
}
326
327 static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
328 {
329 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
330
331 bool status = false;
332 u8 offset_e0;
333 unsigned offset_e4;
334
335 pci_write_config_byte(rtlpci->pdev, 0xe0, 0xa0);
336
337 pci_read_config_byte(rtlpci->pdev, 0xe0, &offset_e0);
338
339 if (offset_e0 == 0xA0) {
340 pci_read_config_dword(rtlpci->pdev, 0xe4, &offset_e4);
341 if (offset_e4 & BIT(23))
342 status = true;
343 }
344
345 return status;
346 }
347
348 static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
349 struct rtl_priv **buddy_priv)
350 {
351 struct rtl_priv *rtlpriv = rtl_priv(hw);
352 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
353 bool find_buddy_priv = false;
354 struct rtl_priv *tpriv = NULL;
355 struct rtl_pci_priv *tpcipriv = NULL;
356
357 if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
358 list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
359 list) {
360 if (tpriv) {
361 tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
362 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
363 "pcipriv->ndis_adapter.funcnumber %x\n",
364 pcipriv->ndis_adapter.funcnumber);
365 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
366 "tpcipriv->ndis_adapter.funcnumber %x\n",
367 tpcipriv->ndis_adapter.funcnumber);
368
369 if ((pcipriv->ndis_adapter.busnumber ==
370 tpcipriv->ndis_adapter.busnumber) &&
371 (pcipriv->ndis_adapter.devnumber ==
372 tpcipriv->ndis_adapter.devnumber) &&
373 (pcipriv->ndis_adapter.funcnumber !=
374 tpcipriv->ndis_adapter.funcnumber)) {
375 find_buddy_priv = true;
376 break;
377 }
378 }
379 }
380 }
381
382 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
383 "find_buddy_priv %d\n", find_buddy_priv);
384
385 if (find_buddy_priv)
386 *buddy_priv = tpriv;
387
388 return find_buddy_priv;
389 }
390
391 static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
392 {
393 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
394 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
395 u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
396 u8 linkctrl_reg;
397 u8 num4bbytes;
398
399 num4bbytes = (capabilityoffset + 0x10) / 4;
400
401 /*Read Link Control Register */
402 pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
403
404 pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
405 }
406
/* Cache the device's PCIe Link Control register (low byte) and apply
 * two device-specific config-space tweaks: set bit 4 at offset 0x98
 * and write 0x17 to offset 0x70f. */
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
		struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);

	u8 tmp;
	u16 linkctrl_reg;

	/*Link Control Register */
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
	pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
		 pcipriv->ndis_adapter.linkctrl_reg);

	pci_read_config_byte(pdev, 0x98, &tmp);
	tmp |= BIT(4);
	pci_write_config_byte(pdev, 0x98, tmp);

	/* NOTE(review): 0x70f is in extended config space; the byte
	 * write relies on the platform supporting extended accesses -
	 * confirm on the targeted chipsets. */
	tmp = 0x17;
	pci_write_config_byte(pdev, 0x70f, tmp);
}
430
431 static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
432 {
433 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
434
435 _rtl_pci_update_default_setting(hw);
436
437 if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
438 /*Always enable ASPM & Clock Req. */
439 rtl_pci_enable_aspm(hw);
440 RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
441 }
442
443 }
444
445 static void _rtl_pci_io_handler_init(struct device *dev,
446 struct ieee80211_hw *hw)
447 {
448 struct rtl_priv *rtlpriv = rtl_priv(hw);
449
450 rtlpriv->io.dev = dev;
451
452 rtlpriv->io.write8_async = pci_write8_async;
453 rtlpriv->io.write16_async = pci_write16_async;
454 rtlpriv->io.write32_async = pci_write32_async;
455
456 rtlpriv->io.read8_sync = pci_read8_sync;
457 rtlpriv->io.read16_sync = pci_read16_sync;
458 rtlpriv->io.read32_sync = pci_read32_sync;
459
460 }
461
/*
 * Record the lengths of consecutive A-MPDU frames still waiting on the
 * software wait queue for @tid into tcb_desc->empkt_len[], so the
 * early-mode header of @skb can announce them.  Walks under
 * waitq_lock and stops at the first non-AMPDU frame, at the end of
 * the queue, or after rtlhal->max_earlymode_num entries.
 * Always returns true.
 */
static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
	struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct sk_buff *next_skb;
	/* Per-frame overhead added to each queued skb's length. */
	u8 additionlen = FCS_LEN;

	/* here open is 4, wep/tkip is 8, aes is 12*/
	if (info->control.hw_key)
		additionlen += info->control.hw_key->icv_len;

	/* The most skb num is 6 */
	tcb_desc->empkt_num = 0;
	spin_lock_bh(&rtlpriv->locks.waitq_lock);
	skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
		struct ieee80211_tx_info *next_info;

		next_info = IEEE80211_SKB_CB(next_skb);
		if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/* NOTE(review): empkt_len is written before the
			 * max_earlymode_num bound is checked below; confirm
			 * the array holds max_earlymode_num entries. */
			tcb_desc->empkt_len[tcb_desc->empkt_num] =
				next_skb->len + additionlen;
			tcb_desc->empkt_num++;
		} else {
			break;
		}

		if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
				      next_skb))
			break;

		if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
			break;
	}
	spin_unlock_bh(&rtlpriv->locks.waitq_lock);

	return true;
}
501
/* just for early mode now: drain the per-TID software wait queues
 * into the hardware tx rings while the radio is on and we are not
 * scanning.  Skipped entirely when early mode is disabled or a
 * dual-MAC band switch is in progress. */
static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct sk_buff *skb = NULL;
	struct ieee80211_tx_info *info = NULL;
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	int tid;

	if (!rtlpriv->rtlhal.earlymode_enable)
		return;

	/* Don't touch the rings while an easy-concurrent band switch is
	 * in progress on this device or its buddy. */
	if (rtlpriv->dm.supp_phymode_switch &&
	    (rtlpriv->easy_concurrent_ctl.switch_in_process ||
	    (rtlpriv->buddy_priv &&
	    rtlpriv->buddy_priv->easy_concurrent_ctl.switch_in_process)))
		return;
	/* we juse use em for BE/BK/VI/VO */
	for (tid = 7; tid >= 0; tid--) {
		u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)];
		struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
		while (!mac->act_scanning &&
		       rtlpriv->psc.rfpwr_state == ERFON) {
			struct rtl_tcb_desc tcb_desc;
			memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));

			/* Only dequeue while the hw ring has headroom for
			 * a full early-mode burst; otherwise stop. */
			spin_lock_bh(&rtlpriv->locks.waitq_lock);
			if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
			    (ring->entries - skb_queue_len(&ring->queue) >
			     rtlhal->max_earlymode_num)) {
				skb = skb_dequeue(&mac->skb_waitq[tid]);
			} else {
				spin_unlock_bh(&rtlpriv->locks.waitq_lock);
				break;
			}
			spin_unlock_bh(&rtlpriv->locks.waitq_lock);

			/* Some macaddr can't do early mode. like
			 * multicast/broadcast/no_qos data */
			info = IEEE80211_SKB_CB(skb);
			if (info->flags & IEEE80211_TX_CTL_AMPDU)
				_rtl_update_earlymode_info(hw, skb,
							   &tcb_desc, tid);

			rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
		}
	}
}
552
553
554 static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
555 {
556 struct rtl_priv *rtlpriv = rtl_priv(hw);
557 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
558
559 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
560
561 while (skb_queue_len(&ring->queue)) {
562 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
563 struct sk_buff *skb;
564 struct ieee80211_tx_info *info;
565 __le16 fc;
566 u8 tid;
567
568 u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
569 HW_DESC_OWN);
570
571 /*beacon packet will only use the first
572 *descriptor by defaut, and the own may not
573 *be cleared by the hardware
574 */
575 if (own)
576 return;
577 ring->idx = (ring->idx + 1) % ring->entries;
578
579 skb = __skb_dequeue(&ring->queue);
580 pci_unmap_single(rtlpci->pdev,
581 rtlpriv->cfg->ops->
582 get_desc((u8 *) entry, true,
583 HW_DESC_TXBUFF_ADDR),
584 skb->len, PCI_DMA_TODEVICE);
585
586 /* remove early mode header */
587 if (rtlpriv->rtlhal.earlymode_enable)
588 skb_pull(skb, EM_HDR_LEN);
589
590 RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
591 "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
592 ring->idx,
593 skb_queue_len(&ring->queue),
594 *(u16 *) (skb->data + 22));
595
596 if (prio == TXCMD_QUEUE) {
597 dev_kfree_skb(skb);
598 goto tx_status_ok;
599
600 }
601
602 /* for sw LPS, just after NULL skb send out, we can
603 * sure AP knows we are sleeping, we should not let
604 * rf sleep
605 */
606 fc = rtl_get_fc(skb);
607 if (ieee80211_is_nullfunc(fc)) {
608 if (ieee80211_has_pm(fc)) {
609 rtlpriv->mac80211.offchan_delay = true;
610 rtlpriv->psc.state_inap = true;
611 } else {
612 rtlpriv->psc.state_inap = false;
613 }
614 }
615 if (ieee80211_is_action(fc)) {
616 struct ieee80211_mgmt *action_frame =
617 (struct ieee80211_mgmt *)skb->data;
618 if (action_frame->u.action.u.ht_smps.action ==
619 WLAN_HT_ACTION_SMPS) {
620 dev_kfree_skb(skb);
621 goto tx_status_ok;
622 }
623 }
624
625 /* update tid tx pkt num */
626 tid = rtl_get_tid(skb);
627 if (tid <= 7)
628 rtlpriv->link_info.tidtx_inperiod[tid]++;
629
630 info = IEEE80211_SKB_CB(skb);
631 ieee80211_tx_info_clear_status(info);
632
633 info->flags |= IEEE80211_TX_STAT_ACK;
634 /*info->status.rates[0].count = 1; */
635
636 ieee80211_tx_status_irqsafe(hw, skb);
637
638 if ((ring->entries - skb_queue_len(&ring->queue))
639 == 2) {
640
641 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
642 "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%d\n",
643 prio, ring->idx,
644 skb_queue_len(&ring->queue));
645
646 ieee80211_wake_queue(hw,
647 skb_get_queue_mapping
648 (skb));
649 }
650 tx_status_ok:
651 skb = NULL;
652 }
653
654 if (((rtlpriv->link_info.num_rx_inperiod +
655 rtlpriv->link_info.num_tx_inperiod) > 8) ||
656 (rtlpriv->link_info.num_rx_inperiod > 2)) {
657 rtlpriv->enter_ps = false;
658 schedule_work(&rtlpriv->works.lps_change_work);
659 }
660 }
661
/* Process one received frame: update stats and driver state machines
 * (LPS, beacon statistics, P2P), then hand a private copy of the skb
 * to mac80211.  AP-mode beacons/probe responses on 2.4 GHz are
 * consumed by the driver and not forwarded.
 * NOTE(review): rx_status is passed by value; a pointer would avoid
 * copying the struct on every frame - confirm callers before changing. */
static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
			     struct ieee80211_rx_status rx_status)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
	__le16 fc = rtl_get_fc(skb);
	bool unicast = false;
	struct sk_buff *uskb = NULL;
	u8 *pdata;


	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

	if (is_broadcast_ether_addr(hdr->addr1)) {
		;/*TODO*/
	} else if (is_multicast_ether_addr(hdr->addr1)) {
		;/*TODO*/
	} else {
		unicast = true;
		rtlpriv->stats.rxbytesunicast += skb->len;
	}

	/* Called for its side effects on link statistics. */
	rtl_is_special_data(hw, skb, false);

	if (ieee80211_is_data(fc)) {
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

		if (unicast)
			rtlpriv->link_info.num_rx_inperiod++;
	}

	/* static bcn for roaming */
	rtl_beacon_statistic(hw, skb);
	rtl_p2p_info(hw, (void *)skb->data, skb->len);

	/* for sw lps */
	rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
	rtl_recognize_peer(hw, (void *)skb->data, skb->len);
	if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
	    (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) &&
	    (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)))
		return;

	if (unlikely(!rtl_action_proc(hw, skb, false)))
		return;

	/* Hand mac80211 its own copy; the original buffer is recycled
	 * into the rx ring by the caller. */
	uskb = dev_alloc_skb(skb->len + 128);
	if (!uskb)
		return; /* exit if allocation failed */
	memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status));
	pdata = (u8 *)skb_put(uskb, skb->len);
	memcpy(pdata, skb->data, skb->len);

	ieee80211_rx_irqsafe(hw, uskb);
}
717
/* Drain the rx MPDU ring: for each descriptor the hardware has
 * released, parse the frame, pass it up via _rtl_receive_one(), swap
 * in a freshly mapped replacement buffer, and re-arm the descriptor.
 * On a bad frame (CRC/hw error) or allocation failure the old buffer
 * is kept and the descriptor is simply re-armed (the "done:" path). */
static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;

	struct ieee80211_rx_status rx_status = { 0 };
	unsigned int count = rtlpci->rxringcount;
	u8 own;
	u8 tmp_one;
	u32 bufferaddress;

	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};
	int index = rtlpci->rx_ring[rx_queue_idx].idx;

	/*RX NORMAL PKT */
	while (count--) {
		/*rx descriptor */
		struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
				index];
		/*rx pkt */
		struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
				index];
		struct sk_buff *new_skb = NULL;

		own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
						       false, HW_DESC_OWN);

		/*wait data to be filled by hardware */
		if (own)
			break;

		rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
						 &rx_status,
						 (u8 *) pdesc, skb);

		/* Bad frame: keep the current buffer and just re-arm. */
		if (stats.crc || stats.hwerror)
			goto done;

		/* Allocate the replacement before consuming the current
		 * buffer, so the ring never loses an entry. */
		new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
		if (unlikely(!new_skb)) {
			RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), DBG_DMESG,
				 "can't alloc skb for rx\n");
			goto done;
		}

		/* The DMA address of each rx buffer is stashed in skb->cb
		 * when the buffer is mapped. */
		pci_unmap_single(rtlpci->pdev,
				 *((dma_addr_t *) skb->cb),
				 rtlpci->rxbuffersize,
				 PCI_DMA_FROMDEVICE);

		skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, false,
			HW_DESC_RXPKT_LEN));
		skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift);

		/*
		 * NOTICE This can not be use for mac80211,
		 * this is done in mac80211 code,
		 * if you done here sec DHCP will fail
		 * skb_trim(skb, skb->len - 4);
		 */

		_rtl_receive_one(hw, skb, rx_status);

		if (((rtlpriv->link_info.num_rx_inperiod +
		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
		      (rtlpriv->link_info.num_rx_inperiod > 2)) {
			rtlpriv->enter_ps = false;
			schedule_work(&rtlpriv->works.lps_change_work);
		}

		dev_kfree_skb_any(skb);
		skb = new_skb;

		rtlpci->rx_ring[rx_queue_idx].rx_buf[index] = skb;
		*((dma_addr_t *) skb->cb) =
			pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
				       rtlpci->rxbuffersize,
				       PCI_DMA_FROMDEVICE);

done:
		/* Re-arm the descriptor: buffer address, length, EOR on
		 * the last ring entry, then OWN back to hardware. */
		bufferaddress = (*((dma_addr_t *)skb->cb));
		if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
			return;
		tmp_one = 1;
		rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
					    HW_DESC_RXBUFF_ADDR,
					    (u8 *)&bufferaddress);
		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
					    HW_DESC_RXPKT_LEN,
					    (u8 *)&rtlpci->rxbuffersize);

		if (index == rtlpci->rxringcount - 1)
			rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
						    HW_DESC_RXERO,
						    &tmp_one);

		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
					    &tmp_one);

		index = (index + 1) % rtlpci->rxringcount;
	}

	rtlpci->rx_ring[rx_queue_idx].idx = index;
}
827
/* Top-half interrupt handler: read and dispatch the interrupt status
 * under irq_th_lock - beacon events, per-queue tx completions, rx,
 * and firmware C2H events.  Returns IRQ_NONE when the status reads
 * empty/all-ones (shared IRQ or surprise-removed hardware). */
static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *hw = dev_id;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u32 inta = 0;
	u32 intb = 0;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	/*read ISR: 4/8bytes */
	rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);

	/*Shared IRQ or HW disappared */
	if (!inta || inta == 0xffff) {
		ret = IRQ_NONE;
		goto done;
	}

	/*<1> beacon related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "beacon ok interrupt!\n");
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "beacon err interrupt!\n");
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "prepare beacon for interrupt!\n");
		tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
	}

	/*<3> Tx related */
	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");

	if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "Manage ok interrupt!\n");
		_rtl_pci_tx_isr(hw, MGNT_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "HIGH_QUEUE ok interrupt!\n");
		_rtl_pci_tx_isr(hw, HIGH_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "BK Tx OK interrupt!\n");
		_rtl_pci_tx_isr(hw, BK_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "BE TX OK interrupt!\n");
		_rtl_pci_tx_isr(hw, BE_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "VI TX OK interrupt!\n");
		_rtl_pci_tx_isr(hw, VI_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "Vo TX OK interrupt!\n");
		_rtl_pci_tx_isr(hw, VO_QUEUE);
	}

	/* Only RTL8192SE has a command tx queue. */
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
		if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
			rtlpriv->link_info.num_tx_inperiod++;

			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
				 "CMD TX OK interrupt!\n");
			_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
		}
	}

	/*<2> Rx related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
		_rtl_pci_rx_interrupt(hw);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "rx descriptor unavailable!\n");
		_rtl_pci_rx_interrupt(hw);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
		_rtl_pci_rx_interrupt(hw);
	}

	/*fw related*/
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
		if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
				 "firmware interrupt!\n");
			queue_delayed_work(rtlpriv->works.rtl_wq,
					   &rtlpriv->works.fwevt_wq, 0);
		}
	}

	/* Defer early-mode waitq flushing to the tasklet. */
	if (rtlpriv->rtlhal.earlymode_enable)
		tasklet_schedule(&rtlpriv->works.irq_tasklet);

done:
	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	return ret;
}
962
/* Interrupt bottom half: flush any frames waiting for early-mode
 * transmission out of the software wait queues. */
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
{
	_rtl_pci_tx_chk_waitq(hw);
}
967
/* Beacon-interval bottom half: free the previously queued beacon
 * (unmapping its DMA buffer), fetch a fresh one from mac80211, fill
 * the first beacon-ring descriptor with it and hand ownership back
 * to the hardware. */
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl8192_tx_ring *ring = NULL;
	struct ieee80211_hdr *hdr = NULL;
	struct ieee80211_tx_info *info = NULL;
	struct sk_buff *pskb = NULL;
	struct rtl_tx_desc *pdesc = NULL;
	struct rtl_tcb_desc tcb_desc;
	u8 temp_one = 1;

	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
	ring = &rtlpci->tx_ring[BEACON_QUEUE];
	/* Release the beacon queued on the previous interval, if any. */
	pskb = __skb_dequeue(&ring->queue);
	if (pskb) {
		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
		pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc(
				 (u8 *) entry, true, HW_DESC_TXBUFF_ADDR),
				 pskb->len, PCI_DMA_TODEVICE);
		kfree_skb(pskb);
	}

	/*NB: the beacon data buffer must be 32-bit aligned. */
	pskb = ieee80211_beacon_get(hw, mac->vif);
	if (pskb == NULL)
		return;
	hdr = rtl_get_hdr(pskb);
	info = IEEE80211_SKB_CB(pskb);
	/* The beacon queue only ever uses descriptor 0. */
	pdesc = &ring->desc[0];
	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
		info, NULL, pskb, BEACON_QUEUE, &tcb_desc);

	__skb_queue_tail(&ring->queue, pskb);

	rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
				    &temp_one);

	return;
}
1009
1010 static void rtl_lps_change_work_callback(struct work_struct *work)
1011 {
1012 struct rtl_works *rtlworks =
1013 container_of(work, struct rtl_works, lps_change_work);
1014 struct ieee80211_hw *hw = rtlworks->hw;
1015 struct rtl_priv *rtlpriv = rtl_priv(hw);
1016
1017 if (rtlpriv->enter_ps)
1018 rtl_lps_enter(hw);
1019 else
1020 rtl_lps_leave(hw);
1021 }
1022
1023 static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
1024 {
1025 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1026 u8 i;
1027
1028 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1029 rtlpci->txringcount[i] = RT_TXDESC_NUM;
1030
1031 /*
1032 *we just alloc 2 desc for beacon queue,
1033 *because we just need first desc in hw beacon.
1034 */
1035 rtlpci->txringcount[BEACON_QUEUE] = 2;
1036
1037 /*
1038 *BE queue need more descriptor for performance
1039 *consideration or, No more tx desc will happen,
1040 *and may cause mac80211 mem leakage.
1041 */
1042 rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
1043
1044 rtlpci->rxbuffersize = 9100; /*2048/1024; */
1045 rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
1046 }
1047
/* One-time software initialization of the PCI-specific driver state:
 * ring geometry, MAC defaults (beacon interval, AMPDU parameters),
 * and the tasklets/work items used by the interrupt paths. */
static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
		struct pci_dev *pdev)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	rtlpci->up_first_time = true;
	rtlpci->being_init_adapter = false;

	rtlhal->hw = hw;
	rtlpci->pdev = pdev;

	/*Tx/Rx related var */
	_rtl_pci_init_trx_var(hw);

	/*IBSS*/ mac->beacon_interval = 100;

	/*AMPDU*/
	mac->min_space_cfg = 0;
	mac->max_mss_density = 0;
	/*set sane AMPDU defaults */
	mac->current_ampdu_density = 7;
	mac->current_ampdu_factor = 3;

	/*QOS*/
	rtlpci->acm_method = eAcmWay2_SW;

	/*task */
	tasklet_init(&rtlpriv->works.irq_tasklet,
		     (void (*)(unsigned long))_rtl_pci_irq_tasklet,
		     (unsigned long)hw);
	tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
		     (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
		     (unsigned long)hw);
	INIT_WORK(&rtlpriv->works.lps_change_work,
		  rtl_lps_change_work_callback);
}
1087
/* Allocate and initialize one coherent-DMA tx descriptor ring of
 * @entries descriptors for hw queue @prio, chaining each descriptor's
 * next-descriptor pointer so the ring wraps.  The ring base must be
 * 256-byte aligned (low 8 address bits clear).  Returns 0 or -ENOMEM. */
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
				 unsigned int prio, unsigned int entries)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_tx_desc *ring;
	dma_addr_t dma;
	u32 nextdescaddress;
	int i;

	ring = pci_alloc_consistent(rtlpci->pdev,
				    sizeof(*ring) * entries, &dma);

	/* Hardware requires the ring base to be 256-byte aligned. */
	if (!ring || (unsigned long)ring & 0xFF) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Cannot allocate TX ring (prio = %d)\n", prio);
		return -ENOMEM;
	}

	memset(ring, 0, sizeof(*ring) * entries);
	rtlpci->tx_ring[prio].desc = ring;
	rtlpci->tx_ring[prio].dma = dma;
	rtlpci->tx_ring[prio].idx = 0;
	rtlpci->tx_ring[prio].entries = entries;
	skb_queue_head_init(&rtlpci->tx_ring[prio].queue);

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
		 prio, ring);

	/* Chain the descriptors into a circular list. */
	for (i = 0; i < entries; i++) {
		nextdescaddress = (u32) dma +
				  ((i + 1) % entries) *
				  sizeof(*ring);

		rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
					    true, HW_DESC_TX_NEXTDESC_ADDR,
					    (u8 *)&nextdescaddress);
	}

	return 0;
}
1129
1130 static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1131 {
1132 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1133 struct rtl_priv *rtlpriv = rtl_priv(hw);
1134 struct rtl_rx_desc *entry = NULL;
1135 int i, rx_queue_idx;
1136 u8 tmp_one = 1;
1137
1138 /*
1139 *rx_queue_idx 0:RX_MPDU_QUEUE
1140 *rx_queue_idx 1:RX_CMD_QUEUE
1141 */
1142 for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1143 rx_queue_idx++) {
1144 rtlpci->rx_ring[rx_queue_idx].desc =
1145 pci_alloc_consistent(rtlpci->pdev,
1146 sizeof(*rtlpci->rx_ring[rx_queue_idx].
1147 desc) * rtlpci->rxringcount,
1148 &rtlpci->rx_ring[rx_queue_idx].dma);
1149
1150 if (!rtlpci->rx_ring[rx_queue_idx].desc ||
1151 (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
1152 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1153 "Cannot allocate RX ring\n");
1154 return -ENOMEM;
1155 }
1156
1157 memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
1158 sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
1159 rtlpci->rxringcount);
1160
1161 rtlpci->rx_ring[rx_queue_idx].idx = 0;
1162
1163 /* If amsdu_8k is disabled, set buffersize to 4096. This
1164 * change will reduce memory fragmentation.
1165 */
1166 if (rtlpci->rxbuffersize > 4096 &&
1167 rtlpriv->rtlhal.disable_amsdu_8k)
1168 rtlpci->rxbuffersize = 4096;
1169
1170 for (i = 0; i < rtlpci->rxringcount; i++) {
1171 struct sk_buff *skb =
1172 dev_alloc_skb(rtlpci->rxbuffersize);
1173 u32 bufferaddress;
1174 if (!skb)
1175 return 0;
1176 kmemleak_not_leak(skb);
1177 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1178
1179 /*skb->dev = dev; */
1180
1181 rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;
1182
1183 /*
1184 *just set skb->cb to mapping addr
1185 *for pci_unmap_single use
1186 */
1187 *((dma_addr_t *) skb->cb) =
1188 pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
1189 rtlpci->rxbuffersize,
1190 PCI_DMA_FROMDEVICE);
1191
1192 bufferaddress = (*((dma_addr_t *)skb->cb));
1193 if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) {
1194 dev_kfree_skb_any(skb);
1195 return 1;
1196 }
1197 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1198 HW_DESC_RXBUFF_ADDR,
1199 (u8 *)&bufferaddress);
1200 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1201 HW_DESC_RXPKT_LEN,
1202 (u8 *)&rtlpci->
1203 rxbuffersize);
1204 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1205 HW_DESC_RXOWN,
1206 &tmp_one);
1207 }
1208
1209 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1210 HW_DESC_RXERO, &tmp_one);
1211 }
1212 return 0;
1213 }
1214
1215 static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1216 unsigned int prio)
1217 {
1218 struct rtl_priv *rtlpriv = rtl_priv(hw);
1219 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1220 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
1221
1222 while (skb_queue_len(&ring->queue)) {
1223 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
1224 struct sk_buff *skb = __skb_dequeue(&ring->queue);
1225
1226 pci_unmap_single(rtlpci->pdev,
1227 rtlpriv->cfg->
1228 ops->get_desc((u8 *) entry, true,
1229 HW_DESC_TXBUFF_ADDR),
1230 skb->len, PCI_DMA_TODEVICE);
1231 kfree_skb(skb);
1232 ring->idx = (ring->idx + 1) % ring->entries;
1233 }
1234
1235 if (ring->desc) {
1236 pci_free_consistent(rtlpci->pdev,
1237 sizeof(*ring->desc) * ring->entries,
1238 ring->desc, ring->dma);
1239 ring->desc = NULL;
1240 }
1241 }
1242
1243 static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
1244 {
1245 int i, rx_queue_idx;
1246
1247 /*rx_queue_idx 0:RX_MPDU_QUEUE */
1248 /*rx_queue_idx 1:RX_CMD_QUEUE */
1249 for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1250 rx_queue_idx++) {
1251 for (i = 0; i < rtlpci->rxringcount; i++) {
1252 struct sk_buff *skb =
1253 rtlpci->rx_ring[rx_queue_idx].rx_buf[i];
1254 if (!skb)
1255 continue;
1256
1257 pci_unmap_single(rtlpci->pdev,
1258 *((dma_addr_t *) skb->cb),
1259 rtlpci->rxbuffersize,
1260 PCI_DMA_FROMDEVICE);
1261 kfree_skb(skb);
1262 }
1263
1264 if (rtlpci->rx_ring[rx_queue_idx].desc) {
1265 pci_free_consistent(rtlpci->pdev,
1266 sizeof(*rtlpci->rx_ring[rx_queue_idx].
1267 desc) * rtlpci->rxringcount,
1268 rtlpci->rx_ring[rx_queue_idx].desc,
1269 rtlpci->rx_ring[rx_queue_idx].dma);
1270 rtlpci->rx_ring[rx_queue_idx].desc = NULL;
1271 }
1272 }
1273 }
1274
1275 static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
1276 {
1277 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1278 int ret;
1279 int i;
1280
1281 ret = _rtl_pci_init_rx_ring(hw);
1282 if (ret)
1283 return ret;
1284
1285 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1286 ret = _rtl_pci_init_tx_ring(hw, i,
1287 rtlpci->txringcount[i]);
1288 if (ret)
1289 goto err_free_rings;
1290 }
1291
1292 return 0;
1293
1294 err_free_rings:
1295 _rtl_pci_free_rx_ring(rtlpci);
1296
1297 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1298 if (rtlpci->tx_ring[i].desc)
1299 _rtl_pci_free_tx_ring(hw, i);
1300
1301 return 1;
1302 }
1303
1304 static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
1305 {
1306 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1307 u32 i;
1308
1309 /*free rx rings */
1310 _rtl_pci_free_rx_ring(rtlpci);
1311
1312 /*free tx rings */
1313 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1314 _rtl_pci_free_tx_ring(hw, i);
1315
1316 return 0;
1317 }
1318
/* Reset both RX and TX rings to their post-init state: hand every RX
 * descriptor back to the hardware (OWN bit) and rewind the RX index,
 * then drop any TX frame still pending and rewind each TX ring.
 * Exported via rtl_pci_ops.reset_trx_ring; also called from
 * rtl_pci_start() before hw_init. Always returns 0. */
int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int i, rx_queue_idx;
	unsigned long flags;
	u8 tmp_one = 1;

	/*rx_queue_idx 0:RX_MPDU_QUEUE */
	/*rx_queue_idx 1:RX_CMD_QUEUE */
	for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
	     rx_queue_idx++) {
		/*
		 *force the rx_ring[RX_MPDU_QUEUE/
		 *RX_CMD_QUEUE].idx to the first one
		 */
		if (rtlpci->rx_ring[rx_queue_idx].desc) {
			struct rtl_rx_desc *entry = NULL;

			/* Give every RX descriptor back to the NIC. */
			for (i = 0; i < rtlpci->rxringcount; i++) {
				entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
				rtlpriv->cfg->ops->set_desc((u8 *) entry,
							    false,
							    HW_DESC_RXOWN,
							    &tmp_one);
			}
			rtlpci->rx_ring[rx_queue_idx].idx = 0;
		}
	}

	/*
	 *after reset, release previous pending packet,
	 *and force the tx idx to the first one
	 */
	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
		if (rtlpci->tx_ring[i].desc) {
			struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];

			while (skb_queue_len(&ring->queue)) {
				struct rtl_tx_desc *entry;
				struct sk_buff *skb;

				/* irq_th_lock covers only the dequeue,
				 * unmap and index update; the skb is
				 * freed outside the critical section. */
				spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,
						  flags);
				entry = &ring->desc[ring->idx];
				skb = __skb_dequeue(&ring->queue);
				pci_unmap_single(rtlpci->pdev,
						 rtlpriv->cfg->ops->
						 get_desc((u8 *)
						 entry,
						 true,
						 HW_DESC_TXBUFF_ADDR),
						 skb->len, PCI_DMA_TODEVICE);
				ring->idx = (ring->idx + 1) % ring->entries;
				spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
						 flags);
				kfree_skb(skb);
			}
			ring->idx = 0;
		}
	}

	return 0;
}
1383
1384 static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1385 struct ieee80211_sta *sta,
1386 struct sk_buff *skb)
1387 {
1388 struct rtl_priv *rtlpriv = rtl_priv(hw);
1389 struct rtl_sta_info *sta_entry = NULL;
1390 u8 tid = rtl_get_tid(skb);
1391 __le16 fc = rtl_get_fc(skb);
1392
1393 if (!sta)
1394 return false;
1395 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1396
1397 if (!rtlpriv->rtlhal.earlymode_enable)
1398 return false;
1399 if (ieee80211_is_nullfunc(fc))
1400 return false;
1401 if (ieee80211_is_qos_nullfunc(fc))
1402 return false;
1403 if (ieee80211_is_pspoll(fc))
1404 return false;
1405 if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
1406 return false;
1407 if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
1408 return false;
1409 if (tid > 7)
1410 return false;
1411
1412 /* maybe every tid should be checked */
1413 if (!rtlpriv->link_info.higher_busytxtraffic[tid])
1414 return false;
1415
1416 spin_lock_bh(&rtlpriv->locks.waitq_lock);
1417 skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
1418 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
1419
1420 return true;
1421 }
1422
/* Transmit one frame: pick the hardware queue, claim the next free
 * descriptor under irq_th_lock, fill it, hand it to the NIC (OWN bit)
 * and kick TX polling. Returns 0 on success; returns skb->len when no
 * descriptor is free (NOTE(review): a non-zero "busy" return — confirm
 * callers treat it that way, it is not an errno). */
static int rtl_pci_tx(struct ieee80211_hw *hw,
		      struct ieee80211_sta *sta,
		      struct sk_buff *skb,
		      struct rtl_tcb_desc *ptcb_desc)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_sta_info *sta_entry = NULL;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl8192_tx_ring *ring;
	struct rtl_tx_desc *pdesc;
	u8 idx;
	u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
	unsigned long flags;
	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
	__le16 fc = rtl_get_fc(skb);
	u8 *pda_addr = hdr->addr1;
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	/*ssn */
	u8 tid = 0;
	u16 seq_number = 0;
	u8 own;
	u8 temp_one = 1;

	if (ieee80211_is_mgmt(fc))
		rtl_tx_mgmt_proc(hw, skb);

	/* In software PS, advertise power-management on outgoing data
	 * frames that do not already carry the PM flag. */
	if (rtlpriv->psc.sw_ps_enabled) {
		if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
		    !ieee80211_has_pm(fc))
			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
	}

	rtl_action_proc(hw, skb, true);

	/* Per-destination-class byte accounting. */
	if (is_multicast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesmulticast += skb->len;
	else if (is_broadcast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesbroadcast += skb->len;
	else
		rtlpriv->stats.txbytesunicast += skb->len;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
	ring = &rtlpci->tx_ring[hw_queue];
	/* The beacon queue always reuses descriptor 0; other queues use
	 * the first slot past the currently-queued frames. */
	if (hw_queue != BEACON_QUEUE)
		idx = (ring->idx + skb_queue_len(&ring->queue)) %
		      ring->entries;
	else
		idx = 0;

	pdesc = &ring->desc[idx];
	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
			true, HW_DESC_OWN);

	/* Descriptor still owned by the NIC: the ring is full, bail. */
	if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
			 hw_queue, ring->idx, idx,
			 skb_queue_len(&ring->queue));

		spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
		return skb->len;
	}

	/* Track the per-TID sequence number for QoS data frames. */
	if (ieee80211_is_data_qos(fc)) {
		tid = rtl_get_tid(skb);
		if (sta) {
			sta_entry = (struct rtl_sta_info *)sta->drv_priv;
			seq_number = (le16_to_cpu(hdr->seq_ctrl) &
				      IEEE80211_SCTL_SEQ) >> 4;
			seq_number += 1;

			if (!ieee80211_has_morefrags(hdr->frame_control))
				sta_entry->tids[tid].seq_number = seq_number;
		}
	}

	if (ieee80211_is_data(fc))
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);

	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
					info, sta, skb, hw_queue, ptcb_desc);

	__skb_queue_tail(&ring->queue, skb);

	/* Flip OWN last so the NIC only ever sees a fully-built
	 * descriptor. */
	rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
				    HW_DESC_OWN, &temp_one);


	/* Nearly full: stop the mac80211 queue before we actually run
	 * out of descriptors. */
	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
	    hw_queue != BEACON_QUEUE) {

		RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
			 "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
			 hw_queue, ring->idx, idx,
			 skb_queue_len(&ring->queue));

		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
	}

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	rtlpriv->cfg->ops->tx_polling(hw, hw_queue);

	return 0;
}
1528
/* Wait for the TX queues to drain (mac80211 flush hook). Polls each
 * non-beacon, non-command queue in 20 ms steps and gives up if the RF
 * is off, the HAL is stopped, or the poll budget is exhausted.
 * NOTE(review): the original comment says "1s", but 200 iterations of
 * msleep(20) is roughly 4 s — confirm which bound was intended.
 * The @drop argument is accepted but ignored. */
static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u16 i = 0;
	int queue_id;
	struct rtl8192_tx_ring *ring;

	if (mac->skip_scan)
		return;

	/* queue_id only advances once a queue is empty (or exempt), so
	 * a busy queue is re-polled after each sleep. */
	for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
		u32 queue_len;
		ring = &pcipriv->dev.tx_ring[queue_id];
		queue_len = skb_queue_len(&ring->queue);
		if (queue_len == 0 || queue_id == BEACON_QUEUE ||
			queue_id == TXCMD_QUEUE) {
			queue_id--;
			continue;
		} else {
			msleep(20);
			i++;
		}

		/* we just wait 1s for all queues */
		if (rtlpriv->psc.rfpwr_state == ERFOFF ||
			is_hal_stop(rtlhal) || i >= 200)
			return;
	}
}
1561
/* Undo rtl_pci_init()/_rtl_pci_init_struct(): free the DMA rings and
 * quiesce the deferred-work machinery (IRQ tasklet, LPS work and the
 * driver workqueue). */
static void rtl_pci_deinit(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	_rtl_pci_deinit_trx_ring(hw);

	/* Make sure no interrupt handler or deferred work still runs
	 * before the caller releases the remaining resources. */
	synchronize_irq(rtlpci->pdev->irq);
	tasklet_kill(&rtlpriv->works.irq_tasklet);
	cancel_work_sync(&rtlpriv->works.lps_change_work);

	flush_workqueue(rtlpriv->works.rtl_wq);
	destroy_workqueue(rtlpriv->works.rtl_wq);

}
1577
1578 static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1579 {
1580 struct rtl_priv *rtlpriv = rtl_priv(hw);
1581 int err;
1582
1583 _rtl_pci_init_struct(hw, pdev);
1584
1585 err = _rtl_pci_init_trx_ring(hw);
1586 if (err) {
1587 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1588 "tx ring initialization failed\n");
1589 return err;
1590 }
1591
1592 return 0;
1593 }
1594
/* Bring the adapter up (rtl_pci_ops.adapter_start): resync the rings,
 * run the chip-specific hw_init, enable interrupts and mark the HAL
 * started. Returns 0 on success or the hw_init error. */
static int rtl_pci_start(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));

	int err;

	/* Hardware and software ring state must agree before init. */
	rtl_pci_reset_trx_ring(hw);

	rtlpci->driver_is_goingto_unload = false;
	err = rtlpriv->cfg->ops->hw_init(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "Failed to config hardware!\n");
		return err;
	}

	rtlpriv->cfg->ops->enable_interrupt(hw);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");

	rtl_init_rx_config(hw);

	/*should be after adapter start and interrupt enable. */
	set_hal_start(rtlhal);

	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

	rtlpci->up_first_time = false;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
	return 0;
}
1629
/* Take the adapter down (rtl_pci_ops.adapter_stop): mark the HAL
 * stopped, disable interrupts, wait (bounded) for any in-flight RF
 * state change to finish, then power the hardware off. */
static void rtl_pci_stop(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u8 RFInProgressTimeOut = 0;

	/*
	 *should be before disable interrupt&adapter
	 *and will do it immediately.
	 */
	set_hal_stop(rtlhal);

	rtlpriv->cfg->ops->disable_interrupt(hw);
	cancel_work_sync(&rtlpriv->works.lps_change_work);

	/* Busy-wait (dropping the lock between polls) until any RF
	 * change in progress completes, for at most ~100 ms. */
	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	while (ppsc->rfchange_inprogress) {
		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
		if (RFInProgressTimeOut > 100) {
			spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
			break;
		}
		mdelay(1);
		RFInProgressTimeOut++;
		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	}
	ppsc->rfchange_inprogress = true;
	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

	rtlpci->driver_is_goingto_unload = true;
	rtlpriv->cfg->ops->hw_disable(hw);
	/* some things are not needed if firmware not available */
	if (!rtlpriv->max_fw_size)
		return;
	/* NOTE(review): on this early return rfchange_inprogress stays
	 * true — confirm that is intended for the no-firmware case. */
	rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);

	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	ppsc->rfchange_inprogress = false;
	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

	rtl_pci_enable_aspm(hw);
}
1675
/* Identify the chip variant from the PCI vendor/device/revision IDs,
 * record the bus topology (including the upstream PCIe bridge, used
 * later for ASPM tuning) and register this priv on the global list.
 * Returns false when the device should be handled by another driver
 * (e.g. RTL8192E belongs to r8192e_pci). */
static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
				  struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct pci_dev *bridge_pdev = pdev->bus->self;
	u16 venderid;
	u16 deviceid;
	u8 revisionid;
	u16 irqline;
	u8 tmp;

	pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
	venderid = pdev->vendor;
	deviceid = pdev->device;
	/* Config offset 0x08 is the standard PCI revision-ID register,
	 * 0x3C the interrupt-line register. */
	pci_read_config_byte(pdev, 0x8, &revisionid);
	pci_read_config_word(pdev, 0x3C, &irqline);

	/* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
	 * r8192e_pci, and RTL8192SE, which uses this driver. If the
	 * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
	 * the correct driver is r8192e_pci, thus this routine should
	 * return false.
	 */
	if (deviceid == RTL_PCI_8192SE_DID &&
	    revisionid == RTL_PCI_REVISION_ID_8192PCIE)
		return false;

	/* Map device/revision IDs onto the HAL hardware type. */
	if (deviceid == RTL_PCI_8192_DID ||
	    deviceid == RTL_PCI_0044_DID ||
	    deviceid == RTL_PCI_0047_DID ||
	    deviceid == RTL_PCI_8192SE_DID ||
	    deviceid == RTL_PCI_8174_DID ||
	    deviceid == RTL_PCI_8173_DID ||
	    deviceid == RTL_PCI_8172_DID ||
	    deviceid == RTL_PCI_8171_DID) {
		switch (revisionid) {
		case RTL_PCI_REVISION_ID_8192PCIE:
			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
				 "8192 PCI-E is found - vid/did=%x/%x\n",
				 venderid, deviceid);
			rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
			return false;
		case RTL_PCI_REVISION_ID_8192SE:
			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
				 "8192SE is found - vid/did=%x/%x\n",
				 venderid, deviceid);
			rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
				 "Err: Unknown device - vid/did=%x/%x\n",
				 venderid, deviceid);
			rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
			break;

		}
	} else if (deviceid == RTL_PCI_8723AE_DID) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "8723AE PCI-E is found - "
			 "vid/did=%x/%x\n", venderid, deviceid);
	} else if (deviceid == RTL_PCI_8192CET_DID ||
		   deviceid == RTL_PCI_8192CE_DID ||
		   deviceid == RTL_PCI_8191CE_DID ||
		   deviceid == RTL_PCI_8188CE_DID) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "8192C PCI-E is found - vid/did=%x/%x\n",
			 venderid, deviceid);
	} else if (deviceid == RTL_PCI_8192DE_DID ||
		   deviceid == RTL_PCI_8192DE_DID2) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "8192D PCI-E is found - vid/did=%x/%x\n",
			 venderid, deviceid);
	} else {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "Err: Unknown device - vid/did=%x/%x\n",
			 venderid, deviceid);

		rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
	}

	/* On the dual-MAC 8192DE the PCI revision selects which MAC
	 * this function instance drives. */
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
		if (revisionid == 0 || revisionid == 1) {
			if (revisionid == 0) {
				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
					 "Find 92DE MAC0\n");
				rtlhal->interfaceindex = 0;
			} else if (revisionid == 1) {
				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
					 "Find 92DE MAC1\n");
				rtlhal->interfaceindex = 1;
			}
		} else {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
				 "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
				 venderid, deviceid, revisionid);
			rtlhal->interfaceindex = 0;
		}
	}
	/*find bus info */
	pcipriv->ndis_adapter.busnumber = pdev->bus->number;
	pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
	pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);

	/* some ARM have no bridge_pdev and will crash here
	 * so we should check if bridge_pdev is NULL
	 */
	if (bridge_pdev) {
		/*find bridge info if available */
		pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
		for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
			if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
				pcipriv->ndis_adapter.pcibridge_vendor = tmp;
				RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
					 "Pci Bridge Vendor is found index: %d\n",
					 tmp);
				break;
			}
		}
	}

	/* Only reached with a non-NULL bridge_pdev, since the vendor is
	 * only set inside the block above. */
	if (pcipriv->ndis_adapter.pcibridge_vendor !=
		PCI_BRIDGE_VENDOR_UNKNOWN) {
		pcipriv->ndis_adapter.pcibridge_busnum =
		    bridge_pdev->bus->number;
		pcipriv->ndis_adapter.pcibridge_devnum =
		    PCI_SLOT(bridge_pdev->devfn);
		pcipriv->ndis_adapter.pcibridge_funcnum =
		    PCI_FUNC(bridge_pdev->devfn);
		pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
		    pci_pcie_cap(bridge_pdev);
		pcipriv->ndis_adapter.num4bytes =
		    (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;

		rtl_pci_get_linkcontrol_field(hw);

		if (pcipriv->ndis_adapter.pcibridge_vendor ==
		    PCI_BRIDGE_VENDOR_AMD) {
			pcipriv->ndis_adapter.amd_l1_patch =
			    rtl_pci_get_amd_l1_patch(hw);
		}
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
		 pcipriv->ndis_adapter.busnumber,
		 pcipriv->ndis_adapter.devnumber,
		 pcipriv->ndis_adapter.funcnumber,
		 pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
		 pcipriv->ndis_adapter.pcibridge_busnum,
		 pcipriv->ndis_adapter.pcibridge_devnum,
		 pcipriv->ndis_adapter.pcibridge_funcnum,
		 pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
		 pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
		 pcipriv->ndis_adapter.pcibridge_linkctrlreg,
		 pcipriv->ndis_adapter.amd_l1_patch);

	rtl_pci_parse_configuration(pdev, hw);
	list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);

	return true;
}
1845
/* PCI probe entry point: enable the device, set up 32-bit DMA, map the
 * register BAR, identify the chip, initialize the mac80211/core and
 * PCI software layers, and install the shared IRQ handler.
 * Returns 0 on success or a negative errno; the failN labels unwind in
 * reverse order of acquisition. */
int rtl_pci_probe(struct pci_dev *pdev,
		  const struct pci_device_id *id)
{
	struct ieee80211_hw *hw = NULL;

	struct rtl_priv *rtlpriv = NULL;
	struct rtl_pci_priv *pcipriv = NULL;
	struct rtl_pci *rtlpci;
	unsigned long pmem_start, pmem_len, pmem_flags;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		RT_ASSERT(false, "%s : Cannot enable new PCI device\n",
			  pci_name(pdev));
		return err;
	}

	/* The hardware is limited to 32-bit DMA addressing. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			RT_ASSERT(false,
				  "Unable to obtain 32bit DMA for consistent allocations\n");
			err = -ENOMEM;
			goto fail1;
		}
	}

	pci_set_master(pdev);

	hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
				sizeof(struct rtl_priv), &rtl_ops);
	if (!hw) {
		RT_ASSERT(false,
			  "%s : ieee80211 alloc failed\n", pci_name(pdev));
		err = -ENOMEM;
		goto fail1;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	rtlpriv = hw->priv;
	rtlpriv->hw = hw;
	pcipriv = (void *)rtlpriv->priv;
	pcipriv->dev.pdev = pdev;
	init_completion(&rtlpriv->firmware_loading_complete);

	/* init cfg & intf_ops */
	rtlpriv->rtlhal.interface = INTF_PCI;
	rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
	rtlpriv->intf_ops = &rtl_pci_ops;
	rtlpriv->glb_var = &global_var;

	/*
	 *init dbgp flags before all
	 *other functions, because we will
	 *use it in other funtions like
	 *RT_TRACE/RT_PRINT/RTL_PRINT_DATA
	 *you can not use these macro
	 *before this
	 */
	rtl_dbgp_flag_init(hw);

	/* MEM map */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		RT_ASSERT(false, "Can't obtain PCI resources\n");
		goto fail1;
	}

	pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
	pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
	pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);

	/*shared mem start */
	rtlpriv->io.pci_mem_start =
			(unsigned long)pci_iomap(pdev,
			rtlpriv->cfg->bar_id, pmem_len);
	if (rtlpriv->io.pci_mem_start == 0) {
		RT_ASSERT(false, "Can't map PCI mem\n");
		err = -ENOMEM;
		goto fail2;
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
		 pmem_start, pmem_len, pmem_flags,
		 rtlpriv->io.pci_mem_start);

	/* Disable Clk Request */
	pci_write_config_byte(pdev, 0x81, 0);
	/* leave D3 mode */
	pci_write_config_byte(pdev, 0x44, 0);
	pci_write_config_byte(pdev, 0x04, 0x06);
	pci_write_config_byte(pdev, 0x04, 0x07);

	/* find adapter */
	if (!_rtl_pci_find_adapter(pdev, hw)) {
		err = -ENODEV;
		goto fail3;
	}

	/* Init IO handler */
	_rtl_pci_io_handler_init(&pdev->dev, hw);

	/*like read eeprom and so on */
	rtlpriv->cfg->ops->read_eeprom_info(hw);

	/*aspm */
	rtl_pci_init_aspm(hw);

	/* Init mac80211 sw */
	err = rtl_init_core(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Can't allocate sw for mac80211\n");
		goto fail3;
	}

	/* Init PCI sw */
	err = rtl_pci_init(hw, pdev);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n");
		goto fail3;
	}

	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
		err = -ENODEV;
		goto fail3;
	}

	rtlpriv->cfg->ops->init_sw_leds(hw);

	err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "failed to create sysfs device attributes\n");
		goto fail3;
	}

	/* The IRQ is shared, so the handler must cope with interrupts
	 * that are not ours. */
	rtlpci = rtl_pcidev(pcipriv);
	err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
			  IRQF_SHARED, KBUILD_MODNAME, hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "%s: failed to register IRQ handler\n",
			 wiphy_name(hw->wiphy));
		goto fail3;
	}
	rtlpci->irq_alloc = 1;

	return 0;

fail3:
	/* NOTE(review): rtl_deinit_core() is called here even on paths
	 * where rtl_init_core() never ran or failed — confirm it is
	 * safe on uninitialized state. */
	rtl_deinit_core(hw);

	if (rtlpriv->io.pci_mem_start != 0)
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);

fail2:
	pci_release_regions(pdev);
	complete(&rtlpriv->firmware_loading_complete);

fail1:
	if (hw)
		ieee80211_free_hw(hw);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);

	return err;

}
EXPORT_SYMBOL(rtl_pci_probe);
2020
/* PCI remove entry point: unwind everything rtl_pci_probe() set up,
 * waiting first for the asynchronous firmware load to finish so the
 * callback cannot run against freed state. */
void rtl_pci_disconnect(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
	struct rtl_mac *rtlmac = rtl_mac(rtlpriv);

	/* just in case driver is removed before firmware callback */
	wait_for_completion(&rtlpriv->firmware_loading_complete);
	clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);

	sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);

	/*ieee80211_unregister_hw will call ops_stop */
	if (rtlmac->mac80211_registered == 1) {
		ieee80211_unregister_hw(hw);
		rtlmac->mac80211_registered = 0;
	} else {
		/* Never registered with mac80211: stop the adapter by
		 * hand instead. */
		rtl_deinit_deferred_work(hw);
		rtlpriv->intf_ops->adapter_stop(hw);
	}
	rtlpriv->cfg->ops->disable_interrupt(hw);

	/*deinit rfkill */
	rtl_deinit_rfkill(hw);

	rtl_pci_deinit(hw);
	rtl_deinit_core(hw);
	rtlpriv->cfg->ops->deinit_sw_vars(hw);

	if (rtlpci->irq_alloc) {
		synchronize_irq(rtlpci->pdev->irq);
		free_irq(rtlpci->pdev->irq, hw);
		rtlpci->irq_alloc = 0;
	}

	/* Remove this priv from the global list added in
	 * _rtl_pci_find_adapter(). */
	list_del(&rtlpriv->list);
	if (rtlpriv->io.pci_mem_start != 0) {
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
		pci_release_regions(pdev);
	}

	pci_disable_device(pdev);

	rtl_pci_disable_aspm(hw);

	pci_set_drvdata(pdev, NULL);

	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_pci_disconnect);
2073
2074 #ifdef CONFIG_PM_SLEEP
2075 /***************************************
2076 kernel pci power state define:
2077 PCI_D0 ((pci_power_t __force) 0)
2078 PCI_D1 ((pci_power_t __force) 1)
2079 PCI_D2 ((pci_power_t __force) 2)
2080 PCI_D3hot ((pci_power_t __force) 3)
2081 PCI_D3cold ((pci_power_t __force) 4)
2082 PCI_UNKNOWN ((pci_power_t __force) 5)
2083
2084 This function is called when system
2085 goes into suspend state mac80211 will
2086 call rtl_mac_stop() from the mac80211
2087 suspend function first, So there is
2088 no need to call hw_disable here.
2089 ****************************************/
2090 int rtl_pci_suspend(struct device *dev)
2091 {
2092 struct pci_dev *pdev = to_pci_dev(dev);
2093 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2094 struct rtl_priv *rtlpriv = rtl_priv(hw);
2095
2096 rtlpriv->cfg->ops->hw_suspend(hw);
2097 rtl_deinit_rfkill(hw);
2098
2099 return 0;
2100 }
2101 EXPORT_SYMBOL(rtl_pci_suspend);
2102
2103 int rtl_pci_resume(struct device *dev)
2104 {
2105 struct pci_dev *pdev = to_pci_dev(dev);
2106 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2107 struct rtl_priv *rtlpriv = rtl_priv(hw);
2108
2109 rtlpriv->cfg->ops->hw_resume(hw);
2110 rtl_init_rfkill(hw);
2111 return 0;
2112 }
2113 EXPORT_SYMBOL(rtl_pci_resume);
2114 #endif /* CONFIG_PM_SLEEP */
2115
/* Interface-ops table for the PCI transport: the bus-independent
 * rtlwifi core drives the hardware exclusively through these hooks. */
struct rtl_intf_ops rtl_pci_ops = {
	.read_efuse_byte = read_efuse_byte,
	.adapter_start = rtl_pci_start,
	.adapter_stop = rtl_pci_stop,
	.check_buddy_priv = rtl_pci_check_buddy_priv,
	.adapter_tx = rtl_pci_tx,
	.flush = rtl_pci_flush,
	.reset_trx_ring = rtl_pci_reset_trx_ring,
	.waitq_insert = rtl_pci_tx_chk_waitq_insert,

	/* ASPM power-management toggles. */
	.disable_aspm = rtl_pci_disable_aspm,
	.enable_aspm = rtl_pci_enable_aspm,
};
This page took 0.308805 seconds and 5 git commands to generate.