/*
 * Copyright (c) 2010 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Fill a local 'struct ath9k_tx_queue_info qi' with driver defaults:
 * the given subtype, hardware-default AIFS/CWmin/CWmax, and EOL/DESC
 * TX interrupts enabled.  Wrapped in do/while(0) so it behaves as a
 * single statement in all contexts.
 */
#define ATH9K_HTC_INIT_TXQ(subtype) do {			\
		qi.tqi_subtype = subtype;			\
		qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_physCompBuf = 0;				\
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |	\
			TXQ_FLAG_TXDESCINT_ENABLE;		\
	} while (0)
33 int get_hw_qnum(u16 queue
, int *hwq_map
)
37 return hwq_map
[WME_AC_VO
];
39 return hwq_map
[WME_AC_VI
];
41 return hwq_map
[WME_AC_BE
];
43 return hwq_map
[WME_AC_BK
];
45 return hwq_map
[WME_AC_BE
];
49 int ath_htc_txq_update(struct ath9k_htc_priv
*priv
, int qnum
,
50 struct ath9k_tx_queue_info
*qinfo
)
52 struct ath_hw
*ah
= priv
->ah
;
54 struct ath9k_tx_queue_info qi
;
56 ath9k_hw_get_txq_props(ah
, qnum
, &qi
);
58 qi
.tqi_aifs
= qinfo
->tqi_aifs
;
59 qi
.tqi_cwmin
= qinfo
->tqi_cwmin
/ 2; /* XXX */
60 qi
.tqi_cwmax
= qinfo
->tqi_cwmax
;
61 qi
.tqi_burstTime
= qinfo
->tqi_burstTime
;
62 qi
.tqi_readyTime
= qinfo
->tqi_readyTime
;
64 if (!ath9k_hw_set_txq_props(ah
, qnum
, &qi
)) {
65 ath_print(ath9k_hw_common(ah
), ATH_DBG_FATAL
,
66 "Unable to update hardware queue %u!\n", qnum
);
69 ath9k_hw_resettxqueue(ah
, qnum
);
75 int ath9k_htc_tx_start(struct ath9k_htc_priv
*priv
, struct sk_buff
*skb
)
77 struct ieee80211_hdr
*hdr
;
78 struct ieee80211_tx_info
*tx_info
= IEEE80211_SKB_CB(skb
);
79 struct ieee80211_sta
*sta
= tx_info
->control
.sta
;
80 struct ath9k_htc_sta
*ista
;
81 struct ath9k_htc_tx_ctl tx_ctl
;
82 enum htc_endpoint_id epid
;
88 hdr
= (struct ieee80211_hdr
*) skb
->data
;
89 fc
= hdr
->frame_control
;
91 if (tx_info
->control
.vif
&&
92 (struct ath9k_htc_vif
*) tx_info
->control
.vif
->drv_priv
)
93 vif_idx
= ((struct ath9k_htc_vif
*)
94 tx_info
->control
.vif
->drv_priv
)->index
;
96 vif_idx
= priv
->nvifs
;
99 ista
= (struct ath9k_htc_sta
*) sta
->drv_priv
;
100 sta_idx
= ista
->index
;
105 memset(&tx_ctl
, 0, sizeof(struct ath9k_htc_tx_ctl
));
107 if (ieee80211_is_data(fc
)) {
108 struct tx_frame_hdr tx_hdr
;
111 memset(&tx_hdr
, 0, sizeof(struct tx_frame_hdr
));
113 tx_hdr
.node_idx
= sta_idx
;
114 tx_hdr
.vif_idx
= vif_idx
;
116 if (tx_info
->flags
& IEEE80211_TX_CTL_AMPDU
) {
117 tx_ctl
.type
= ATH9K_HTC_AMPDU
;
118 tx_hdr
.data_type
= ATH9K_HTC_AMPDU
;
120 tx_ctl
.type
= ATH9K_HTC_NORMAL
;
121 tx_hdr
.data_type
= ATH9K_HTC_NORMAL
;
124 if (ieee80211_is_data(fc
)) {
125 qc
= ieee80211_get_qos_ctl(hdr
);
126 tx_hdr
.tidno
= qc
[0] & IEEE80211_QOS_CTL_TID_MASK
;
129 /* Check for RTS protection */
130 if (priv
->hw
->wiphy
->rts_threshold
!= (u32
) -1)
131 if (skb
->len
> priv
->hw
->wiphy
->rts_threshold
)
132 tx_hdr
.flags
|= ATH9K_HTC_TX_RTSCTS
;
135 if (!(tx_hdr
.flags
& ATH9K_HTC_TX_RTSCTS
) &&
136 (priv
->op_flags
& OP_PROTECT_ENABLE
))
137 tx_hdr
.flags
|= ATH9K_HTC_TX_CTSONLY
;
139 tx_hdr
.key_type
= ath9k_cmn_get_hw_crypto_keytype(skb
);
140 if (tx_hdr
.key_type
== ATH9K_KEY_TYPE_CLEAR
)
141 tx_hdr
.keyix
= (u8
) ATH9K_TXKEYIX_INVALID
;
143 tx_hdr
.keyix
= tx_info
->control
.hw_key
->hw_key_idx
;
145 tx_fhdr
= skb_push(skb
, sizeof(tx_hdr
));
146 memcpy(tx_fhdr
, (u8
*) &tx_hdr
, sizeof(tx_hdr
));
148 qnum
= skb_get_queue_mapping(skb
);
152 TX_QSTAT_INC(WME_AC_VO
);
153 epid
= priv
->data_vo_ep
;
156 TX_QSTAT_INC(WME_AC_VI
);
157 epid
= priv
->data_vi_ep
;
160 TX_QSTAT_INC(WME_AC_BE
);
161 epid
= priv
->data_be_ep
;
165 TX_QSTAT_INC(WME_AC_BK
);
166 epid
= priv
->data_bk_ep
;
170 struct tx_mgmt_hdr mgmt_hdr
;
172 memset(&mgmt_hdr
, 0, sizeof(struct tx_mgmt_hdr
));
174 tx_ctl
.type
= ATH9K_HTC_NORMAL
;
176 mgmt_hdr
.node_idx
= sta_idx
;
177 mgmt_hdr
.vif_idx
= vif_idx
;
181 mgmt_hdr
.key_type
= ath9k_cmn_get_hw_crypto_keytype(skb
);
182 if (mgmt_hdr
.key_type
== ATH9K_KEY_TYPE_CLEAR
)
183 mgmt_hdr
.keyix
= (u8
) ATH9K_TXKEYIX_INVALID
;
185 mgmt_hdr
.keyix
= tx_info
->control
.hw_key
->hw_key_idx
;
187 tx_fhdr
= skb_push(skb
, sizeof(mgmt_hdr
));
188 memcpy(tx_fhdr
, (u8
*) &mgmt_hdr
, sizeof(mgmt_hdr
));
189 epid
= priv
->mgmt_ep
;
192 return htc_send(priv
->htc
, skb
, epid
, &tx_ctl
);
195 static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv
*priv
,
196 struct ath9k_htc_sta
*ista
, u8 tid
)
200 spin_lock_bh(&priv
->tx_lock
);
201 if ((tid
< ATH9K_HTC_MAX_TID
) && (ista
->tid_state
[tid
] == AGGR_STOP
))
203 spin_unlock_bh(&priv
->tx_lock
);
208 void ath9k_tx_tasklet(unsigned long data
)
210 struct ath9k_htc_priv
*priv
= (struct ath9k_htc_priv
*)data
;
211 struct ieee80211_sta
*sta
;
212 struct ieee80211_hdr
*hdr
;
213 struct ieee80211_tx_info
*tx_info
;
214 struct sk_buff
*skb
= NULL
;
217 while ((skb
= skb_dequeue(&priv
->tx_queue
)) != NULL
) {
219 hdr
= (struct ieee80211_hdr
*) skb
->data
;
220 fc
= hdr
->frame_control
;
221 tx_info
= IEEE80211_SKB_CB(skb
);
223 memset(&tx_info
->status
, 0, sizeof(tx_info
->status
));
227 sta
= ieee80211_find_sta(priv
->vif
, hdr
->addr1
);
230 ieee80211_tx_status(priv
->hw
, skb
);
234 /* Check if we need to start aggregation */
236 if (sta
&& conf_is_ht(&priv
->hw
->conf
) &&
237 !(skb
->protocol
== cpu_to_be16(ETH_P_PAE
))) {
238 if (ieee80211_is_data_qos(fc
)) {
240 struct ath9k_htc_sta
*ista
;
242 qc
= ieee80211_get_qos_ctl(hdr
);
244 ista
= (struct ath9k_htc_sta
*)sta
->drv_priv
;
246 if (ath9k_htc_check_tx_aggr(priv
, ista
, tid
)) {
247 ieee80211_start_tx_ba_session(sta
, tid
);
248 spin_lock_bh(&priv
->tx_lock
);
249 ista
->tid_state
[tid
] = AGGR_PROGRESS
;
250 spin_unlock_bh(&priv
->tx_lock
);
257 /* Send status to mac80211 */
258 ieee80211_tx_status(priv
->hw
, skb
);
261 /* Wake TX queues if needed */
262 spin_lock_bh(&priv
->tx_lock
);
263 if (priv
->tx_queues_stop
) {
264 priv
->tx_queues_stop
= false;
265 spin_unlock_bh(&priv
->tx_lock
);
266 ath_print(ath9k_hw_common(priv
->ah
), ATH_DBG_XMIT
,
267 "Waking up TX queues\n");
268 ieee80211_wake_queues(priv
->hw
);
271 spin_unlock_bh(&priv
->tx_lock
);
274 void ath9k_htc_txep(void *drv_priv
, struct sk_buff
*skb
,
275 enum htc_endpoint_id ep_id
, bool txok
)
277 struct ath9k_htc_priv
*priv
= (struct ath9k_htc_priv
*) drv_priv
;
278 struct ath_common
*common
= ath9k_hw_common(priv
->ah
);
279 struct ieee80211_tx_info
*tx_info
;
284 if (ep_id
== priv
->mgmt_ep
) {
285 skb_pull(skb
, sizeof(struct tx_mgmt_hdr
));
286 } else if ((ep_id
== priv
->data_bk_ep
) ||
287 (ep_id
== priv
->data_be_ep
) ||
288 (ep_id
== priv
->data_vi_ep
) ||
289 (ep_id
== priv
->data_vo_ep
)) {
290 skb_pull(skb
, sizeof(struct tx_frame_hdr
));
292 ath_print(common
, ATH_DBG_FATAL
,
293 "Unsupported TX EPID: %d\n", ep_id
);
294 dev_kfree_skb_any(skb
);
298 tx_info
= IEEE80211_SKB_CB(skb
);
301 tx_info
->flags
|= IEEE80211_TX_STAT_ACK
;
303 skb_queue_tail(&priv
->tx_queue
, skb
);
304 tasklet_schedule(&priv
->tx_tasklet
);
307 int ath9k_tx_init(struct ath9k_htc_priv
*priv
)
309 skb_queue_head_init(&priv
->tx_queue
);
/*
 * TX teardown counterpart of ath9k_tx_init().  Nothing to release at
 * present; kept as a hook for symmetry with the init path.
 */
void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
{

}
318 bool ath9k_htc_txq_setup(struct ath9k_htc_priv
*priv
, int subtype
)
320 struct ath_hw
*ah
= priv
->ah
;
321 struct ath_common
*common
= ath9k_hw_common(ah
);
322 struct ath9k_tx_queue_info qi
;
325 memset(&qi
, 0, sizeof(qi
));
326 ATH9K_HTC_INIT_TXQ(subtype
);
328 qnum
= ath9k_hw_setuptxqueue(priv
->ah
, ATH9K_TX_QUEUE_DATA
, &qi
);
332 if (qnum
>= ARRAY_SIZE(priv
->hwq_map
)) {
333 ath_print(common
, ATH_DBG_FATAL
,
334 "qnum %u out of range, max %u!\n",
335 qnum
, (unsigned int)ARRAY_SIZE(priv
->hwq_map
));
336 ath9k_hw_releasetxqueue(ah
, qnum
);
340 priv
->hwq_map
[subtype
] = qnum
;
344 int ath9k_htc_cabq_setup(struct ath9k_htc_priv
*priv
)
346 struct ath9k_tx_queue_info qi
;
348 memset(&qi
, 0, sizeof(qi
));
349 ATH9K_HTC_INIT_TXQ(0);
351 return ath9k_hw_setuptxqueue(priv
->ah
, ATH9K_TX_QUEUE_CAB
, &qi
);
359 * Calculate the RX filter to be set in the HW.
361 u32
ath9k_htc_calcrxfilter(struct ath9k_htc_priv
*priv
)
363 #define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
365 struct ath_hw
*ah
= priv
->ah
;
368 rfilt
= (ath9k_hw_getrxfilter(ah
) & RX_FILTER_PRESERVE
)
369 | ATH9K_RX_FILTER_UCAST
| ATH9K_RX_FILTER_BCAST
370 | ATH9K_RX_FILTER_MCAST
;
372 /* If not a STA, enable processing of Probe Requests */
373 if (ah
->opmode
!= NL80211_IFTYPE_STATION
)
374 rfilt
|= ATH9K_RX_FILTER_PROBEREQ
;
377 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
378 * mode interface or when in monitor mode. AP mode does not need this
379 * since it receives all in-BSS frames anyway.
381 if (((ah
->opmode
!= NL80211_IFTYPE_AP
) &&
382 (priv
->rxfilter
& FIF_PROMISC_IN_BSS
)) ||
383 (ah
->opmode
== NL80211_IFTYPE_MONITOR
))
384 rfilt
|= ATH9K_RX_FILTER_PROM
;
386 if (priv
->rxfilter
& FIF_CONTROL
)
387 rfilt
|= ATH9K_RX_FILTER_CONTROL
;
389 if ((ah
->opmode
== NL80211_IFTYPE_STATION
) &&
390 !(priv
->rxfilter
& FIF_BCN_PRBRESP_PROMISC
))
391 rfilt
|= ATH9K_RX_FILTER_MYBEACON
;
393 rfilt
|= ATH9K_RX_FILTER_BEACON
;
395 if (conf_is_ht(&priv
->hw
->conf
))
396 rfilt
|= ATH9K_RX_FILTER_COMP_BAR
;
400 #undef RX_FILTER_PRESERVE
404 * Recv initialization for opmode change.
406 static void ath9k_htc_opmode_init(struct ath9k_htc_priv
*priv
)
408 struct ath_hw
*ah
= priv
->ah
;
409 struct ath_common
*common
= ath9k_hw_common(ah
);
413 /* configure rx filter */
414 rfilt
= ath9k_htc_calcrxfilter(priv
);
415 ath9k_hw_setrxfilter(ah
, rfilt
);
417 /* configure bssid mask */
418 if (ah
->caps
.hw_caps
& ATH9K_HW_CAP_BSSIDMASK
)
419 ath_hw_setbssidmask(common
);
421 /* configure operational mode */
422 ath9k_hw_setopmode(ah
);
424 /* calculate and install multicast filter */
425 mfilt
[0] = mfilt
[1] = ~0;
426 ath9k_hw_setmcastfilter(ah
, mfilt
[0], mfilt
[1]);
429 void ath9k_host_rx_init(struct ath9k_htc_priv
*priv
)
431 ath9k_hw_rxena(priv
->ah
);
432 ath9k_htc_opmode_init(priv
);
433 ath9k_hw_startpcureceive(priv
->ah
, (priv
->op_flags
& OP_SCANNING
));
434 priv
->rx
.last_rssi
= ATH_RSSI_DUMMY_MARKER
;
437 static void ath9k_process_rate(struct ieee80211_hw
*hw
,
438 struct ieee80211_rx_status
*rxs
,
439 u8 rx_rate
, u8 rs_flags
)
441 struct ieee80211_supported_band
*sband
;
442 enum ieee80211_band band
;
445 if (rx_rate
& 0x80) {
447 rxs
->flag
|= RX_FLAG_HT
;
448 if (rs_flags
& ATH9K_RX_2040
)
449 rxs
->flag
|= RX_FLAG_40MHZ
;
450 if (rs_flags
& ATH9K_RX_GI
)
451 rxs
->flag
|= RX_FLAG_SHORT_GI
;
452 rxs
->rate_idx
= rx_rate
& 0x7f;
456 band
= hw
->conf
.channel
->band
;
457 sband
= hw
->wiphy
->bands
[band
];
459 for (i
= 0; i
< sband
->n_bitrates
; i
++) {
460 if (sband
->bitrates
[i
].hw_value
== rx_rate
) {
464 if (sband
->bitrates
[i
].hw_value_short
== rx_rate
) {
466 rxs
->flag
|= RX_FLAG_SHORTPRE
;
473 static bool ath9k_rx_prepare(struct ath9k_htc_priv
*priv
,
474 struct ath9k_htc_rxbuf
*rxbuf
,
475 struct ieee80211_rx_status
*rx_status
)
478 struct ieee80211_hdr
*hdr
;
479 struct ieee80211_hw
*hw
= priv
->hw
;
480 struct sk_buff
*skb
= rxbuf
->skb
;
481 struct ath_common
*common
= ath9k_hw_common(priv
->ah
);
482 struct ath_htc_rx_status
*rxstatus
;
483 int hdrlen
, padpos
, padsize
;
484 int last_rssi
= ATH_RSSI_DUMMY_MARKER
;
487 if (skb
->len
<= HTC_RX_FRAME_HEADER_SIZE
) {
488 ath_print(common
, ATH_DBG_FATAL
,
489 "Corrupted RX frame, dropping\n");
493 rxstatus
= (struct ath_htc_rx_status
*)skb
->data
;
495 if (be16_to_cpu(rxstatus
->rs_datalen
) -
496 (skb
->len
- HTC_RX_FRAME_HEADER_SIZE
) != 0) {
497 ath_print(common
, ATH_DBG_FATAL
,
498 "Corrupted RX data len, dropping "
499 "(dlen: %d, skblen: %d)\n",
500 rxstatus
->rs_datalen
, skb
->len
);
504 /* Get the RX status information */
505 memcpy(&rxbuf
->rxstatus
, rxstatus
, HTC_RX_FRAME_HEADER_SIZE
);
506 skb_pull(skb
, HTC_RX_FRAME_HEADER_SIZE
);
508 hdr
= (struct ieee80211_hdr
*)skb
->data
;
509 fc
= hdr
->frame_control
;
510 hdrlen
= ieee80211_get_hdrlen_from_skb(skb
);
512 padpos
= ath9k_cmn_padpos(fc
);
514 padsize
= padpos
& 3;
515 if (padsize
&& skb
->len
>= padpos
+padsize
+FCS_LEN
) {
516 memmove(skb
->data
+ padsize
, skb
->data
, padpos
);
517 skb_pull(skb
, padsize
);
520 memset(rx_status
, 0, sizeof(struct ieee80211_rx_status
));
522 if (rxbuf
->rxstatus
.rs_status
!= 0) {
523 if (rxbuf
->rxstatus
.rs_status
& ATH9K_RXERR_CRC
)
524 rx_status
->flag
|= RX_FLAG_FAILED_FCS_CRC
;
525 if (rxbuf
->rxstatus
.rs_status
& ATH9K_RXERR_PHY
)
528 if (rxbuf
->rxstatus
.rs_status
& ATH9K_RXERR_DECRYPT
) {
530 } else if (rxbuf
->rxstatus
.rs_status
& ATH9K_RXERR_MIC
) {
531 if (ieee80211_is_ctl(fc
))
533 * Sometimes, we get invalid
534 * MIC failures on valid control frames.
535 * Remove these mic errors.
537 rxbuf
->rxstatus
.rs_status
&= ~ATH9K_RXERR_MIC
;
539 rx_status
->flag
|= RX_FLAG_MMIC_ERROR
;
543 * Reject error frames with the exception of
544 * decryption and MIC failures. For monitor mode,
545 * we also ignore the CRC error.
547 if (priv
->ah
->opmode
== NL80211_IFTYPE_MONITOR
) {
548 if (rxbuf
->rxstatus
.rs_status
&
549 ~(ATH9K_RXERR_DECRYPT
| ATH9K_RXERR_MIC
|
553 if (rxbuf
->rxstatus
.rs_status
&
554 ~(ATH9K_RXERR_DECRYPT
| ATH9K_RXERR_MIC
)) {
560 if (!(rxbuf
->rxstatus
.rs_status
& ATH9K_RXERR_DECRYPT
)) {
562 keyix
= rxbuf
->rxstatus
.rs_keyix
;
563 if (keyix
!= ATH9K_RXKEYIX_INVALID
) {
564 rx_status
->flag
|= RX_FLAG_DECRYPTED
;
565 } else if (ieee80211_has_protected(fc
) &&
566 skb
->len
>= hdrlen
+ 4) {
567 keyix
= skb
->data
[hdrlen
+ 3] >> 6;
568 if (test_bit(keyix
, common
->keymap
))
569 rx_status
->flag
|= RX_FLAG_DECRYPTED
;
573 ath9k_process_rate(hw
, rx_status
, rxbuf
->rxstatus
.rs_rate
,
574 rxbuf
->rxstatus
.rs_flags
);
576 if (priv
->op_flags
& OP_ASSOCIATED
) {
577 if (rxbuf
->rxstatus
.rs_rssi
!= ATH9K_RSSI_BAD
&&
578 !rxbuf
->rxstatus
.rs_moreaggr
)
579 ATH_RSSI_LPF(priv
->rx
.last_rssi
,
580 rxbuf
->rxstatus
.rs_rssi
);
582 last_rssi
= priv
->rx
.last_rssi
;
584 if (likely(last_rssi
!= ATH_RSSI_DUMMY_MARKER
))
585 rxbuf
->rxstatus
.rs_rssi
= ATH_EP_RND(last_rssi
,
586 ATH_RSSI_EP_MULTIPLIER
);
588 if (rxbuf
->rxstatus
.rs_rssi
< 0)
589 rxbuf
->rxstatus
.rs_rssi
= 0;
591 if (ieee80211_is_beacon(fc
))
592 priv
->ah
->stats
.avgbrssi
= rxbuf
->rxstatus
.rs_rssi
;
595 rx_status
->mactime
= be64_to_cpu(rxbuf
->rxstatus
.rs_tstamp
);
596 rx_status
->band
= hw
->conf
.channel
->band
;
597 rx_status
->freq
= hw
->conf
.channel
->center_freq
;
598 rx_status
->signal
= rxbuf
->rxstatus
.rs_rssi
+ ATH_DEFAULT_NOISE_FLOOR
;
599 rx_status
->antenna
= rxbuf
->rxstatus
.rs_antenna
;
600 rx_status
->flag
|= RX_FLAG_TSFT
;
609 * FIXME: Handle FLUSH later on.
611 void ath9k_rx_tasklet(unsigned long data
)
613 struct ath9k_htc_priv
*priv
= (struct ath9k_htc_priv
*)data
;
614 struct ath9k_htc_rxbuf
*rxbuf
= NULL
, *tmp_buf
= NULL
;
615 struct ieee80211_rx_status rx_status
;
618 struct ieee80211_hdr
*hdr
;
621 spin_lock_irqsave(&priv
->rx
.rxbuflock
, flags
);
622 list_for_each_entry(tmp_buf
, &priv
->rx
.rxbuf
, list
) {
623 if (tmp_buf
->in_process
) {
630 spin_unlock_irqrestore(&priv
->rx
.rxbuflock
, flags
);
637 if (!ath9k_rx_prepare(priv
, rxbuf
, &rx_status
)) {
638 dev_kfree_skb_any(rxbuf
->skb
);
642 memcpy(IEEE80211_SKB_RXCB(rxbuf
->skb
), &rx_status
,
643 sizeof(struct ieee80211_rx_status
));
645 hdr
= (struct ieee80211_hdr
*) skb
->data
;
647 if (ieee80211_is_beacon(hdr
->frame_control
) && priv
->ps_enabled
)
648 ieee80211_queue_work(priv
->hw
, &priv
->ps_work
);
650 spin_unlock_irqrestore(&priv
->rx
.rxbuflock
, flags
);
652 ieee80211_rx(priv
->hw
, skb
);
654 spin_lock_irqsave(&priv
->rx
.rxbuflock
, flags
);
656 rxbuf
->in_process
= false;
658 list_move_tail(&rxbuf
->list
, &priv
->rx
.rxbuf
);
660 spin_unlock_irqrestore(&priv
->rx
.rxbuflock
, flags
);
665 void ath9k_htc_rxep(void *drv_priv
, struct sk_buff
*skb
,
666 enum htc_endpoint_id ep_id
)
668 struct ath9k_htc_priv
*priv
= (struct ath9k_htc_priv
*)drv_priv
;
669 struct ath_hw
*ah
= priv
->ah
;
670 struct ath_common
*common
= ath9k_hw_common(ah
);
671 struct ath9k_htc_rxbuf
*rxbuf
= NULL
, *tmp_buf
= NULL
;
673 spin_lock(&priv
->rx
.rxbuflock
);
674 list_for_each_entry(tmp_buf
, &priv
->rx
.rxbuf
, list
) {
675 if (!tmp_buf
->in_process
) {
680 spin_unlock(&priv
->rx
.rxbuflock
);
683 ath_print(common
, ATH_DBG_ANY
,
684 "No free RX buffer\n");
688 spin_lock(&priv
->rx
.rxbuflock
);
690 rxbuf
->in_process
= true;
691 spin_unlock(&priv
->rx
.rxbuflock
);
693 tasklet_schedule(&priv
->rx_tasklet
);
696 dev_kfree_skb_any(skb
);
699 /* FIXME: Locking for cleanup/init */
701 void ath9k_rx_cleanup(struct ath9k_htc_priv
*priv
)
703 struct ath9k_htc_rxbuf
*rxbuf
, *tbuf
;
705 list_for_each_entry_safe(rxbuf
, tbuf
, &priv
->rx
.rxbuf
, list
) {
706 list_del(&rxbuf
->list
);
708 dev_kfree_skb_any(rxbuf
->skb
);
713 int ath9k_rx_init(struct ath9k_htc_priv
*priv
)
715 struct ath_hw
*ah
= priv
->ah
;
716 struct ath_common
*common
= ath9k_hw_common(ah
);
717 struct ath9k_htc_rxbuf
*rxbuf
;
720 INIT_LIST_HEAD(&priv
->rx
.rxbuf
);
721 spin_lock_init(&priv
->rx
.rxbuflock
);
723 for (i
= 0; i
< ATH9K_HTC_RXBUF
; i
++) {
724 rxbuf
= kzalloc(sizeof(struct ath9k_htc_rxbuf
), GFP_KERNEL
);
726 ath_print(common
, ATH_DBG_FATAL
,
727 "Unable to allocate RX buffers\n");
730 list_add_tail(&rxbuf
->list
, &priv
->rx
.rxbuf
);
736 ath9k_rx_cleanup(priv
);