/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure the frame has the requested number of bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
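
/*
 * Informative sketch (not part of the original file): with hardware
 * crypto enabled the allocation above yields the following skb layout,
 * where the 4 bytes of slack let rt2x00queue_align_frame() shift the
 * frame onto a 4-byte boundary and the 8-byte areas hold IV/EIV and
 * ICV data:
 *
 *	| headroom: 4 + 8 | frame: data_size + desc_size | tailroom: 8 |
 *	  ^ skb_reserve()   ^ skb_put()
 */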

void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
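
/*
 * Worked example (informative): assuming ALIGN_SIZE(skb, offset) is the
 * usual rt2x00 helper returning ((unsigned long)(skb->data + offset)) & 3,
 * a frame whose data pointer sits 2 bytes past a 4-byte boundary gives
 * align = 2: skb_push(skb, 2) moves skb->data back onto the boundary,
 * the memmove() shifts the frame down by those 2 bytes, and skb_trim()
 * restores the original length.
 */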

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}
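
/*
 * Worked example (informative): assuming L2PAD_SIZE(hdrlen) is the usual
 * rt2x00 helper (-(hdrlen) & 3), a 26-byte QoS data header yields
 * l2pad = 2, so the payload starts at offset 26 + 2 = 28 and is again
 * 4-byte aligned. Header and payload are moved independently so both
 * end up aligned, with at most 3 bytes of padding between them.
 */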

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
			     L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
		return;

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);
}
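
/*
 * Informative note (not part of the original file): the IEEE 802.11
 * Sequence Control field keeps the fragment number in bits 0-3 and the
 * sequence number in bits 4-15. Masking with IEEE80211_SCTL_FRAG
 * preserves only the fragment bits, so adding 0x10 to intf->seqno
 * advances the sequence number by exactly one, e.g.:
 *
 *	intf->seqno = 0x0050;	// sequence number 5, fragment 0
 *	intf->seqno += 0x10;	// now 0x0060: sequence number 6
 */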

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}
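
/*
 * Worked example (informative): assuming the usual rt2x00 helpers
 * GET_DURATION(size, rate) = (size * 8 * 10) / rate and
 * GET_DURATION_RES(size, rate) = (size * 8 * 10) % rate, with the
 * bitrate in 100kbit/s units, a 1000-byte frame at 11 Mbps (bitrate
 * 110) gives 80000 / 110 = 727 us with residual 30: the duration is
 * rounded up to 728 us and, since bitrate == 110 and residual <= 30,
 * the CCK Length Extension bit (0x80) is set in the service field.
 */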

static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
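
/*
 * Informative example (not part of the original file): for an ordinary
 * unicast, non-fragmented data frame on a legacy rate the descriptor
 * built above typically ends up with ENTRY_TXD_ACK set (no
 * IEEE80211_TX_CTL_NO_ACK), ENTRY_TXD_FIRST_FRAGMENT set, and
 * rate_mode set to RATE_MODE_OFDM or RATE_MODE_CCK depending on the
 * selected hardware rate.
 */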

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 2) Rule 1 can be broken when the available entries
	 *    in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
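
/*
 * Informative note (not part of the original file): assuming the usual
 * rt2x00queue_threshold() helper, which compares the free entries
 * against queue->threshold (set to roughly a tenth of the queue size
 * in rt2x00queue_alloc_entries()), a 64-entry queue would be kicked
 * even mid-burst once fewer than 7 entries remain free.
 */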

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		return -ENOBUFS;
	}

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

	return 0;
}
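
/*
 * Informative summary (not part of the original file) of the TX path
 * above: claim the entry at Q_INDEX, build the TX descriptor, take
 * over skb->cb, strip or copy the IV, L2-pad or align the frame, hand
 * the data to the driver (mapping it for DMA when required), then
 * advance Q_INDEX, write the descriptor and kick the queue if needed.
 */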

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
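
/*
 * Worked example (informative): with queue->limit = 8, index_start = 6
 * and index_end = 2 the indices have wrapped, so the loops above visit
 * entries 6, 7, 0 and 1 in that order, preserving submission order
 * across the wrap of the ring.
 */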

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	queue->last_action[index] = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
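
/*
 * Informative note (not part of the original file): the queue is a ring
 * buffer in which Q_INDEX acts as the producer pointer and Q_INDEX_DONE
 * as the consumer pointer; queue->length counts the entries currently
 * in flight and queue->count the total number of completed frames.
 * E.g. after 10 submissions and 8 completions on a 16-entry queue:
 * index[Q_INDEX] = 10, index[Q_INDEX_DONE] = 8, length = 2, count = 8.
 */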

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		/* fall through */
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	unsigned int i;
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if driver supports flushing, we can only guarantee
	 * full support for flushing if the driver is able
	 * to cancel all pending frames (drop = true).
	 */
	if (drop && queue->rt2x00dev->ops->lib->flush_queue)
		queue->rt2x00dev->ops->lib->flush_queue(queue);

	/*
	 * When we don't want to drop any frames, or when
	 * the driver doesn't fully flush the queue correctly,
	 * we must wait for the queue to become empty.
	 */
	for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
		msleep(10);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++) {
		queue->index[i] = 0;
		queue->last_action[i] = jiffies;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
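
/*
 * Informative note (not part of the original file): the kcalloc() above
 * allocates one block that holds the entry array followed by all
 * per-entry private areas; QUEUE_ENTRY_PRIV_OFFSET computes the address
 * of entry i's private area inside that block:
 *
 *	| entry[0] .. entry[limit-1] | priv[0] .. priv[limit-1] |
 *	                               ^ base + limit * esize
 */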

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}
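
/*
 * Worked example (informative): with ops->tx_queues = 4 and an ATIM
 * queue required, data_queues = 2 + 4 + 1 = 7 and the single array is
 * laid out as rx = queue[0], tx = queue[1]..queue[4], bcn = queue[5]
 * and atim = queue[6].
 */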

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}