/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skb.
	 */
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for the ICV data as tailroom.
	 */
	if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Reserve the requested head room and set the skb length so that
	 * the requested tail room remains available behind the frame.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
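
/*
 * For illustration only (the concrete sizes below are hypothetical; the
 * real values are set by the driver's queue_init callback): assuming
 * data_size + desc_size + winfo_size = 2468 on a device with hardware
 * crypto support, the allocation above works out to
 *
 *	head_size  = 4 + 8 = 12 bytes of headroom (alignment + IV/EIV)
 *	frame_size = 2468  = bytes covered by skb_put()
 *	tail_size  = 8     = bytes left as tailroom for the ICV
 *
 * so after skb_reserve() and skb_put() the buffer still has exactly
 * tail_size bytes of tailroom available.
 */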

int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
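
/*
 * Worked example (the address is hypothetical): if skb->data starts at
 * an address ending in 0x02, ALIGN_SIZE(skb, 0) evaluates to 2. The
 * skb_push(skb, 2) above extends the data area into the headroom,
 * memmove() shifts the frame down by 2 bytes onto the 4-byte boundary,
 * and skb_trim() restores the original length. This relies on headroom
 * being available, e.g. the 4 bytes reserved by rt2x00queue_alloc_rxskb().
 */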

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}
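
/*
 * Worked example: L2PAD_SIZE() pads the header up to the next 4-byte
 * boundary. For a QoS data frame with a 26-byte IEEE 802.11 header,
 * l2pad = 2, giving the buffer layout
 *
 *	[26-byte header][2-byte pad][payload]
 *
 * so the payload starts 4-byte aligned at offset 28.
 * rt2x00queue_remove_l2pad() undoes this by moving the (small) header
 * up over the pad and pulling the skb, which is cheaper than moving
 * the typically much larger payload down.
 */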

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS)
		 * frames. To work around the problem, generate the seqno
		 * in software when QoS is disabled.
		 */
		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
		else
			/* H/W will generate sequence number */
			return;
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}
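
/*
 * For reference: the IEEE 802.11 Sequence Control field packs a 4-bit
 * fragment number into bits 0-3 and a 12-bit sequence number into bits
 * 4-15. That is why the software counter above advances in steps of
 * 0x10 (one unit in the sequence-number bits) and why the fragment
 * bits are preserved through IEEE80211_SCTL_FRAG. For example, a
 * counter value of 0x0030 encodes sequence number 3, fragment 0.
 */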

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}
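
/*
 * Worked example for the CCK branch above: hwrate->bitrate is in units
 * of 100 kbit/s, so 110 means 11 Mbit/s, and GET_DURATION() /
 * GET_DURATION_RES() compute (size * 8 * 10) / rate and its remainder.
 * For data_length = 1504 bytes at 11 Mbit/s:
 *
 *	duration = (1504 * 8 * 10) / 110 = 1093 us, residual = 90
 *
 * residual is non-zero, so duration is rounded up to 1094 us; and since
 * residual (90) is greater than 30, the 802.11b Length Extension bit
 * (0x80 in the SERVICE field) stays cleared.
 */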

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;

	if (sta) {
		txdesc->u.ht.mpdu_density = sta->ht_cap.ampdu_density;

		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Leave all other settings zero. */
		return;
	}

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate = ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen: we already checked that the entry
	 * was ours. When the hardware disagrees there has been
	 * queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS and CTS-to-self frames.
	 * 2) Rule 1 can be broken when the number of available entries
	 *    in the queue drops below a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the allocation fails we still send the BAR out, but we don't
	 * track it in our BAR list. As a result it will be reported back
	 * to mac80211 as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list,
	 * such that we can use RCU for low overhead in the RX path, since
	 * sending BARs and processing the corresponding BlockAck should be
	 * the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}
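
/*
 * A minimal sketch (not code from this file; the field names in the
 * condition are illustrative) of how an RX-path BlockAck handler can
 * walk the list under RCU. The real matching logic lives elsewhere in
 * rt2x00lib and also compares the TA and sequence numbers:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bar_entry, &rt2x00dev->bar_list, list) {
 *		if (!memcmp(bar_entry->ra, ba_ta, ETH_ALEN))
 *			bar_entry->block_acked = 1;
 *	}
 *	rcu_read_unlock();
 *
 * Readers take no lock at all; only writers serialize on bar_list_lock.
 */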

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       struct ieee80211_sta *sta, bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc;
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	/*
	 * All information is retrieved from the skb->cb array;
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However, some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bh disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	skbdesc->entry = entry;
	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}
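
/*
 * A minimal sketch of how the mac80211 .tx hook feeds this function
 * (illustrative; the real caller, rt2x00mac_tx(), performs additional
 * bookkeeping such as pausing the queue when it fills up):
 *
 *	struct data_queue *queue =
 *		rt2x00queue_get_tx_queue(rt2x00dev, skb_get_queue_mapping(skb));
 *
 *	if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
 *		ieee80211_free_txskb(hw, skb);
 *
 * "local" is true only for driver-generated frames that did not come
 * from mac80211 and thus must not be reported back to it.
 */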

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc;
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over. If during
	 * our loop an extra entry is set to pending, it should not be
	 * kicked during this run, since it is part of another TX
	 * operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
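
/*
 * A minimal usage sketch (the function names are made up): a driver can
 * push every pending entry between the DMA-done index and the
 * driver-owned index to the hardware like this:
 *
 *	static bool example_kick_entry(struct queue_entry *entry, void *data)
 *	{
 *		if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
 *			example_write_entry_to_hw(entry);
 *		return false;	// false = keep iterating
 *	}
 *
 *	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
 *				   NULL, example_kick_entry);
 *
 * Returning true from the callback stops the walk early.
 */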

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
			   index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
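
/*
 * Worked example (hypothetical numbers): with queue->limit = 64 and
 * queue->index[Q_INDEX] currently 63, the next
 * rt2x00queue_index_inc(entry, Q_INDEX) wraps the index back to 0 and
 * increments queue->length to record one more frame in flight.
 * Q_INDEX_DONE trails Q_INDEX around the same ring: the distance
 * between them is queue->length, while queue->count accumulates the
 * total number of completed frames.
 */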

static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	/*
	 * If we are not supposed to drop any pending
	 * frames, this means we must force a start (=kick)
	 * to the queue to make sure the hardware will
	 * start transmitting.
	 */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * Check if driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), queue->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
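
/*
 * Worked example for the layout above (sizes are hypothetical): with
 * queue->limit = 4, sizeof(struct queue_entry) = 64 and priv_size = 16,
 * kcalloc() reserves 4 * (64 + 16) = 320 bytes laid out as
 *
 *	[entry 0][entry 1][entry 2][entry 3][priv 0][priv 1][priv 2][priv 3]
 *
 * i.e. all queue_entry structures first, followed by all per-driver
 * private areas. QUEUE_ENTRY_PRIV_OFFSET(entries, 2, 4, 64, 16) then
 * points entry 2's priv_data at byte 4 * 64 + 2 * 16 = 288 of the block.
 */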

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;

	rt2x00dev->ops->queue_init(queue);

	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}
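
/*
 * Worked example (hypothetical limit): for a queue with
 * queue->limit = 64, the threshold becomes DIV_ROUND_UP(64, 10) = 7,
 * so rt2x00queue_threshold() reports the queue as nearly full once no
 * more than 7 free entries remain, at which point
 * rt2x00queue_kick_tx_queue() kicks the hardware even mid-burst.
 * cw_min and cw_max are stored as exponents: 5 and 10 correspond to
 * contention windows of 2^5 = 32 and 2^10 = 1024 slots.
 */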

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		rt2x00_err(rt2x00dev, "Queue allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}
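
/*
 * Worked example (assuming ops->tx_queues = 4 and an ATIM queue): the
 * single kcalloc() above yields 7 consecutive data_queue structures,
 *
 *	queue[0]		rx
 *	queue[1] - queue[4]	tx (QID_AC_VO, _VI, _BE, _BK)
 *	queue[5]		bcn
 *	queue[6]		atim
 *
 * which is why rt2x00queue_free() below only needs to kfree() the rx
 * pointer to release all queues at once.
 */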

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}