Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v3.15 1333 lines 35 kB view raw
1/* 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> 3 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com> 4 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> 5 <http://rt2x00.serialmonkey.com> 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License as published by 9 the Free Software Foundation; either version 2 of the License, or 10 (at your option) any later version. 11 12 This program is distributed in the hope that it will be useful, 13 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 GNU General Public License for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with this program; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 21/* 22 Module: rt2x00lib 23 Abstract: rt2x00 queue specific routines. 24 */ 25 26#include <linux/slab.h> 27#include <linux/kernel.h> 28#include <linux/module.h> 29#include <linux/dma-mapping.h> 30 31#include "rt2x00.h" 32#include "rt2x00lib.h" 33 34struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp) 35{ 36 struct data_queue *queue = entry->queue; 37 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; 38 struct sk_buff *skb; 39 struct skb_frame_desc *skbdesc; 40 unsigned int frame_size; 41 unsigned int head_size = 0; 42 unsigned int tail_size = 0; 43 44 /* 45 * The frame size includes descriptor size, because the 46 * hardware directly receive the frame into the skbuffer. 47 */ 48 frame_size = queue->data_size + queue->desc_size + queue->winfo_size; 49 50 /* 51 * The payload should be aligned to a 4-byte boundary, 52 * this means we need at least 3 bytes for moving the frame 53 * into the correct offset. 54 */ 55 head_size = 4; 56 57 /* 58 * For IV/EIV/ICV assembly we must make sure there is 59 * at least 8 bytes bytes available in headroom for IV/EIV 60 * and 8 bytes for ICV data as tailroon. 61 */ 62 if (rt2x00_has_cap_hw_crypto(rt2x00dev)) { 63 head_size += 8; 64 tail_size += 8; 65 } 66 67 /* 68 * Allocate skbuffer. 69 */ 70 skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp); 71 if (!skb) 72 return NULL; 73 74 /* 75 * Make sure we not have a frame with the requested bytes 76 * available in the head and tail. 77 */ 78 skb_reserve(skb, head_size); 79 skb_put(skb, frame_size); 80 81 /* 82 * Populate skbdesc. 83 */ 84 skbdesc = get_skb_frame_desc(skb); 85 memset(skbdesc, 0, sizeof(*skbdesc)); 86 skbdesc->entry = entry; 87 88 if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) { 89 dma_addr_t skb_dma; 90 91 skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len, 92 DMA_FROM_DEVICE); 93 if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) { 94 dev_kfree_skb_any(skb); 95 return NULL; 96 } 97 98 skbdesc->skb_dma = skb_dma; 99 skbdesc->flags |= SKBDESC_DMA_MAPPED_RX; 100 } 101 102 return skb; 103} 104 105int rt2x00queue_map_txskb(struct queue_entry *entry) 106{ 107 struct device *dev = entry->queue->rt2x00dev->dev; 108 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 109 110 skbdesc->skb_dma = 111 dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE); 112 113 if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma))) 114 return -ENOMEM; 115 116 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; 117 return 0; 118} 119EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); 120 121void rt2x00queue_unmap_skb(struct queue_entry *entry) 122{ 123 struct device *dev = entry->queue->rt2x00dev->dev; 124 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 125 126 if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) { 127 dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len, 128 DMA_FROM_DEVICE); 129 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX; 130 } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { 131 dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len, 132 DMA_TO_DEVICE); 133 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; 134 } 135} 136EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb); 137 138void rt2x00queue_free_skb(struct queue_entry *entry) 139{ 140 if (!entry->skb) 141 return; 142 143 rt2x00queue_unmap_skb(entry); 144 dev_kfree_skb_any(entry->skb); 145 entry->skb = NULL; 146} 147 148void rt2x00queue_align_frame(struct sk_buff *skb) 149{ 150 unsigned int frame_length = skb->len; 151 unsigned int align = ALIGN_SIZE(skb, 0); 152 153 if (!align) 154 return; 155 156 skb_push(skb, align); 157 memmove(skb->data, skb->data + align, frame_length); 158 skb_trim(skb, frame_length); 159} 160 161void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) 162{ 163 unsigned int payload_length = skb->len - header_length; 164 unsigned int header_align = ALIGN_SIZE(skb, 0); 165 unsigned int payload_align = ALIGN_SIZE(skb, header_length); 166 unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0; 167 168 /* 169 * Adjust the header alignment if the payload needs to be moved more 170 * than the header. 171 */ 172 if (payload_align > header_align) 173 header_align += 4; 174 175 /* There is nothing to do if no alignment is needed */ 176 if (!header_align) 177 return; 178 179 /* Reserve the amount of space needed in front of the frame */ 180 skb_push(skb, header_align); 181 182 /* 183 * Move the header. 184 */ 185 memmove(skb->data, skb->data + header_align, header_length); 186 187 /* Move the payload, if present and if required */ 188 if (payload_length && payload_align) 189 memmove(skb->data + header_length + l2pad, 190 skb->data + header_length + l2pad + payload_align, 191 payload_length); 192 193 /* Trim the skb to the correct size */ 194 skb_trim(skb, header_length + l2pad + payload_length); 195} 196 197void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) 198{ 199 /* 200 * L2 padding is only present if the skb contains more than just the 201 * IEEE 802.11 header. 202 */ 203 unsigned int l2pad = (skb->len > header_length) ? 204 L2PAD_SIZE(header_length) : 0; 205 206 if (!l2pad) 207 return; 208 209 memmove(skb->data + l2pad, skb->data, header_length); 210 skb_pull(skb, l2pad); 211} 212 213static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, 214 struct sk_buff *skb, 215 struct txentry_desc *txdesc) 216{ 217 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 218 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 219 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); 220 u16 seqno; 221 222 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) 223 return; 224 225 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 226 227 if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) { 228 /* 229 * rt2800 has a H/W (or F/W) bug, device incorrectly increase 230 * seqno on retransmited data (non-QOS) frames. To workaround 231 * the problem let's generate seqno in software if QOS is 232 * disabled. 233 */ 234 if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags)) 235 __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 236 else 237 /* H/W will generate sequence number */ 238 return; 239 } 240 241 /* 242 * The hardware is not able to insert a sequence number. Assign a 243 * software generated one here. 244 * 245 * This is wrong because beacons are not getting sequence 246 * numbers assigned properly. 247 * 248 * A secondary problem exists for drivers that cannot toggle 249 * sequence counting per-frame, since those will override the 250 * sequence counter given by mac80211. 251 */ 252 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) 253 seqno = atomic_add_return(0x10, &intf->seqno); 254 else 255 seqno = atomic_read(&intf->seqno); 256 257 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 258 hdr->seq_ctrl |= cpu_to_le16(seqno); 259} 260 261static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, 262 struct sk_buff *skb, 263 struct txentry_desc *txdesc, 264 const struct rt2x00_rate *hwrate) 265{ 266 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 267 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; 268 unsigned int data_length; 269 unsigned int duration; 270 unsigned int residual; 271 272 /* 273 * Determine with what IFS priority this frame should be send. 274 * Set ifs to IFS_SIFS when the this is not the first fragment, 275 * or this fragment came after RTS/CTS. 276 */ 277 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) 278 txdesc->u.plcp.ifs = IFS_BACKOFF; 279 else 280 txdesc->u.plcp.ifs = IFS_SIFS; 281 282 /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */ 283 data_length = skb->len + 4; 284 data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb); 285 286 /* 287 * PLCP setup 288 * Length calculation depends on OFDM/CCK rate. 289 */ 290 txdesc->u.plcp.signal = hwrate->plcp; 291 txdesc->u.plcp.service = 0x04; 292 293 if (hwrate->flags & DEV_RATE_OFDM) { 294 txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f; 295 txdesc->u.plcp.length_low = data_length & 0x3f; 296 } else { 297 /* 298 * Convert length to microseconds. 299 */ 300 residual = GET_DURATION_RES(data_length, hwrate->bitrate); 301 duration = GET_DURATION(data_length, hwrate->bitrate); 302 303 if (residual != 0) { 304 duration++; 305 306 /* 307 * Check if we need to set the Length Extension 308 */ 309 if (hwrate->bitrate == 110 && residual <= 30) 310 txdesc->u.plcp.service |= 0x80; 311 } 312 313 txdesc->u.plcp.length_high = (duration >> 8) & 0xff; 314 txdesc->u.plcp.length_low = duration & 0xff; 315 316 /* 317 * When preamble is enabled we should set the 318 * preamble bit for the signal. 319 */ 320 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 321 txdesc->u.plcp.signal |= 0x08; 322 } 323} 324 325static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, 326 struct sk_buff *skb, 327 struct txentry_desc *txdesc, 328 struct ieee80211_sta *sta, 329 const struct rt2x00_rate *hwrate) 330{ 331 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 332 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; 333 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 334 struct rt2x00_sta *sta_priv = NULL; 335 336 if (sta) { 337 txdesc->u.ht.mpdu_density = 338 sta->ht_cap.ampdu_density; 339 340 sta_priv = sta_to_rt2x00_sta(sta); 341 txdesc->u.ht.wcid = sta_priv->wcid; 342 } 343 344 /* 345 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the 346 * mcs rate to be used 347 */ 348 if (txrate->flags & IEEE80211_TX_RC_MCS) { 349 txdesc->u.ht.mcs = txrate->idx; 350 351 /* 352 * MIMO PS should be set to 1 for STA's using dynamic SM PS 353 * when using more then one tx stream (>MCS7). 354 */ 355 if (sta && txdesc->u.ht.mcs > 7 && 356 sta->smps_mode == IEEE80211_SMPS_DYNAMIC) 357 __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags); 358 } else { 359 txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs); 360 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 361 txdesc->u.ht.mcs |= 0x08; 362 } 363 364 if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) { 365 if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) 366 txdesc->u.ht.txop = TXOP_SIFS; 367 else 368 txdesc->u.ht.txop = TXOP_BACKOFF; 369 370 /* Left zero on all other settings. */ 371 return; 372 } 373 374 txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ 375 376 /* 377 * Only one STBC stream is supported for now. 378 */ 379 if (tx_info->flags & IEEE80211_TX_CTL_STBC) 380 txdesc->u.ht.stbc = 1; 381 382 /* 383 * This frame is eligible for an AMPDU, however, don't aggregate 384 * frames that are intended to probe a specific tx rate. 385 */ 386 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU && 387 !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) 388 __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); 389 390 /* 391 * Set 40Mhz mode if necessary (for legacy rates this will 392 * duplicate the frame to both channels). 393 */ 394 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH || 395 txrate->flags & IEEE80211_TX_RC_DUP_DATA) 396 __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags); 397 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI) 398 __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags); 399 400 /* 401 * Determine IFS values 402 * - Use TXOP_BACKOFF for management frames except beacons 403 * - Use TXOP_SIFS for fragment bursts 404 * - Use TXOP_HTTXOP for everything else 405 * 406 * Note: rt2800 devices won't use CTS protection (if used) 407 * for frames not transmitted with TXOP_HTTXOP 408 */ 409 if (ieee80211_is_mgmt(hdr->frame_control) && 410 !ieee80211_is_beacon(hdr->frame_control)) 411 txdesc->u.ht.txop = TXOP_BACKOFF; 412 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) 413 txdesc->u.ht.txop = TXOP_SIFS; 414 else 415 txdesc->u.ht.txop = TXOP_HTTXOP; 416} 417 418static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, 419 struct sk_buff *skb, 420 struct txentry_desc *txdesc, 421 struct ieee80211_sta *sta) 422{ 423 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 424 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 425 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; 426 struct ieee80211_rate *rate; 427 const struct rt2x00_rate *hwrate = NULL; 428 429 memset(txdesc, 0, sizeof(*txdesc)); 430 431 /* 432 * Header and frame information. 433 */ 434 txdesc->length = skb->len; 435 txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb); 436 437 /* 438 * Check whether this frame is to be acked. 439 */ 440 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) 441 __set_bit(ENTRY_TXD_ACK, &txdesc->flags); 442 443 /* 444 * Check if this is a RTS/CTS frame 445 */ 446 if (ieee80211_is_rts(hdr->frame_control) || 447 ieee80211_is_cts(hdr->frame_control)) { 448 __set_bit(ENTRY_TXD_BURST, &txdesc->flags); 449 if (ieee80211_is_rts(hdr->frame_control)) 450 __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags); 451 else 452 __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags); 453 if (tx_info->control.rts_cts_rate_idx >= 0) 454 rate = 455 ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info); 456 } 457 458 /* 459 * Determine retry information. 460 */ 461 txdesc->retry_limit = tx_info->control.rates[0].count - 1; 462 if (txdesc->retry_limit >= rt2x00dev->long_retry) 463 __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags); 464 465 /* 466 * Check if more fragments are pending 467 */ 468 if (ieee80211_has_morefrags(hdr->frame_control)) { 469 __set_bit(ENTRY_TXD_BURST, &txdesc->flags); 470 __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags); 471 } 472 473 /* 474 * Check if more frames (!= fragments) are pending 475 */ 476 if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES) 477 __set_bit(ENTRY_TXD_BURST, &txdesc->flags); 478 479 /* 480 * Beacons and probe responses require the tsf timestamp 481 * to be inserted into the frame. 482 */ 483 if (ieee80211_is_beacon(hdr->frame_control) || 484 ieee80211_is_probe_resp(hdr->frame_control)) 485 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); 486 487 if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) && 488 !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) 489 __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags); 490 491 /* 492 * Determine rate modulation. 493 */ 494 if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) 495 txdesc->rate_mode = RATE_MODE_HT_GREENFIELD; 496 else if (txrate->flags & IEEE80211_TX_RC_MCS) 497 txdesc->rate_mode = RATE_MODE_HT_MIX; 498 else { 499 rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 500 hwrate = rt2x00_get_rate(rate->hw_value); 501 if (hwrate->flags & DEV_RATE_OFDM) 502 txdesc->rate_mode = RATE_MODE_OFDM; 503 else 504 txdesc->rate_mode = RATE_MODE_CCK; 505 } 506 507 /* 508 * Apply TX descriptor handling by components 509 */ 510 rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc); 511 rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc); 512 513 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags)) 514 rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc, 515 sta, hwrate); 516 else 517 rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc, 518 hwrate); 519} 520 521static int rt2x00queue_write_tx_data(struct queue_entry *entry, 522 struct txentry_desc *txdesc) 523{ 524 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 525 526 /* 527 * This should not happen, we already checked the entry 528 * was ours. When the hardware disagrees there has been 529 * a queue corruption! 530 */ 531 if (unlikely(rt2x00dev->ops->lib->get_entry_state && 532 rt2x00dev->ops->lib->get_entry_state(entry))) { 533 rt2x00_err(rt2x00dev, 534 "Corrupt queue %d, accessing entry which is not ours\n" 535 "Please file bug report to %s\n", 536 entry->queue->qid, DRV_PROJECT); 537 return -EINVAL; 538 } 539 540 /* 541 * Add the requested extra tx headroom in front of the skb. 542 */ 543 skb_push(entry->skb, rt2x00dev->extra_tx_headroom); 544 memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom); 545 546 /* 547 * Call the driver's write_tx_data function, if it exists. 548 */ 549 if (rt2x00dev->ops->lib->write_tx_data) 550 rt2x00dev->ops->lib->write_tx_data(entry, txdesc); 551 552 /* 553 * Map the skb to DMA. 554 */ 555 if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) && 556 rt2x00queue_map_txskb(entry)) 557 return -ENOMEM; 558 559 return 0; 560} 561 562static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry, 563 struct txentry_desc *txdesc) 564{ 565 struct data_queue *queue = entry->queue; 566 567 queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc); 568 569 /* 570 * All processing on the frame has been completed, this means 571 * it is now ready to be dumped to userspace through debugfs. 572 */ 573 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb); 574} 575 576static void rt2x00queue_kick_tx_queue(struct data_queue *queue, 577 struct txentry_desc *txdesc) 578{ 579 /* 580 * Check if we need to kick the queue, there are however a few rules 581 * 1) Don't kick unless this is the last in frame in a burst. 582 * When the burst flag is set, this frame is always followed 583 * by another frame which in some way are related to eachother. 584 * This is true for fragments, RTS or CTS-to-self frames. 585 * 2) Rule 1 can be broken when the available entries 586 * in the queue are less then a certain threshold. 587 */ 588 if (rt2x00queue_threshold(queue) || 589 !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) 590 queue->rt2x00dev->ops->lib->kick_queue(queue); 591} 592 593static void rt2x00queue_bar_check(struct queue_entry *entry) 594{ 595 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 596 struct ieee80211_bar *bar = (void *) (entry->skb->data + 597 rt2x00dev->extra_tx_headroom); 598 struct rt2x00_bar_list_entry *bar_entry; 599 600 if (likely(!ieee80211_is_back_req(bar->frame_control))) 601 return; 602 603 bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC); 604 605 /* 606 * If the alloc fails we still send the BAR out but just don't track 607 * it in our bar list. And as a result we will report it to mac80211 608 * back as failed. 609 */ 610 if (!bar_entry) 611 return; 612 613 bar_entry->entry = entry; 614 bar_entry->block_acked = 0; 615 616 /* 617 * Copy the relevant parts of the 802.11 BAR into out check list 618 * such that we can use RCU for less-overhead in the RX path since 619 * sending BARs and processing the according BlockAck should be 620 * the exception. 621 */ 622 memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra)); 623 memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta)); 624 bar_entry->control = bar->control; 625 bar_entry->start_seq_num = bar->start_seq_num; 626 627 /* 628 * Insert BAR into our BAR check list. 629 */ 630 spin_lock_bh(&rt2x00dev->bar_list_lock); 631 list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list); 632 spin_unlock_bh(&rt2x00dev->bar_list_lock); 633} 634 635int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, 636 struct ieee80211_sta *sta, bool local) 637{ 638 struct ieee80211_tx_info *tx_info; 639 struct queue_entry *entry; 640 struct txentry_desc txdesc; 641 struct skb_frame_desc *skbdesc; 642 u8 rate_idx, rate_flags; 643 int ret = 0; 644 645 /* 646 * Copy all TX descriptor information into txdesc, 647 * after that we are free to use the skb->cb array 648 * for our information. 649 */ 650 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta); 651 652 /* 653 * All information is retrieved from the skb->cb array, 654 * now we should claim ownership of the driver part of that 655 * array, preserving the bitrate index and flags. 656 */ 657 tx_info = IEEE80211_SKB_CB(skb); 658 rate_idx = tx_info->control.rates[0].idx; 659 rate_flags = tx_info->control.rates[0].flags; 660 skbdesc = get_skb_frame_desc(skb); 661 memset(skbdesc, 0, sizeof(*skbdesc)); 662 skbdesc->tx_rate_idx = rate_idx; 663 skbdesc->tx_rate_flags = rate_flags; 664 665 if (local) 666 skbdesc->flags |= SKBDESC_NOT_MAC80211; 667 668 /* 669 * When hardware encryption is supported, and this frame 670 * is to be encrypted, we should strip the IV/EIV data from 671 * the frame so we can provide it to the driver separately. 672 */ 673 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && 674 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { 675 if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags)) 676 rt2x00crypto_tx_copy_iv(skb, &txdesc); 677 else 678 rt2x00crypto_tx_remove_iv(skb, &txdesc); 679 } 680 681 /* 682 * When DMA allocation is required we should guarantee to the 683 * driver that the DMA is aligned to a 4-byte boundary. 684 * However some drivers require L2 padding to pad the payload 685 * rather then the header. This could be a requirement for 686 * PCI and USB devices, while header alignment only is valid 687 * for PCI devices. 688 */ 689 if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags)) 690 rt2x00queue_insert_l2pad(skb, txdesc.header_length); 691 else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags)) 692 rt2x00queue_align_frame(skb); 693 694 /* 695 * That function must be called with bh disabled. 696 */ 697 spin_lock(&queue->tx_lock); 698 699 if (unlikely(rt2x00queue_full(queue))) { 700 rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n", 701 queue->qid); 702 ret = -ENOBUFS; 703 goto out; 704 } 705 706 entry = rt2x00queue_get_entry(queue, Q_INDEX); 707 708 if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, 709 &entry->flags))) { 710 rt2x00_err(queue->rt2x00dev, 711 "Arrived at non-free entry in the non-full queue %d\n" 712 "Please file bug report to %s\n", 713 queue->qid, DRV_PROJECT); 714 ret = -EINVAL; 715 goto out; 716 } 717 718 skbdesc->entry = entry; 719 entry->skb = skb; 720 721 /* 722 * It could be possible that the queue was corrupted and this 723 * call failed. Since we always return NETDEV_TX_OK to mac80211, 724 * this frame will simply be dropped. 725 */ 726 if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) { 727 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 728 entry->skb = NULL; 729 ret = -EIO; 730 goto out; 731 } 732 733 /* 734 * Put BlockAckReqs into our check list for driver BA processing. 735 */ 736 rt2x00queue_bar_check(entry); 737 738 set_bit(ENTRY_DATA_PENDING, &entry->flags); 739 740 rt2x00queue_index_inc(entry, Q_INDEX); 741 rt2x00queue_write_tx_descriptor(entry, &txdesc); 742 rt2x00queue_kick_tx_queue(queue, &txdesc); 743 744out: 745 spin_unlock(&queue->tx_lock); 746 return ret; 747} 748 749int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev, 750 struct ieee80211_vif *vif) 751{ 752 struct rt2x00_intf *intf = vif_to_intf(vif); 753 754 if (unlikely(!intf->beacon)) 755 return -ENOBUFS; 756 757 mutex_lock(&intf->beacon_skb_mutex); 758 759 /* 760 * Clean up the beacon skb. 761 */ 762 rt2x00queue_free_skb(intf->beacon); 763 764 /* 765 * Clear beacon (single bssid devices don't need to clear the beacon 766 * since the beacon queue will get stopped anyway). 767 */ 768 if (rt2x00dev->ops->lib->clear_beacon) 769 rt2x00dev->ops->lib->clear_beacon(intf->beacon); 770 771 mutex_unlock(&intf->beacon_skb_mutex); 772 773 return 0; 774} 775 776int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev, 777 struct ieee80211_vif *vif) 778{ 779 struct rt2x00_intf *intf = vif_to_intf(vif); 780 struct skb_frame_desc *skbdesc; 781 struct txentry_desc txdesc; 782 783 if (unlikely(!intf->beacon)) 784 return -ENOBUFS; 785 786 /* 787 * Clean up the beacon skb. 788 */ 789 rt2x00queue_free_skb(intf->beacon); 790 791 intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif); 792 if (!intf->beacon->skb) 793 return -ENOMEM; 794 795 /* 796 * Copy all TX descriptor information into txdesc, 797 * after that we are free to use the skb->cb array 798 * for our information. 799 */ 800 rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL); 801 802 /* 803 * Fill in skb descriptor 804 */ 805 skbdesc = get_skb_frame_desc(intf->beacon->skb); 806 memset(skbdesc, 0, sizeof(*skbdesc)); 807 skbdesc->entry = intf->beacon; 808 809 /* 810 * Send beacon to hardware. 811 */ 812 rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc); 813 814 return 0; 815 816} 817 818int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, 819 struct ieee80211_vif *vif) 820{ 821 struct rt2x00_intf *intf = vif_to_intf(vif); 822 int ret; 823 824 mutex_lock(&intf->beacon_skb_mutex); 825 ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif); 826 mutex_unlock(&intf->beacon_skb_mutex); 827 828 return ret; 829} 830 831bool rt2x00queue_for_each_entry(struct data_queue *queue, 832 enum queue_index start, 833 enum queue_index end, 834 void *data, 835 bool (*fn)(struct queue_entry *entry, 836 void *data)) 837{ 838 unsigned long irqflags; 839 unsigned int index_start; 840 unsigned int index_end; 841 unsigned int i; 842 843 if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) { 844 rt2x00_err(queue->rt2x00dev, 845 "Entry requested from invalid index range (%d - %d)\n", 846 start, end); 847 return true; 848 } 849 850 /* 851 * Only protect the range we are going to loop over, 852 * if during our loop a extra entry is set to pending 853 * it should not be kicked during this run, since it 854 * is part of another TX operation. 855 */ 856 spin_lock_irqsave(&queue->index_lock, irqflags); 857 index_start = queue->index[start]; 858 index_end = queue->index[end]; 859 spin_unlock_irqrestore(&queue->index_lock, irqflags); 860 861 /* 862 * Start from the TX done pointer, this guarantees that we will 863 * send out all frames in the correct order. 864 */ 865 if (index_start < index_end) { 866 for (i = index_start; i < index_end; i++) { 867 if (fn(&queue->entries[i], data)) 868 return true; 869 } 870 } else { 871 for (i = index_start; i < queue->limit; i++) { 872 if (fn(&queue->entries[i], data)) 873 return true; 874 } 875 876 for (i = 0; i < index_end; i++) { 877 if (fn(&queue->entries[i], data)) 878 return true; 879 } 880 } 881 882 return false; 883} 884EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry); 885 886struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, 887 enum queue_index index) 888{ 889 struct queue_entry *entry; 890 unsigned long irqflags; 891 892 if (unlikely(index >= Q_INDEX_MAX)) { 893 rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n", 894 index); 895 return NULL; 896 } 897 898 spin_lock_irqsave(&queue->index_lock, irqflags); 899 900 entry = &queue->entries[queue->index[index]]; 901 902 spin_unlock_irqrestore(&queue->index_lock, irqflags); 903 904 return entry; 905} 906EXPORT_SYMBOL_GPL(rt2x00queue_get_entry); 907 908void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index) 909{ 910 struct data_queue *queue = entry->queue; 911 unsigned long irqflags; 912 913 if (unlikely(index >= Q_INDEX_MAX)) { 914 rt2x00_err(queue->rt2x00dev, 915 "Index change on invalid index type (%d)\n", index); 916 return; 917 } 918 919 spin_lock_irqsave(&queue->index_lock, irqflags); 920 921 queue->index[index]++; 922 if (queue->index[index] >= queue->limit) 923 queue->index[index] = 0; 924 925 entry->last_action = jiffies; 926 927 if (index == Q_INDEX) { 928 queue->length++; 929 } else if (index == Q_INDEX_DONE) { 930 queue->length--; 931 queue->count++; 932 } 933 934 spin_unlock_irqrestore(&queue->index_lock, irqflags); 935} 936 937static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue) 938{ 939 switch (queue->qid) { 940 case QID_AC_VO: 941 case QID_AC_VI: 942 case QID_AC_BE: 943 case QID_AC_BK: 944 /* 945 * For TX queues, we have to disable the queue 946 * inside mac80211. 947 */ 948 ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid); 949 break; 950 default: 951 break; 952 } 953} 954void rt2x00queue_pause_queue(struct data_queue *queue) 955{ 956 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || 957 !test_bit(QUEUE_STARTED, &queue->flags) || 958 test_and_set_bit(QUEUE_PAUSED, &queue->flags)) 959 return; 960 961 rt2x00queue_pause_queue_nocheck(queue); 962} 963EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue); 964 965void rt2x00queue_unpause_queue(struct data_queue *queue) 966{ 967 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || 968 !test_bit(QUEUE_STARTED, &queue->flags) || 969 !test_and_clear_bit(QUEUE_PAUSED, &queue->flags)) 970 return; 971 972 switch (queue->qid) { 973 case QID_AC_VO: 974 case QID_AC_VI: 975 case QID_AC_BE: 976 case QID_AC_BK: 977 /* 978 * For TX queues, we have to enable the queue 979 * inside mac80211. 980 */ 981 ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid); 982 break; 983 case QID_RX: 984 /* 985 * For RX we need to kick the queue now in order to 986 * receive frames. 987 */ 988 queue->rt2x00dev->ops->lib->kick_queue(queue); 989 default: 990 break; 991 } 992} 993EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue); 994 995void rt2x00queue_start_queue(struct data_queue *queue) 996{ 997 mutex_lock(&queue->status_lock); 998 999 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || 1000 test_and_set_bit(QUEUE_STARTED, &queue->flags)) { 1001 mutex_unlock(&queue->status_lock); 1002 return; 1003 } 1004 1005 set_bit(QUEUE_PAUSED, &queue->flags); 1006 1007 queue->rt2x00dev->ops->lib->start_queue(queue); 1008 1009 rt2x00queue_unpause_queue(queue); 1010 1011 mutex_unlock(&queue->status_lock); 1012} 1013EXPORT_SYMBOL_GPL(rt2x00queue_start_queue); 1014 1015void rt2x00queue_stop_queue(struct data_queue *queue) 1016{ 1017 mutex_lock(&queue->status_lock); 1018 1019 if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) { 1020 mutex_unlock(&queue->status_lock); 1021 return; 1022 } 1023 1024 rt2x00queue_pause_queue_nocheck(queue); 1025 1026 queue->rt2x00dev->ops->lib->stop_queue(queue); 1027 1028 mutex_unlock(&queue->status_lock); 1029} 1030EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue); 1031 1032void rt2x00queue_flush_queue(struct data_queue *queue, bool drop) 1033{ 1034 bool tx_queue = 1035 (queue->qid == QID_AC_VO) || 1036 (queue->qid == QID_AC_VI) || 1037 (queue->qid == QID_AC_BE) || 1038 (queue->qid == QID_AC_BK); 1039 1040 1041 /* 1042 * If we are not supposed to drop any pending 1043 * frames, this means we must force a start (=kick) 1044 * to the queue to make sure the hardware will 1045 * start transmitting. 1046 */ 1047 if (!drop && tx_queue) 1048 queue->rt2x00dev->ops->lib->kick_queue(queue); 1049 1050 /* 1051 * Check if driver supports flushing, if that is the case we can 1052 * defer the flushing to the driver. Otherwise we must use the 1053 * alternative which just waits for the queue to become empty. 1054 */ 1055 if (likely(queue->rt2x00dev->ops->lib->flush_queue)) 1056 queue->rt2x00dev->ops->lib->flush_queue(queue, drop); 1057 1058 /* 1059 * The queue flush has failed... 1060 */ 1061 if (unlikely(!rt2x00queue_empty(queue))) 1062 rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n", 1063 queue->qid); 1064} 1065EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue); 1066 1067void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev) 1068{ 1069 struct data_queue *queue; 1070 1071 /* 1072 * rt2x00queue_start_queue will call ieee80211_wake_queue 1073 * for each queue after is has been properly initialized. 1074 */ 1075 tx_queue_for_each(rt2x00dev, queue) 1076 rt2x00queue_start_queue(queue); 1077 1078 rt2x00queue_start_queue(rt2x00dev->rx); 1079} 1080EXPORT_SYMBOL_GPL(rt2x00queue_start_queues); 1081 1082void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev) 1083{ 1084 struct data_queue *queue; 1085 1086 /* 1087 * rt2x00queue_stop_queue will call ieee80211_stop_queue 1088 * as well, but we are completely shutting doing everything 1089 * now, so it is much safer to stop all TX queues at once, 1090 * and use rt2x00queue_stop_queue for cleaning up. 1091 */ 1092 ieee80211_stop_queues(rt2x00dev->hw); 1093 1094 tx_queue_for_each(rt2x00dev, queue) 1095 rt2x00queue_stop_queue(queue); 1096 1097 rt2x00queue_stop_queue(rt2x00dev->rx); 1098} 1099EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues); 1100 1101void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop) 1102{ 1103 struct data_queue *queue; 1104 1105 tx_queue_for_each(rt2x00dev, queue) 1106 rt2x00queue_flush_queue(queue, drop); 1107 1108 rt2x00queue_flush_queue(rt2x00dev->rx, drop); 1109} 1110EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues); 1111 1112static void rt2x00queue_reset(struct data_queue *queue) 1113{ 1114 unsigned long irqflags; 1115 unsigned int i; 1116 1117 spin_lock_irqsave(&queue->index_lock, irqflags); 1118 1119 queue->count = 0; 1120 queue->length = 0; 1121 1122 for (i = 0; i < Q_INDEX_MAX; i++) 1123 queue->index[i] = 0; 1124 1125 spin_unlock_irqrestore(&queue->index_lock, irqflags); 1126} 1127 1128void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) 1129{ 1130 struct data_queue *queue; 1131 unsigned int i; 1132 1133 queue_for_each(rt2x00dev, queue) { 1134 rt2x00queue_reset(queue); 1135 1136 for (i = 0; i < queue->limit; i++) 1137 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); 1138 } 1139} 1140 1141static int rt2x00queue_alloc_entries(struct data_queue *queue) 1142{ 1143 struct queue_entry *entries; 1144 unsigned int entry_size; 1145 unsigned int i; 1146 1147 rt2x00queue_reset(queue); 1148 1149 /* 1150 * Allocate all queue entries. 1151 */ 1152 entry_size = sizeof(*entries) + queue->priv_size; 1153 entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); 1154 if (!entries) 1155 return -ENOMEM; 1156 1157#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \ 1158 (((char *)(__base)) + ((__limit) * (__esize)) + \ 1159 ((__index) * (__psize))) 1160 1161 for (i = 0; i < queue->limit; i++) { 1162 entries[i].flags = 0; 1163 entries[i].queue = queue; 1164 entries[i].skb = NULL; 1165 entries[i].entry_idx = i; 1166 entries[i].priv_data = 1167 QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, 1168 sizeof(*entries), queue->priv_size); 1169 } 1170 1171#undef QUEUE_ENTRY_PRIV_OFFSET 1172 1173 queue->entries = entries; 1174 1175 return 0; 1176} 1177 1178static void rt2x00queue_free_skbs(struct data_queue *queue) 1179{ 1180 unsigned int i; 1181 1182 if (!queue->entries) 1183 return; 1184 1185 for (i = 0; i < queue->limit; i++) { 1186 rt2x00queue_free_skb(&queue->entries[i]); 1187 } 1188} 1189 1190static int rt2x00queue_alloc_rxskbs(struct data_queue *queue) 1191{ 1192 unsigned int i; 1193 struct sk_buff *skb; 1194 1195 for (i = 0; i < queue->limit; i++) { 1196 skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL); 1197 if (!skb) 1198 return -ENOMEM; 1199 queue->entries[i].skb = skb; 1200 } 1201 1202 return 0; 1203} 1204 1205int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev) 1206{ 1207 struct data_queue *queue; 1208 int status; 1209 1210 status = rt2x00queue_alloc_entries(rt2x00dev->rx); 1211 if (status) 1212 goto exit; 1213 1214 tx_queue_for_each(rt2x00dev, queue) { 1215 status = rt2x00queue_alloc_entries(queue); 1216 if (status) 1217 goto exit; 1218 } 1219 1220 status = rt2x00queue_alloc_entries(rt2x00dev->bcn); 1221 if (status) 1222 goto exit; 1223 1224 if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) { 1225 status = rt2x00queue_alloc_entries(rt2x00dev->atim); 1226 if (status) 1227 goto exit; 1228 } 1229 1230 status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx); 1231 if (status) 1232 goto exit; 1233 1234 return 0; 1235 1236exit: 1237 rt2x00_err(rt2x00dev, "Queue entries allocation failed\n"); 1238 1239 rt2x00queue_uninitialize(rt2x00dev); 1240 1241 return status; 1242} 1243 1244void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev) 1245{ 1246 struct data_queue *queue; 1247 1248 rt2x00queue_free_skbs(rt2x00dev->rx); 1249 1250 queue_for_each(rt2x00dev, queue) { 1251 kfree(queue->entries); 1252 queue->entries = NULL; 1253 } 1254} 1255 1256static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, 1257 struct data_queue *queue, enum data_queue_qid qid) 1258{ 1259 mutex_init(&queue->status_lock); 1260 spin_lock_init(&queue->tx_lock); 1261 spin_lock_init(&queue->index_lock); 1262 1263 queue->rt2x00dev = rt2x00dev; 1264 queue->qid = qid; 1265 queue->txop = 0; 1266 queue->aifs = 2; 1267 queue->cw_min = 5; 1268 queue->cw_max = 10; 1269 1270 rt2x00dev->ops->queue_init(queue); 1271 1272 queue->threshold = DIV_ROUND_UP(queue->limit, 10); 1273} 1274 1275int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) 1276{ 1277 struct data_queue *queue; 1278 enum data_queue_qid qid; 1279 unsigned int req_atim = 1280 !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags); 1281 1282 /* 1283 * We need the following queues: 1284 * RX: 1 1285 * TX: ops->tx_queues 1286 * Beacon: 1 1287 * Atim: 1 (if required) 1288 */ 1289 rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; 1290 1291 queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL); 1292 if (!queue) { 1293 rt2x00_err(rt2x00dev, "Queue allocation failed\n"); 1294 return -ENOMEM; 1295 } 1296 1297 /* 1298 * Initialize pointers 1299 */ 1300 rt2x00dev->rx = queue; 1301 rt2x00dev->tx = &queue[1]; 1302 rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; 1303 rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL; 1304 1305 /* 1306 * Initialize queue parameters. 1307 * RX: qid = QID_RX 1308 * TX: qid = QID_AC_VO + index 1309 * TX: cw_min: 2^5 = 32. 1310 * TX: cw_max: 2^10 = 1024. 1311 * BCN: qid = QID_BEACON 1312 * ATIM: qid = QID_ATIM 1313 */ 1314 rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX); 1315 1316 qid = QID_AC_VO; 1317 tx_queue_for_each(rt2x00dev, queue) 1318 rt2x00queue_init(rt2x00dev, queue, qid++); 1319 1320 rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON); 1321 if (req_atim) 1322 rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM); 1323 1324 return 0; 1325} 1326 1327void rt2x00queue_free(struct rt2x00_dev *rt2x00dev) 1328{ 1329 kfree(rt2x00dev->rx); 1330 rt2x00dev->rx = NULL; 1331 rt2x00dev->tx = NULL; 1332 rt2x00dev->bcn = NULL; 1333}