Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/wireless/rt2x00/rt2x00queue.c at v3.9
/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Reserve the requested head room and set the frame length,
	 * which leaves the requested tail room available.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);
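	/*
	 * Resulting buffer layout (illustrative): head_size bytes of
	 * reserved headroom (4 for alignment, plus 8 for IV/EIV with
	 * hardware crypto), frame_size bytes of frame data covered by
	 * skb->len, and tail_size bytes of tailroom for the ICV.
	 */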
	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
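/*
 * Worked example for the L2 padding inserted below (illustrative,
 * assuming L2PAD_SIZE(hdrlen) evaluates to -(hdrlen) & 3): a 26-byte
 * QoS data header needs l2pad = -26 & 3 = 2 bytes of padding so that
 * the payload behind it starts on a 4-byte boundary, while a 24-byte
 * header is already aligned and needs none.
 */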
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS)
		 * frames. To work around the problem, generate the seqno
		 * in software when QoS is disabled.
		 */
		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
		else
			/* H/W will generate sequence number */
			return;
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}
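/*
 * Note on the 0x10 increment above: in the 802.11 sequence control
 * field the fragment number occupies the low 4 bits
 * (IEEE80211_SCTL_FRAG) and the sequence number the upper 12 bits,
 * so adding 0x10 advances the sequence number by exactly one while
 * all fragments of an MSDU share the same value.
 */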
static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When short preamble is used we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}
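/*
 * Worked example (illustrative, assuming GET_DURATION(size, rate) is
 * ((size) * 8 * 10) / (rate) with the bitrate in 100 kbit/s units):
 * a 1024-byte CCK frame at 11 Mbit/s (hwrate->bitrate == 110) yields
 * 1024 * 8 * 10 / 110 = 744 us with a residual of 80, so the duration
 * is rounded up to 745 us; the length extension bit (service |= 0x80)
 * is only needed at 11 Mbit/s when the residual is small (<= 30).
 */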
static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;

	if (sta) {
		txdesc->u.ht.mpdu_density =
		    sta->ht_cap.ampdu_density;

		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when more than one TX stream is used (> MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Leave all other settings zeroed. */
		return;
	}

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}
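/*
 * Example of the resulting TXOP selection (illustrative): a probe
 * response (management, not a beacon) gets TXOP_BACKOFF, a non-first
 * fragment gets TXOP_SIFS, and a regular QoS data frame gets
 * TXOP_HTTXOP.
 */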
static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the TSF timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components.
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen: we already checked that the entry
	 * was ours. If the hardware disagrees there has been
	 * queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 2) Rule 1 can be broken when the available entries
	 *    in the queue are fewer than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
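/*
 * Illustrative numbers (assuming rt2x00queue_threshold() reports that
 * the number of free entries has dropped below queue->threshold):
 * rt2x00queue_alloc_entries() sets the threshold to one tenth of the
 * queue size, rounded up, so a 64-entry TX queue is kicked regardless
 * of the burst flag once fewer than 7 entries remain free.
 */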
static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->ops->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the allocation fails we still send the BAR out, but we don't
	 * track it in our BAR list. As a result we will report it back to
	 * mac80211 as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list
	 * so that we can use RCU for a low-overhead RX path, since
	 * sending BARs and processing the corresponding BlockAck should
	 * be the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}
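/*
 * Overview of the TX enqueue path implemented below: build the TX
 * descriptor, claim the driver part of skb->cb, optionally strip the
 * IV/EIV for hardware crypto, align or L2-pad the frame, claim a free
 * queue entry, hand the frame to the driver (mapping it for DMA where
 * required), write the TX descriptor, and finally kick the queue when
 * the burst rules allow it.
 */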
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bottom halves disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	skbdesc->entry = entry;
	entry->skb = skb;

	/*
	 * It is possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single BSSID devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/*
	 * Fill in skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				bool (*fn)(struct queue_entry *entry))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over;
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer; this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i]))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i]))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i]))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
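/*
 * Wraparound example (illustrative): with queue->limit == 64,
 * index_start == 60 and index_end == 10, the loop above visits
 * entries 60..63 and then 0..9, so frames are always handled in the
 * order in which they were queued.
 */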
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
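/*
 * Accounting example (illustrative): advancing Q_INDEX enqueues a
 * frame (queue->length++), while advancing Q_INDEX_DONE retires one
 * (queue->length--, queue->count++). queue->length is therefore the
 * number of frames currently in flight and queue->count the total
 * number completed over the queue's lifetime.
 */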
void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
	    (queue->qid == QID_AC_VO) ||
	    (queue->qid == QID_AC_VI) ||
	    (queue->qid == QID_AC_BE) ||
	    (queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue.
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if the driver supports flushing; if so, we can defer the
	 * flushing to the driver. Otherwise we must use the alternative,
	 * which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status.
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting everything down
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
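/*
 * Memory layout produced by the allocation above (illustrative):
 * kcalloc() reserves queue->limit * (sizeof(struct queue_entry) +
 * qdesc->priv_size) bytes, which QUEUE_ENTRY_PRIV_OFFSET() treats as
 * an array of all entry structs followed by an array of all
 * driver-private areas:
 *
 *   [entry 0][entry 1]...[entry N-1][priv 0][priv 1]...[priv N-1]
 */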
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers.
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}
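/*
 * Resulting queue array layout (illustrative, for a device with
 * ops->tx_queues == 4 and an ATIM queue):
 *
 *   queue[0]    RX
 *   queue[1..4] TX (QID_AC_VO, QID_AC_VI, QID_AC_BE, QID_AC_BK)
 *   queue[5]    beacon
 *   queue[6]    ATIM
 */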
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}