/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * which means we may need up to 3 bytes of headroom to move
	 * the frame to the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for the ICV data as tailroom.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate the skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Reserve the requested headroom and set the frame size,
	 * leaving the remaining space available as tailroom.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
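
/*
 * Note: rt2x00queue_map_txskb() below and rt2x00queue_alloc_rxskb() above
 * record the mapping direction in skbdesc->flags (SKBDESC_DMA_MAPPED_TX/_RX),
 * which is what allows rt2x00queue_unmap_skb() to pick the correct DMA
 * direction when tearing the mapping down again.
 */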
void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}
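
/*
 * L2 padding layout: after rt2x00queue_insert_l2pad() the frame is laid out
 * as [802.11 header][l2pad][payload], where the l2pad bytes derived from the
 * header length by L2PAD_SIZE() make the payload start on a 4-byte boundary.
 * As an illustrative example, a 26 byte QoS data header would get 2 bytes of
 * padding so that the payload begins at offset 28.
 * rt2x00queue_remove_l2pad() undoes exactly this transformation.
 */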
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
		return;

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine the IFS priority with which this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;
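
		/*
		 * Worked example (assuming GET_DURATION() returns whole
		 * microseconds and GET_DURATION_RES() the remainder, with
		 * hwrate->bitrate in units of 100 kbit/s): a 1024 byte frame
		 * at 11 Mbit/s (bitrate == 110) takes 1024 * 8 / 11 = 744 us
		 * with a remainder, so duration is rounded up to 745 and
		 * stored in the PLCP LENGTH field via length_high/length_low.
		 */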

		/*
		 * When short preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry,
						struct txentry_desc *txdesc,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;

	if (tx_info->control.sta)
		txdesc->u.ht.mpdu_density =
		    tx_info->control.sta->ht_cap.ampdu_density;

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one tx stream (>MCS7).
		 */
		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
		    ((tx_info->control.sta->ht_cap.cap &
		      IEEE80211_HT_CAP_SM_PS) >>
		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
		    WLAN_HT_CAP_SM_PS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}
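
/*
 * rt2x00queue_create_tx_descriptor() fills the generic part of the TX
 * descriptor and then hands over to either the HT variant above (for
 * devices with REQUIRE_HT_TX_DESC set) or the PLCP variant. Note that
 * hwrate is only resolved for CCK/OFDM rates; for MCS and greenfield
 * rates it stays NULL and the HT variant reads the rate from tx_info.
 */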
static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components.
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, which means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue; there are however a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 2) Rule 1 can be broken when the number of available entries
	 *    in the queue drops below a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
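
/*
 * TX path summary: rt2x00queue_write_tx_frame() claims a free entry,
 * builds the TX descriptor, strips or copies the IV when hardware
 * encryption is used, aligns the frame (or inserts L2 padding), writes
 * the frame data and descriptor, and finally kicks the queue when
 * either the burst has ended or the queue is running low on entries.
 */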
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		return -ENOBUFS;
	}

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

	return 0;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear the beacon (single bssid devices don't need to clear the
	 * beacon since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}
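
/*
 * Note: rt2x00queue_update_beacon_locked() expects the caller to hold
 * intf->beacon_skb_mutex; rt2x00queue_update_beacon() below is the
 * wrapper that takes the mutex around it.
 */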
int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in the skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send the beacon to the hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over;
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
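
/*
 * Index bookkeeping: queue->index[Q_INDEX] points at the next entry to be
 * handed to the hardware, while queue->index[Q_INDEX_DONE] points at the
 * next entry for which completion is expected. queue->length counts the
 * entries currently in flight between the two, and queue->count accumulates
 * the total number of completed entries.
 */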
void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
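
/*
 * rt2x00queue_flush_queue() below waits for the queue to drain. When @drop
 * is false the queue is kicked first so that pending frames still go out
 * on the air; the actual wait is deferred to the driver's flush_queue
 * callback when one is provided.
 */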
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
	    (queue->qid == QID_AC_VO) ||
	    (queue->qid == QID_AC_VI) ||
	    (queue->qid == QID_AC_BE) ||
	    (queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in software and not in hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue.
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * of the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if the driver supports flushing, in which case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to its previous status.
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but since we are completely shutting down everything
	 * now, it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;
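
	/*
	 * The allocation above places the queue_entry array and the
	 * per-entry driver private data in one block:
	 *   [entry 0]...[entry limit-1][priv 0]...[priv limit-1]
	 * QUEUE_ENTRY_PRIV_OFFSET() computes the address of the private
	 * data belonging to a given entry within that block.
	 */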
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers.
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}