Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/*
 * HT handling
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007  Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2007-2010, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "wme.h"

/**
 * DOC: TX A-MPDU aggregation
 *
 * Aggregation on the TX side requires setting the hardware flag
 * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
 * packets with a flag indicating A-MPDU aggregation. The driver
 * or device is responsible for actually aggregating the frames,
 * as well as deciding how many and which to aggregate.
 *
 * When TX aggregation is started by some subsystem calling the
 * ieee80211_start_tx_ba_session() function (usually the rate
 * control algorithm is the appropriate place to do so), the driver
 * will be notified via its @ampdu_action function, with the
 * %IEEE80211_AMPDU_TX_START action.
 *
 * In response to that, the driver is later required to call the
 * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
 * start the aggregation session after the peer has also responded.
 * If the peer responds negatively, the session will be stopped
 * again right away. Note that it is possible for the aggregation
 * session to be stopped before the driver has indicated that it
 * is done setting it up, in which case it must not indicate the
 * setup completion.
 *
 * Also note that, since we also need to wait for a response from
 * the peer, the driver is notified of the completion of the
 * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
 * @ampdu_action callback.
 *
 * Similarly, when the aggregation session is stopped by the peer
 * or something calling ieee80211_stop_tx_ba_session(), the driver's
 * @ampdu_action function will be called with the action
 * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
 * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
 */
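
/*
 * Illustrative only, not part of mac80211 itself: a minimal sketch of the
 * driver side of the flow described in the DOC block above, assuming
 * hardware that performs the actual aggregation on its own.  The function
 * name and the "nothing to program into the device" shortcut are
 * hypothetical; the mac80211 callbacks and action values used are the real
 * ones handled in this file.  RX actions are omitted for brevity.
 */
static int example_ampdu_action(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif,
                                enum ieee80211_ampdu_mlme_action action,
                                struct ieee80211_sta *sta, u16 tid,
                                u16 *ssn, u8 buf_size)
{
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
                /* nothing to set up here; let mac80211 send the ADDBA request */
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                return 0;
        case IEEE80211_AMPDU_TX_OPERATIONAL:
                /* peer accepted (up to buf_size frames); enable A-MPDU TX */
                return 0;
        case IEEE80211_AMPDU_TX_STOP:
                /* must not fail; tear down, then acknowledge the stop */
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}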

static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
                                         const u8 *da, u16 tid,
                                         u8 dialog_token, u16 start_seq_num,
                                         u16 agg_size, u16 timeout)
{
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
        struct ieee80211_mgmt *mgmt;
        u16 capab;

        skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

        if (!skb) {
                printk(KERN_ERR "%s: failed to allocate buffer "
                                "for addba request frame\n", sdata->name);
                return;
        }
        skb_reserve(skb, local->hw.extra_tx_headroom);
        mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
        memset(mgmt, 0, 24);
        memcpy(mgmt->da, da, ETH_ALEN);
        memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
        if (sdata->vif.type == NL80211_IFTYPE_AP ||
            sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
        else if (sdata->vif.type == NL80211_IFTYPE_STATION)
                memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);

        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);

        skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

        mgmt->u.action.category = WLAN_CATEGORY_BACK;
        mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

        mgmt->u.action.u.addba_req.dialog_token = dialog_token;
        capab = (u16)(1 << 1);          /* bit 1: aggregation policy */
        capab |= (u16)(tid << 2);       /* bits 5:2: TID number */
        capab |= (u16)(agg_size << 6);  /* bits 15:6: max size of aggregation */

        mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

        mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
        mgmt->u.action.u.addba_req.start_seq_num =
                                        cpu_to_le16(start_seq_num << 4);

        ieee80211_tx_skb(sdata, skb);
}

void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
{
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
        struct ieee80211_bar *bar;
        u16 bar_control = 0;

        skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
        if (!skb) {
                printk(KERN_ERR "%s: failed to allocate buffer for "
                                "bar frame\n", sdata->name);
                return;
        }
        skb_reserve(skb, local->hw.extra_tx_headroom);
        bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
        memset(bar, 0, sizeof(*bar));
        bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
                                         IEEE80211_STYPE_BACK_REQ);
        memcpy(bar->ra, ra, ETH_ALEN);
        memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
        bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
        bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
        bar_control |= (u16)(tid << 12);
        bar->control = cpu_to_le16(bar_control);
        bar->start_seq_num = cpu_to_le16(ssn);

        IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
        ieee80211_tx_skb(sdata, skb);
}

void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
                             struct tid_ampdu_tx *tid_tx)
{
        lockdep_assert_held(&sta->ampdu_mlme.mtx);
        lockdep_assert_held(&sta->lock);
        rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}

int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                    enum ieee80211_back_parties initiator,
                                    bool tx)
{
        struct ieee80211_local *local = sta->local;
        struct tid_ampdu_tx *tid_tx;
        int ret;

        lockdep_assert_held(&sta->ampdu_mlme.mtx);

        spin_lock_bh(&sta->lock);

        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
        if (!tid_tx) {
                spin_unlock_bh(&sta->lock);
                return -ENOENT;
        }

        if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
                /* not even started yet! */
                ieee80211_assign_tid_tx(sta, tid, NULL);
                spin_unlock_bh(&sta->lock);
                kfree_rcu(tid_tx, rcu_head);
                return 0;
        }

        spin_unlock_bh(&sta->lock);

#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
               sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

        set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

        del_timer_sync(&tid_tx->addba_resp_timer);

        /*
         * After this, packets are no longer handed right through
         * to the driver but are put onto tid_tx->pending instead,
         * with locking to ensure proper access.
         */
        clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

        tid_tx->stop_initiator = initiator;
        tid_tx->tx_stop = tx;

        ret = drv_ampdu_action(local, sta->sdata,
                               IEEE80211_AMPDU_TX_STOP,
                               &sta->sta, tid, NULL, 0);

        /* HW shall not deny going back to legacy */
        if (WARN_ON(ret)) {
                /*
                 * We may have pending packets get stuck in this case...
                 * Not bothering with a workaround for now.
                 */
        }

        return ret;
}

/*
 * After sending an ADDBA (add Block Ack) request, a timer runs until the
 * ADDBA response arrives from the recipient.  If this timer expires,
 * sta_addba_resp_timer_expired() is executed.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
        /* Not an elegant detour, but there is no choice: the timer passes
         * only one argument, and both the sta_info and the TID are needed,
         * so the init flow in sta_info_create gives the TID as data, while
         * the timer_to_tid array gives the sta through container_of() */
        u16 tid = *(u8 *)data;
        struct sta_info *sta = container_of((void *)data,
                struct sta_info, timer_to_tid[tid]);
        struct tid_ampdu_tx *tid_tx;

        /* check if the TID waits for addBA response */
        rcu_read_lock();
        tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
        if (!tid_tx ||
            test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
                rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "timer expired on tid %d but we are not "
                                  "(or no longer) expecting addBA response there\n",
                       tid);
#endif
                return;
        }

#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
#endif

        ieee80211_stop_tx_ba_session(&sta->sta, tid);
        rcu_read_unlock();
}

static inline int ieee80211_ac_from_tid(int tid)
{
        return ieee802_1d_to_ac[tid & 7];
}

/*
 * When multiple aggregation sessions on multiple stations
 * are being created/destroyed simultaneously, we need to
 * refcount the global queue stop caused by that in order
 * to not get into a situation where one of the aggregation
 * setup or teardown re-enables queues before the other is
 * ready to handle that.
 *
 * These two functions take care of this issue by keeping
 * a global "agg_queue_stop" refcount.
 */
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
{
        int queue = ieee80211_ac_from_tid(tid);

        if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
                ieee80211_stop_queue_by_reason(
                        &local->hw, queue,
                        IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
        __acquire(agg_queue);
}

static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
{
        int queue = ieee80211_ac_from_tid(tid);

        if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
                ieee80211_wake_queue_by_reason(
                        &local->hw, queue,
                        IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
        __release(agg_queue);
}

void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
        struct tid_ampdu_tx *tid_tx;
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        u16 start_seq_num;
        int ret;

        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

        /*
         * While we're asking the driver about the aggregation,
         * stop the AC queue so that we don't have to worry
         * about frames that came in while we were doing that,
         * which would require us to put them onto the AC pending
         * queue afterwards and just makes the code more complex.
         */
        ieee80211_stop_queue_agg(local, tid);

        clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

        /*
         * make sure no packets are being processed to get
         * a valid starting sequence number
         */
        synchronize_net();

        /* tid_seq[] holds the value in sequence-control format (sequence
         * number in bits 15:4), so shift out the fragment-number bits */
        start_seq_num = sta->tid_seq[tid] >> 4;

        ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
                               &sta->sta, tid, &start_seq_num, 0);
        if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "BA request denied - HW unavailable for"
                                  " tid %d\n", tid);
#endif
                spin_lock_bh(&sta->lock);
                ieee80211_assign_tid_tx(sta, tid, NULL);
                spin_unlock_bh(&sta->lock);

                ieee80211_wake_queue_agg(local, tid);
                kfree_rcu(tid_tx, rcu_head);
                return;
        }

        /* we can take packets again now */
        ieee80211_wake_queue_agg(local, tid);

        /* activate the timer for the recipient's addBA response */
        mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
#endif

        spin_lock_bh(&sta->lock);
        sta->ampdu_mlme.addba_req_num[tid]++;
        spin_unlock_bh(&sta->lock);

        /* send AddBA request */
        ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
                                     tid_tx->dialog_token, start_seq_num,
                                     local->hw.max_tx_aggregation_subframes,
                                     tid_tx->timeout);
}

int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
                                  u16 timeout)
{
        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
        struct tid_ampdu_tx *tid_tx;
        int ret = 0;

        trace_api_start_tx_ba_session(pubsta, tid);

        if (WARN_ON(!local->ops->ampdu_action))
                return -EINVAL;

        if ((tid >= STA_TID_NUM) ||
            !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION))
                return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
               pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

        /*
         * The aggregation code is not prepared to handle
         * anything but STA/AP due to the BSSID handling.
         * IBSS could work in the code but isn't supported
         * by drivers or the standard.
         */
        if (sdata->vif.type != NL80211_IFTYPE_STATION &&
            sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
            sdata->vif.type != NL80211_IFTYPE_AP)
                return -EINVAL;

        if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "BA sessions blocked. "
                                  "Denying BA session request\n");
#endif
                return -EINVAL;
        }

        spin_lock_bh(&sta->lock);

        /* we have tried too many times, receiver does not want A-MPDU */
        if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
                ret = -EBUSY;
                goto err_unlock_sta;
        }

        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
        /* check if the TID is not in aggregation flow already */
        if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "BA request denied - session is not "
                                  "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
                ret = -EAGAIN;
                goto err_unlock_sta;
        }

        /* prepare A-MPDU MLME for Tx aggregation */
        tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
        if (!tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                if (net_ratelimit())
                        printk(KERN_ERR "failed to allocate tx mlme for tid %d\n",
                               tid);
#endif
                ret = -ENOMEM;
                goto err_unlock_sta;
        }

        skb_queue_head_init(&tid_tx->pending);
        __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

        tid_tx->timeout = timeout;

        /* Tx timer */
        tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
        tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
        init_timer(&tid_tx->addba_resp_timer);

        /* assign a dialog token */
        sta->ampdu_mlme.dialog_token_allocator++;
        tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

        /*
         * Finally, assign it to the start array; the work item will
         * collect it and move it to the normal array.
         */
        sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

        ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

        /* this flow continues in the work item */
 err_unlock_sta:
        spin_unlock_bh(&sta->lock);
        return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
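
/*
 * Illustrative caller-side sketch, not part of this file: as the DOC block
 * notes, TX aggregation is typically kicked off by the rate control
 * algorithm once it considers a peer/TID worthwhile.  The helper name and
 * the decision to start are hypothetical; only
 * ieee80211_start_tx_ba_session() is the real API.
 */
static void example_try_start_agg(struct ieee80211_sta *pubsta, u16 tid)
{
        /* a timeout of 0 requests a BA session with no inactivity timeout */
        if (ieee80211_start_tx_ba_session(pubsta, tid, 0))
                return; /* -EBUSY/-EAGAIN etc. simply mean "not now" */

        /*
         * A zero return only means the request was queued; the session
         * becomes usable once the ADDBA handshake handled above succeeds.
         */
}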

/*
 * Splice packets from the STA's pending queue to the local pending queue;
 * requires a call to ieee80211_agg_splice_finish() later.
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_local *local,
                             struct tid_ampdu_tx *tid_tx, u16 tid)
{
        int queue = ieee80211_ac_from_tid(tid);
        unsigned long flags;

        ieee80211_stop_queue_agg(local, tid);

        if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
                          " from the pending queue\n", tid))
                return;

        if (!skb_queue_empty(&tid_tx->pending)) {
                spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
                /* copy over remaining packets */
                skb_queue_splice_tail_init(&tid_tx->pending,
                                           &local->pending[queue]);
                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
        }
}

static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
{
        ieee80211_wake_queue_agg(local, tid);
}

static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
                                         struct sta_info *sta, u16 tid)
{
        struct tid_ampdu_tx *tid_tx;

        lockdep_assert_held(&sta->ampdu_mlme.mtx);

        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
#endif

        drv_ampdu_action(local, sta->sdata,
                         IEEE80211_AMPDU_TX_OPERATIONAL,
                         &sta->sta, tid, NULL, tid_tx->buf_size);

        /*
         * Synchronize with the TX path: while we are splicing, the TX
         * path should block so it won't put more packets onto pending.
         */
        spin_lock_bh(&sta->lock);

        ieee80211_agg_splice_packets(local, tid_tx, tid);
        /*
         * Now mark as operational. This will be visible
         * in the TX path, and lets it go lock-free in
         * the common case.
         */
        set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
        ieee80211_agg_splice_finish(local, tid);

        spin_unlock_bh(&sta->lock);
}

void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        struct tid_ampdu_tx *tid_tx;

        trace_api_start_tx_ba_cb(sdata, ra, tid);

        if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
                       tid, STA_TID_NUM);
#endif
                return;
        }

        mutex_lock(&local->sta_mtx);
        sta = sta_info_get(sdata, ra);
        if (!sta) {
                mutex_unlock(&local->sta_mtx);
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
                return;
        }

        mutex_lock(&sta->ampdu_mlme.mtx);
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

        if (WARN_ON(!tid_tx)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "addBA was not requested!\n");
#endif
                goto unlock;
        }

        if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
                goto unlock;

        if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
                ieee80211_agg_tx_operational(local, sta, tid);

 unlock:
        mutex_unlock(&sta->ampdu_mlme.mtx);
        mutex_unlock(&local->sta_mtx);
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
                                      const u8 *ra, u16 tid)
{
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_ra_tid *ra_tid;
        struct sk_buff *skb = dev_alloc_skb(0);

        if (unlikely(!skb)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                if (net_ratelimit())
                        printk(KERN_WARNING "%s: Not enough memory, "
                                            "dropping start BA session\n",
                               sdata->name);
#endif
                return;
        }
        ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
        memcpy(&ra_tid->ra, ra, ETH_ALEN);
        ra_tid->tid = tid;

        skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
        skb_queue_tail(&sdata->skb_queue, skb);
        ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                   enum ieee80211_back_parties initiator,
                                   bool tx)
{
        int ret;

        mutex_lock(&sta->ampdu_mlme.mtx);

        ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator, tx);

        mutex_unlock(&sta->ampdu_mlme.mtx);

        return ret;
}

int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
        struct tid_ampdu_tx *tid_tx;
        int ret = 0;

        trace_api_stop_tx_ba_session(pubsta, tid);

        if (!local->ops->ampdu_action)
                return -EINVAL;

        if (tid >= STA_TID_NUM)
                return -EINVAL;

        spin_lock_bh(&sta->lock);
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

        if (!tid_tx) {
                ret = -ENOENT;
                goto unlock;
        }

        if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
                /* already in progress stopping it */
                ret = 0;
                goto unlock;
        }

        set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
        ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
        spin_unlock_bh(&sta->lock);
        return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);

void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        struct tid_ampdu_tx *tid_tx;

        trace_api_stop_tx_ba_cb(sdata, ra, tid);

        if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
                       tid, STA_TID_NUM);
#endif
                return;
        }

#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
               ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

        mutex_lock(&local->sta_mtx);

        sta = sta_info_get(sdata, ra);
        if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
                goto unlock;
        }

        mutex_lock(&sta->ampdu_mlme.mtx);
        spin_lock_bh(&sta->lock);
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

        if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
#endif
                goto unlock_sta;
        }

        if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
                ieee80211_send_delba(sta->sdata, ra, tid,
                        WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

        /*
         * When we get here, the TX path will not be lockless any more wrt.
         * aggregation, since the OPERATIONAL bit has long been cleared.
         * Thus it will block on getting the lock, if it occurs. So if we
         * stop the queue now, we will not get any more packets, and any
         * that might be being processed will wait for us here, thereby
         * guaranteeing that no packets go to the tid_tx pending queue any
         * more.
         */

        ieee80211_agg_splice_packets(local, tid_tx, tid);

        /* future packets must not find the tid_tx struct any more */
        ieee80211_assign_tid_tx(sta, tid, NULL);

        ieee80211_agg_splice_finish(local, tid);

        kfree_rcu(tid_tx, rcu_head);

 unlock_sta:
        spin_unlock_bh(&sta->lock);
        mutex_unlock(&sta->ampdu_mlme.mtx);
 unlock:
        mutex_unlock(&local->sta_mtx);
}

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
                                     const u8 *ra, u16 tid)
{
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_ra_tid *ra_tid;
        struct sk_buff *skb = dev_alloc_skb(0);

        if (unlikely(!skb)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                if (net_ratelimit())
                        printk(KERN_WARNING "%s: Not enough memory, "
                                            "dropping stop BA session\n",
                               sdata->name);
#endif
                return;
        }
        ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
        memcpy(&ra_tid->ra, ra, ETH_ALEN);
        ra_tid->tid = tid;

        skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
        skb_queue_tail(&sdata->skb_queue, skb);
        ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);

void ieee80211_process_addba_resp(struct ieee80211_local *local,
                                  struct sta_info *sta,
                                  struct ieee80211_mgmt *mgmt,
                                  size_t len)
{
        struct tid_ampdu_tx *tid_tx;
        u16 capab, tid;
        u8 buf_size;

        capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
        tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
        buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;

        mutex_lock(&sta->ampdu_mlme.mtx);

        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
        if (!tid_tx)
                goto out;

        if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
#ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif
                goto out;
        }

        del_timer(&tid_tx->addba_resp_timer);

#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif

        if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
                        == WLAN_STATUS_SUCCESS) {
                /*
                 * IEEE 802.11-2007 7.3.1.14:
                 * In an ADDBA Response frame, when the Status Code field
                 * is set to 0, the Buffer Size subfield is set to a value
                 * of at least 1.
                 */
                if (!buf_size)
                        goto out;

                if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
                                     &tid_tx->state)) {
                        /* ignore duplicate response */
                        goto out;
                }

                tid_tx->buf_size = buf_size;

                if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
                        ieee80211_agg_tx_operational(local, sta, tid);

                sta->ampdu_mlme.addba_req_num[tid] = 0;
        } else {
                ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
                                                true);
        }

 out:
        mutex_unlock(&sta->ampdu_mlme.mtx);
}