// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator.
 *
 * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <linux/sched.h>
#include <net/mac80211.h>

#include "queue.h"
#include "wfx.h"
#include "sta.h"
#include "data_tx.h"

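/*
 * wdev->tx_lock is a counting lock: wfx_tx_lock() may be nested, and any
 * non-zero count keeps wfx_tx_queues_get() from handing frames out. The
 * final wfx_tx_unlock() requests a TX pass from the bottom half so that
 * transmission resumes.
 */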
void wfx_tx_lock(struct wfx_dev *wdev)
{
	atomic_inc(&wdev->tx_lock);
}

void wfx_tx_unlock(struct wfx_dev *wdev)
{
	int tx_lock = atomic_dec_return(&wdev->tx_lock);

	WARN(tx_lock < 0, "inconsistent tx_lock value");
	if (!tx_lock)
		wfx_bh_request_tx(wdev);
}

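/*
 * Wait (up to 3 seconds) for the device to return all in-flight TX buffers.
 * On timeout, the frames still owned by the firmware are dumped and the chip
 * is marked frozen so that subsequent flushes return immediately.
 */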
void wfx_tx_flush(struct wfx_dev *wdev)
{
	int ret;

	WARN(!atomic_read(&wdev->tx_lock), "tx_lock is not locked");

	// Do not wait for any reply if chip is frozen
	if (wdev->chip_frozen)
		return;

	mutex_lock(&wdev->hif_cmd.lock);
	ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
				 !wdev->hif.tx_buffers_used,
				 msecs_to_jiffies(3000));
	if (!ret) {
		dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
			 wdev->hif.tx_buffers_used);
		wfx_pending_dump_old_frames(wdev, 3000);
		// FIXME: drop pending frames here
		wdev->chip_frozen = 1;
	}
	mutex_unlock(&wdev->hif_cmd.lock);
}

void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
	wfx_tx_lock(wdev);
	wfx_tx_flush(wdev);
}

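/*
 * wfx_tx_queues_lock()/wfx_tx_queues_unlock() stop and wake the mac80211
 * queues themselves. The per-queue tx_locked_cnt makes the pair nestable:
 * only the first lock stops a queue and only the last unlock wakes it again.
 */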
void wfx_tx_queues_lock(struct wfx_dev *wdev)
{
	int i;
	struct wfx_queue *queue;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		queue = &wdev->tx_queue[i];
		spin_lock_bh(&queue->queue.lock);
		if (queue->tx_locked_cnt++ == 0)
			ieee80211_stop_queue(wdev->hw, queue->queue_id);
		spin_unlock_bh(&queue->queue.lock);
	}
}

void wfx_tx_queues_unlock(struct wfx_dev *wdev)
{
	int i;
	struct wfx_queue *queue;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		queue = &wdev->tx_queue[i];
		spin_lock_bh(&queue->queue.lock);
		WARN(!queue->tx_locked_cnt, "queue already unlocked");
		if (--queue->tx_locked_cnt == 0)
			ieee80211_wake_queue(wdev->hw, queue->queue_id);
		spin_unlock_bh(&queue->queue.lock);
	}
}

/* Wait until no frame of @wvif is left in the TX queues. The TX lock is held
 * on return; the caller releases it with wfx_tx_unlock().
 */
void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
{
	int i;
	bool done;
	struct wfx_queue *queue;
	struct sk_buff *item;
	struct wfx_dev *wdev = wvif->wdev;
	struct hif_msg *hif;

	if (wvif->wdev->chip_frozen) {
		wfx_tx_lock_flush(wdev);
		wfx_tx_queues_clear(wdev);
		return;
	}

	do {
		done = true;
		wfx_tx_lock_flush(wdev);
		for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
			queue = &wdev->tx_queue[i];
			spin_lock_bh(&queue->queue.lock);
			skb_queue_walk(&queue->queue, item) {
				hif = (struct hif_msg *) item->data;
				if (hif->interface == wvif->id)
					done = false;
			}
			spin_unlock_bh(&queue->queue.lock);
		}
		if (!done) {
			wfx_tx_unlock(wdev);
			msleep(20);
		}
	} while (!done);
}

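/*
 * Move every frame of one AC queue onto @gc_list and subtract the queue's
 * contribution from both the per-queue and the global per-link counters.
 */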
static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
			       struct sk_buff_head *gc_list)
{
	int i;
	struct sk_buff *item;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	spin_lock_bh(&queue->queue.lock);
	while ((item = __skb_dequeue(&queue->queue)) != NULL)
		skb_queue_head(gc_list, item);
	spin_lock_bh(&stats->pending.lock);
	for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->pending.lock);
	spin_unlock_bh(&queue->queue.lock);
}

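/*
 * Drop all queued frames on every AC queue, wake any waiter on
 * wait_link_id_empty and free the frames outside of the queue locks.
 */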
void wfx_tx_queues_clear(struct wfx_dev *wdev)
{
	int i;
	struct sk_buff *item;
	struct sk_buff_head gc_list;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	skb_queue_head_init(&gc_list);
	for (i = 0; i < IEEE80211_NUM_ACS; ++i)
		wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
	wake_up(&stats->wait_link_id_empty);
	while ((item = skb_dequeue(&gc_list)) != NULL)
		wfx_skb_dtor(wdev, item);
}

void wfx_tx_queues_init(struct wfx_dev *wdev)
{
	int i;

	memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
	memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
	skb_queue_head_init(&wdev->tx_queue_stats.pending);
	init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		wdev->tx_queue[i].queue_id = i;
		skb_queue_head_init(&wdev->tx_queue[i].queue);
	}
}

void wfx_tx_queues_deinit(struct wfx_dev *wdev)
{
	WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
	wfx_tx_queues_clear(wdev);
}

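/*
 * Count the frames queued on @queue for the link IDs selected by
 * @link_id_map; a map of -1 (all bits set) is a shortcut for the whole queue.
 */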
size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->queue.lock);
	if (link_id_map == (u32)-1) {
		ret = skb_queue_len(&queue->queue);
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
		     ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->queue.lock);
	return ret;
}

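/*
 * Append a frame to an AC queue and account for it in the per-queue and
 * global per-link counters used by wfx_tx_queue_get_num_queued().
 */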
void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
		      struct sk_buff *skb)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

	WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache),
	     "invalid link-id value");
	spin_lock_bh(&queue->queue.lock);
	__skb_queue_tail(&queue->queue, skb);

	++queue->link_map_cache[tx_priv->link_id];

	spin_lock_bh(&stats->pending.lock);
	++stats->link_map_cache[tx_priv->link_id];
	spin_unlock_bh(&stats->pending.lock);
	spin_unlock_bh(&queue->queue.lock);
}

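/*
 * Pop the first frame of @queue that belongs to one of the links in
 * @link_id_map, timestamp it and move it to the global "pending" queue of
 * frames handed to the firmware but not yet confirmed.
 */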
static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
					struct wfx_queue *queue,
					u32 link_id_map)
{
	struct sk_buff *skb = NULL;
	struct sk_buff *item;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->queue.lock);
	skb_queue_walk(&queue->queue, item) {
		tx_priv = wfx_skb_tx_priv(item);
		if (link_id_map & BIT(tx_priv->link_id)) {
			skb = item;
			break;
		}
	}
	WARN_ON(!skb);
	if (skb) {
		tx_priv = wfx_skb_tx_priv(skb);
		tx_priv->xmit_timestamp = ktime_get();
		__skb_unlink(skb, &queue->queue);
		--queue->link_map_cache[tx_priv->link_id];

		spin_lock_bh(&stats->pending.lock);
		__skb_queue_tail(&stats->pending, skb);
		if (!--stats->link_map_cache[tx_priv->link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->pending.lock);
	}
	spin_unlock_bh(&queue->queue.lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return skb;
}

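/*
 * Put a frame back from the "pending" queue onto its original AC queue so it
 * can be fetched again, and restore the per-link counters accordingly.
 */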
int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
	struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];

	WARN_ON(skb_get_queue_mapping(skb) > 3);
	spin_lock_bh(&queue->queue.lock);
	++queue->link_map_cache[tx_priv->link_id];

	spin_lock_bh(&stats->pending.lock);
	++stats->link_map_cache[tx_priv->link_id];
	__skb_unlink(skb, &stats->pending);
	spin_unlock_bh(&stats->pending.lock);
	__skb_queue_tail(&queue->queue, skb);
	spin_unlock_bh(&queue->queue.lock);
	return 0;
}

int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	spin_lock_bh(&stats->pending.lock);
	__skb_unlink(skb, &stats->pending);
	spin_unlock_bh(&stats->pending.lock);
	wfx_skb_dtor(wdev, skb);

	return 0;
}

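/*
 * Look up a frame in the "pending" queue from the packet_id carried in its
 * HIF TX request, typically to match a confirmation coming back from the
 * firmware with the frame it refers to.
 */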
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
	struct sk_buff *skb;
	struct hif_req_tx *req;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	spin_lock_bh(&stats->pending.lock);
	skb_queue_walk(&stats->pending, skb) {
		req = wfx_skb_txreq(skb);
		if (req->packet_id == packet_id) {
			spin_unlock_bh(&stats->pending.lock);
			return skb;
		}
	}
	spin_unlock_bh(&stats->pending.lock);
	WARN(1, "cannot find packet in pending queue");
	return NULL;
}

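/*
 * Log every pending frame that the firmware has held for more than @limit_ms
 * milliseconds; wfx_tx_flush() calls this when a flush times out.
 */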
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv;
	struct hif_req_tx *req;
	struct sk_buff *skb;
	bool first = true;

	spin_lock_bh(&stats->pending.lock);
	skb_queue_walk(&stats->pending, skb) {
		tx_priv = wfx_skb_tx_priv(skb);
		req = wfx_skb_txreq(skb);
		if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
						  limit_ms))) {
			if (first) {
				dev_info(wdev->dev, "frames stuck in firmware for %ums or more:\n",
					 limit_ms);
				first = false;
			}
			dev_info(wdev->dev, " id %08x sent %lldms ago\n",
				 req->packet_id,
				 ktime_ms_delta(now, tx_priv->xmit_timestamp));
		}
	}
	spin_unlock_bh(&stats->pending.lock);
}

unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
					  struct sk_buff *skb)
{
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

	return ktime_us_delta(now, tx_priv->xmit_timestamp);
}

bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
{
	int i;
	struct sk_buff_head *queue;
	bool ret = true;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		queue = &wdev->tx_queue[i].queue;
		spin_lock_bh(&queue->lock);
		if (!skb_queue_empty(queue))
			ret = false;
		spin_unlock_bh(&queue->lock);
	}
	return ret;
}

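/*
 * Decide what to do with a frame just taken from a queue: drop it if the vif
 * state no longer allows transmission, divert it to the WEP-key work item if
 * the default WEP key has to change first, or let it go out as-is. Returns
 * true when the frame was consumed here and must not be sent.
 */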
static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
			       struct wfx_queue *queue)
{
	bool handled = false;
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
	struct hif_req_tx *req = wfx_skb_txreq(skb);
	struct ieee80211_hdr *frame =
		(struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);

	enum {
		do_probe,
		do_drop,
		do_wep,
		do_tx,
	} action = do_tx;

	switch (wvif->vif->type) {
	case NL80211_IFTYPE_STATION:
		if (wvif->state < WFX_STATE_PRE_STA)
			action = do_drop;
		break;
	case NL80211_IFTYPE_AP:
		if (!wvif->state) {
			action = do_drop;
		} else if (!(BIT(tx_priv->raw_link_id) &
			     (BIT(0) | wvif->link_id_map))) {
			dev_warn(wvif->wdev->dev, "a frame with expired link-id is dropped\n");
			action = do_drop;
		}
		break;
	case NL80211_IFTYPE_ADHOC:
		if (wvif->state != WFX_STATE_IBSS)
			action = do_drop;
		break;
	case NL80211_IFTYPE_MONITOR:
	default:
		action = do_drop;
		break;
	}

	if (action == do_tx) {
		if (ieee80211_is_nullfunc(frame->frame_control)) {
			mutex_lock(&wvif->bss_loss_lock);
			if (wvif->bss_loss_state) {
				wvif->bss_loss_confirm_id = req->packet_id;
				req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
			}
			mutex_unlock(&wvif->bss_loss_lock);
		} else if (ieee80211_has_protected(frame->frame_control) &&
			   tx_priv->hw_key &&
			   tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
			   (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
			    tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
			action = do_wep;
		}
	}

	switch (action) {
	case do_drop:
		wfx_pending_remove(wvif->wdev, skb);
		handled = true;
		break;
	case do_wep:
		wfx_tx_lock(wvif->wdev);
		WARN_ON(wvif->wep_pending_skb);
		wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
		wvif->wep_pending_skb = skb;
		if (!schedule_work(&wvif->wep_key_work))
			wfx_tx_unlock(wvif->wdev);
		handled = true;
		break;
	case do_tx:
		break;
	default:
		/* Do nothing */
		break;
	}
	return handled;
}

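/*
 * Pick the AC queue to serve next among those that have frames for the links
 * in @tx_allowed_mask. The choice is a weighted random draw based on the EDCA
 * parameters of each AC, with an override that keeps serving the currently
 * bursting queue as long as no urgent (after-DTIM/U-APSD) traffic is waiting.
 */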
static int wfx_get_prio_queue(struct wfx_vif *wvif,
			      u32 tx_allowed_mask, int *total)
{
	static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
				  BIT(WFX_LINK_ID_UAPSD);
	struct hif_req_edca_queue_params *edca;
	unsigned int score, best = -1;
	int winner = -1;
	int i;

	/* search for a winner using edca params */
	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		int queued;

		edca = &wvif->edca.params[i];
		queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
						     tx_allowed_mask);
		if (!queued)
			continue;
		*total += queued;
		score = ((edca->aifsn + edca->cw_min) << 16) +
			((edca->cw_max - edca->cw_min) *
			 (get_random_int() & 0xFFFF));
		if (score < best && (winner < 0 || i != 3)) {
			best = score;
			winner = i;
		}
	}

	/* override winner if bursting */
	if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
	    winner != wvif->wdev->tx_burst_idx &&
	    !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
					 tx_allowed_mask & urgent) &&
	    wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx],
					tx_allowed_mask))
		winner = wvif->wdev->tx_burst_idx;

	return winner;
}

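/*
 * Build the set of links currently allowed to transmit (multicast after DTIM
 * first, then awake stations and U-APSD) and pick the AC queue to serve.
 * Returns -ENOENT when nothing is eligible.
 */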
static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
				 struct wfx_queue **queue_p,
				 u32 *tx_allowed_mask_p,
				 bool *more)
{
	int idx;
	u32 tx_allowed_mask;
	int total = 0;

	/* Search for a queue with multicast frames buffered */
	if (wvif->mcast_tx) {
		tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
		idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
		if (idx >= 0) {
			*more = total > 1;
			goto found;
		}
	}

	/* Search for unicast traffic */
	tx_allowed_mask = ~wvif->sta_asleep_mask;
	tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
	if (wvif->sta_asleep_mask) {
		tx_allowed_mask |= wvif->pspoll_mask;
		tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
	} else {
		tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
	}
	idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
	if (idx < 0)
		return -ENOENT;

found:
	*queue_p = &wvif->wdev->tx_queue[idx];
	*tx_allowed_mask_p = tx_allowed_mask;
	return 0;
}

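/*
 * Entry point of the TX scheduler, used by the bottom half to fetch the next
 * HIF TX request to hand to the device. Returns NULL when TX is locked or no
 * vif has an eligible frame; otherwise the chosen frame is moved to the
 * "pending" queue and, for buffered multicast, the MoreData bit is set so
 * that power-saving stations know more frames will follow.
 */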
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
	struct sk_buff *skb;
	struct hif_msg *hif = NULL;
	struct hif_req_tx *req = NULL;
	struct wfx_queue *queue = NULL;
	struct wfx_queue *vif_queue = NULL;
	u32 tx_allowed_mask = 0;
	u32 vif_tx_allowed_mask = 0;
	const struct wfx_tx_priv *tx_priv = NULL;
	struct wfx_vif *wvif;
	/* More is used only for broadcasts. */
	bool more = false;
	bool vif_more = false;
	int not_found;
	int burst;

	for (;;) {
		int ret = -ENOENT;
		int queue_num;
		struct ieee80211_hdr *hdr;

		if (atomic_read(&wdev->tx_lock))
			return NULL;

		wvif = NULL;
		while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
			spin_lock_bh(&wvif->ps_state_lock);

			not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
							  &vif_tx_allowed_mask,
							  &vif_more);

			if (wvif->mcast_buffered && (not_found || !vif_more) &&
			    (wvif->mcast_tx ||
			     !wvif->sta_asleep_mask)) {
				wvif->mcast_buffered = false;
				if (wvif->mcast_tx) {
					wvif->mcast_tx = false;
					schedule_work(&wvif->mcast_stop_work);
				}
			}

			spin_unlock_bh(&wvif->ps_state_lock);

			if (vif_more) {
				more = true;
				tx_allowed_mask = vif_tx_allowed_mask;
				queue = vif_queue;
				ret = 0;
				break;
			} else if (!not_found) {
				if (queue && queue != vif_queue)
					dev_info(wdev->dev, "vifs disagree about queue priority\n");
				tx_allowed_mask |= vif_tx_allowed_mask;
				queue = vif_queue;
				ret = 0;
			}
		}

		if (ret)
			return NULL;

		queue_num = queue - wdev->tx_queue;

		skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
		if (!skb)
			continue;
		tx_priv = wfx_skb_tx_priv(skb);
		hif = (struct hif_msg *) skb->data;
		wvif = wdev_to_wvif(wdev, hif->interface);
		WARN_ON(!wvif);

		if (hif_handle_tx_data(wvif, skb, queue))
			continue; /* Handled by WSM */

		wvif->pspoll_mask &= ~BIT(tx_priv->raw_link_id);

		/* allow bursting if txop is set */
		if (wvif->edca.params[queue_num].tx_op_limit)
			burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
		else
			burst = 1;

		/* store index of bursting queue */
		if (burst > 1)
			wdev->tx_burst_idx = queue_num;
		else
			wdev->tx_burst_idx = -1;

		/* more buffered multicast/broadcast frames
		 * ==> set MoreData flag in IEEE 802.11 header
		 * to inform PS STAs
		 */
		if (more) {
			req = (struct hif_req_tx *) hif->body;
			hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		}
		return hif;
	}
}