// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator.
 *
 * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <linux/sched.h>
#include <net/mac80211.h>

#include "queue.h"
#include "wfx.h"
#include "sta.h"
#include "data_tx.h"

void wfx_tx_lock(struct wfx_dev *wdev)
{
	atomic_inc(&wdev->tx_lock);
}

void wfx_tx_unlock(struct wfx_dev *wdev)
{
	int tx_lock = atomic_dec_return(&wdev->tx_lock);

	WARN(tx_lock < 0, "inconsistent tx_lock value");
	if (!tx_lock)
		wfx_bh_request_tx(wdev);
}

void wfx_tx_flush(struct wfx_dev *wdev)
{
	int ret;

	// Do not wait for any reply if chip is frozen
	if (wdev->chip_frozen)
		return;

	mutex_lock(&wdev->hif_cmd.lock);
	ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
				 !wdev->hif.tx_buffers_used,
				 msecs_to_jiffies(3000));
	if (!ret) {
		dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
			 wdev->hif.tx_buffers_used);
		wfx_pending_dump_old_frames(wdev, 3000);
		// FIXME: drop pending frames here
		wdev->chip_frozen = 1;
	}
	mutex_unlock(&wdev->hif_cmd.lock);
}

void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
	wfx_tx_lock(wdev);
	wfx_tx_flush(wdev);
}

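/*
 * Illustrative pairing for the three helpers above (a sketch, not a call
 * site from this driver):
 *
 *	wfx_tx_lock_flush(wdev);
 *	... touch state the TX path must not race with ...
 *	wfx_tx_unlock(wdev);
 *
 * tx_lock is a counter, so lock/unlock pairs may nest; the unlock that
 * brings it back to zero kicks the bottom half via wfx_bh_request_tx().
 */
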
void wfx_tx_queues_lock(struct wfx_dev *wdev)
{
	int i;
	struct wfx_queue *queue;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		queue = &wdev->tx_queue[i];
		spin_lock_bh(&queue->queue.lock);
		if (queue->tx_locked_cnt++ == 0)
			ieee80211_stop_queue(wdev->hw, queue->queue_id);
		spin_unlock_bh(&queue->queue.lock);
	}
}

void wfx_tx_queues_unlock(struct wfx_dev *wdev)
{
	int i;
	struct wfx_queue *queue;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		queue = &wdev->tx_queue[i];
		spin_lock_bh(&queue->queue.lock);
		WARN(!queue->tx_locked_cnt, "queue already unlocked");
		if (--queue->tx_locked_cnt == 0)
			ieee80211_wake_queue(wdev->hw, queue->queue_id);
		spin_unlock_bh(&queue->queue.lock);
	}
}

/* This function returns with the TX lock held (see wfx_tx_lock_flush()) */
void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
{
	int i;
	bool done;
	struct wfx_queue *queue;
	struct sk_buff *item;
	struct wfx_dev *wdev = wvif->wdev;
	struct hif_msg *hif;

	if (wvif->wdev->chip_frozen) {
		wfx_tx_lock_flush(wdev);
		wfx_tx_queues_clear(wdev);
		return;
	}

	do {
		done = true;
		wfx_tx_lock_flush(wdev);
		for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
			queue = &wdev->tx_queue[i];
			spin_lock_bh(&queue->queue.lock);
			skb_queue_walk(&queue->queue, item) {
				hif = (struct hif_msg *)item->data;
				if (hif->interface == wvif->id)
					done = false;
			}
			spin_unlock_bh(&queue->queue.lock);
		}
		if (!done) {
			wfx_tx_unlock(wdev);
			msleep(20);
		}
	} while (!done);
}

static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
			       struct sk_buff_head *gc_list)
{
	int i;
	struct sk_buff *item;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	spin_lock_bh(&queue->queue.lock);
	while ((item = __skb_dequeue(&queue->queue)) != NULL)
		skb_queue_head(gc_list, item);
	spin_lock_nested(&stats->pending.lock, 1);
	for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock(&stats->pending.lock);
	spin_unlock_bh(&queue->queue.lock);
}

void wfx_tx_queues_clear(struct wfx_dev *wdev)
{
	int i;
	struct sk_buff *item;
	struct sk_buff_head gc_list;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	skb_queue_head_init(&gc_list);
	for (i = 0; i < IEEE80211_NUM_ACS; ++i)
		wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
	wake_up(&stats->wait_link_id_empty);
	while ((item = skb_dequeue(&gc_list)) != NULL)
		wfx_skb_dtor(wdev, item);
}

void wfx_tx_queues_init(struct wfx_dev *wdev)
{
	int i;

	memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
	memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
	skb_queue_head_init(&wdev->tx_queue_stats.pending);
	init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		wdev->tx_queue[i].queue_id = i;
		skb_queue_head_init(&wdev->tx_queue[i].queue);
	}
}

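/*
 * Note on wfx_tx_queues_init() above: each wfx_queue is bound one-to-one to
 * a mac80211 access category, with queue_id set to the AC index. This is
 * what the ieee80211_stop_queue()/ieee80211_wake_queue() calls in
 * wfx_tx_queues_lock()/wfx_tx_queues_unlock() rely on.
 */
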
void wfx_tx_queues_deinit(struct wfx_dev *wdev)
{
	WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
	wfx_tx_queues_clear(wdev);
}

int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map)
{
	int ret, i;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->queue.lock);
	if (link_id_map == (u32)-1) {
		ret = skb_queue_len(&queue->queue);
	} else {
		ret = 0;
		for (i = 0; i < ARRAY_SIZE(queue->link_map_cache); i++)
			if (link_id_map & BIT(i))
				ret += queue->link_map_cache[i];
	}
	spin_unlock_bh(&queue->queue.lock);
	return ret;
}

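/*
 * Illustrative call of wfx_tx_queue_get_num_queued() above (a sketch):
 * counting only the frames queued for link 1 plus the special after-DTIM
 * link would look like:
 *
 *	n = wfx_tx_queue_get_num_queued(queue,
 *					BIT(1) | BIT(WFX_LINK_ID_AFTER_DTIM));
 *
 * A link_id_map of (u32)-1 short-circuits to the raw queue length.
 */
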
void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
		      struct sk_buff *skb)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

	WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache),
	     "invalid link-id value");
	spin_lock_bh(&queue->queue.lock);
	__skb_queue_tail(&queue->queue, skb);

	++queue->link_map_cache[tx_priv->link_id];

	spin_lock_nested(&stats->pending.lock, 1);
	++stats->link_map_cache[tx_priv->link_id];
	spin_unlock(&stats->pending.lock);
	spin_unlock_bh(&queue->queue.lock);
}

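/*
 * In wfx_tx_queue_put() above, the per-queue and global link_map_cache
 * counters are updated in lockstep: the former answers "how many frames does
 * this AC hold for link X", while the latter aggregates across all ACs so
 * that wait_link_id_empty waiters can be woken once a link fully drains (see
 * wfx_tx_queue_get() below).
 */
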
static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
					struct wfx_queue *queue,
					u32 link_id_map)
{
	struct sk_buff *skb = NULL;
	struct sk_buff *item;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->queue.lock);
	skb_queue_walk(&queue->queue, item) {
		tx_priv = wfx_skb_tx_priv(item);
		if (link_id_map & BIT(tx_priv->link_id)) {
			skb = item;
			break;
		}
	}
	if (skb) {
		tx_priv = wfx_skb_tx_priv(skb);
		tx_priv->xmit_timestamp = ktime_get();
		__skb_unlink(skb, &queue->queue);
		--queue->link_map_cache[tx_priv->link_id];

		spin_lock_nested(&stats->pending.lock, 1);
		__skb_queue_tail(&stats->pending, skb);
		if (!--stats->link_map_cache[tx_priv->link_id])
			wakeup_stats = true;
		spin_unlock(&stats->pending.lock);
	}
	spin_unlock_bh(&queue->queue.lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return skb;
}

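/*
 * Lifecycle sketch, as inferred from the helpers around here: a frame moves
 * from a per-AC queue (wfx_tx_queue_put) into stats->pending when dequeued
 * by wfx_tx_queue_get(), then either leaves through wfx_pending_remove() on
 * TX confirmation or returns to its AC queue through wfx_pending_requeue()
 * when the firmware asks for a retry.
 */
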
int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
	struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];

	WARN_ON(skb_get_queue_mapping(skb) > 3);
	spin_lock_bh(&queue->queue.lock);
	++queue->link_map_cache[tx_priv->link_id];

	spin_lock_nested(&stats->pending.lock, 1);
	++stats->link_map_cache[tx_priv->link_id];
	__skb_unlink(skb, &stats->pending);
	spin_unlock(&stats->pending.lock);
	__skb_queue_tail(&queue->queue, skb);
	spin_unlock_bh(&queue->queue.lock);
	return 0;
}

int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	spin_lock_bh(&stats->pending.lock);
	__skb_unlink(skb, &stats->pending);
	spin_unlock_bh(&stats->pending.lock);
	wfx_skb_dtor(wdev, skb);

	return 0;
}

struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
	struct sk_buff *skb;
	struct hif_req_tx *req;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

	spin_lock_bh(&stats->pending.lock);
	skb_queue_walk(&stats->pending, skb) {
		req = wfx_skb_txreq(skb);
		if (req->packet_id == packet_id) {
			spin_unlock_bh(&stats->pending.lock);
			return skb;
		}
	}
	spin_unlock_bh(&stats->pending.lock);
	WARN(1, "cannot find packet in pending queue");
	return NULL;
}

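/*
 * Illustrative lookup (a sketch; the real caller presumably lives in the
 * TX-confirmation path, and "confirm" is a hypothetical variable name here):
 *
 *	skb = wfx_pending_get(wdev, confirm->packet_id);
 *	if (skb)
 *		wfx_pending_remove(wdev, skb);
 */
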
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv;
	struct hif_req_tx *req;
	struct sk_buff *skb;
	bool first = true;

	spin_lock_bh(&stats->pending.lock);
	skb_queue_walk(&stats->pending, skb) {
		tx_priv = wfx_skb_tx_priv(skb);
		req = wfx_skb_txreq(skb);
		if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
						  limit_ms))) {
			if (first) {
				dev_info(wdev->dev, "frames stuck in firmware for %ums or more:\n",
					 limit_ms);
				first = false;
			}
			dev_info(wdev->dev, "  id %08x sent %lldms ago\n",
				 req->packet_id,
				 ktime_ms_delta(now, tx_priv->xmit_timestamp));
		}
	}
	spin_unlock_bh(&stats->pending.lock);
}

unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
					  struct sk_buff *skb)
{
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

	return ktime_us_delta(now, tx_priv->xmit_timestamp);
}

bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
{
	int i;
	struct sk_buff_head *queue;
	bool ret = true;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		queue = &wdev->tx_queue[i].queue;
		spin_lock_bh(&queue->lock);
		if (!skb_queue_empty(queue))
			ret = false;
		spin_unlock_bh(&queue->lock);
	}
	return ret;
}

static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
			       struct wfx_queue *queue)
{
	struct hif_req_tx *req = wfx_skb_txreq(skb);
	struct ieee80211_key_conf *hw_key = wfx_skb_tx_priv(skb)->hw_key;
	struct ieee80211_hdr *frame =
		(struct ieee80211_hdr *)(req->frame + req->data_flags.fc_offset);

	// FIXME: mac80211 is smart enough to handle BSS loss. The driver
	// should not try to do anything about it.
	if (ieee80211_is_nullfunc(frame->frame_control)) {
		mutex_lock(&wvif->bss_loss_lock);
		if (wvif->bss_loss_state) {
			wvif->bss_loss_confirm_id = req->packet_id;
			req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
		}
		mutex_unlock(&wvif->bss_loss_lock);
	}

	// FIXME: identify the exact scenario matched by this condition. Does
	// it still happen?
	if (ieee80211_has_protected(frame->frame_control) &&
	    hw_key && hw_key->keyidx != wvif->wep_default_key_id &&
	    (hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
		wfx_tx_lock(wvif->wdev);
		WARN_ON(wvif->wep_pending_skb);
		wvif->wep_default_key_id = hw_key->keyidx;
		wvif->wep_pending_skb = skb;
		if (!schedule_work(&wvif->wep_key_work))
			wfx_tx_unlock(wvif->wdev);
		return true;
	} else {
		return false;
	}
}

static int wfx_get_prio_queue(struct wfx_vif *wvif,
			      u32 tx_allowed_mask, int *total)
{
	static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
				  BIT(WFX_LINK_ID_UAPSD);
	const struct ieee80211_tx_queue_params *edca;
	unsigned int score, best = -1;
	int winner = -1;
	int i;

	/* search for a winner using edca params */
	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		int queued;

		edca = &wvif->edca_params[i];
		queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
						     tx_allowed_mask);
		if (!queued)
			continue;
		*total += queued;
		score = ((edca->aifs + edca->cw_min) << 16) +
			((edca->cw_max - edca->cw_min) *
			 (get_random_int() & 0xFFFF));
		if (score < best && (winner < 0 || i != 3)) {
			best = score;
			winner = i;
		}
	}

	/* override winner if bursting */
	if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
	    winner != wvif->wdev->tx_burst_idx &&
	    !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
					 tx_allowed_mask & urgent) &&
	    wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx],
					tx_allowed_mask))
		winner = wvif->wdev->tx_burst_idx;

	return winner;
}

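/*
 * Reading the score in wfx_get_prio_queue() above as a simulated EDCA
 * contention round: (aifs + cw_min) << 16 is a deterministic priority
 * component, while the random term scaled by the contention window width
 * (cw_max - cw_min) adds jitter that can overflow into it, so a busier but
 * lower-priority AC still wins occasionally, roughly mimicking what the
 * 802.11 EDCA hardware queues do over the air.
 */
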
static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
				 struct wfx_queue **queue_p,
				 u32 *tx_allowed_mask_p)
{
	int idx;
	u32 tx_allowed_mask;
	int total = 0;

	/* Search for unicast traffic */
	tx_allowed_mask = ~wvif->sta_asleep_mask;
	tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
	if (wvif->sta_asleep_mask)
		tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
	else
		tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
	idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
	if (idx < 0)
		return -ENOENT;

	*queue_p = &wvif->wdev->tx_queue[idx];
	*tx_allowed_mask_p = tx_allowed_mask;
	return 0;
}

struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif)
{
	struct wfx_dev *wdev = wvif->wdev;
	struct ieee80211_tx_info *tx_info;
	struct hif_msg *hif;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		skb_queue_walk(&wdev->tx_queue[i].queue, skb) {
			tx_info = IEEE80211_SKB_CB(skb);
			hif = (struct hif_msg *)skb->data;
			if ((tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) &&
			    (hif->interface == wvif->id))
				return (struct hif_msg *)skb->data;
		}
	}
	return NULL;
}

struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
	struct sk_buff *skb;
	struct hif_msg *hif = NULL;
	struct wfx_queue *queue = NULL;
	struct wfx_queue *vif_queue = NULL;
	u32 tx_allowed_mask = 0;
	u32 vif_tx_allowed_mask = 0;
	struct wfx_vif *wvif;
	int not_found;
	int burst;
	int i;

	if (atomic_read(&wdev->tx_lock))
		return NULL;

	wvif = NULL;
	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
		if (wvif->after_dtim_tx_allowed) {
			for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
				skb = wfx_tx_queue_get(wvif->wdev,
						       &wdev->tx_queue[i],
						       BIT(WFX_LINK_ID_AFTER_DTIM));
				if (skb) {
					hif = (struct hif_msg *)skb->data;
					// Cannot happen since only one vif
					// can be AP at a time
					WARN_ON(wvif->id != hif->interface);
					return hif;
				}
			}
			// No more multicast to send
			wvif->after_dtim_tx_allowed = false;
			schedule_work(&wvif->update_tim_work);
		}
	}

	for (;;) {
		int ret = -ENOENT;
		int queue_num;

		wvif = NULL;
		while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
			spin_lock_bh(&wvif->ps_state_lock);

			not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
							  &vif_tx_allowed_mask);

			spin_unlock_bh(&wvif->ps_state_lock);

			if (!not_found) {
				if (queue && queue != vif_queue)
					dev_info(wdev->dev, "vifs disagree about queue priority\n");
				tx_allowed_mask |= vif_tx_allowed_mask;
				queue = vif_queue;
				ret = 0;
			}
		}

		if (ret)
			return NULL;

		queue_num = queue - wdev->tx_queue;

		skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
		if (!skb)
			continue;
		hif = (struct hif_msg *)skb->data;
		wvif = wdev_to_wvif(wdev, hif->interface);
		WARN_ON(!wvif);

		if (hif_handle_tx_data(wvif, skb, queue))
			continue; /* Handled by WSM */

		/* allow bursting if txop is set */
		if (wvif->edca_params[queue_num].txop)
			burst = wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
		else
			burst = 1;

		/* store index of bursting queue */
		if (burst > 1)
			wdev->tx_burst_idx = queue_num;
		else
			wdev->tx_burst_idx = -1;

		return hif;
	}
}