Linux kernel source mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Topics: kernel, os, linux
Branch: master — 549 lines, 14 kB (raw view)
1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers 4 * 5 * Copyright (c) 2010, ST-Ericsson 6 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> 7 */ 8 9#include <net/mac80211.h> 10#include <linux/sched.h> 11#include <linux/jiffies.h> 12#include "queue.h" 13#include "cw1200.h" 14#include "debug.h" 15 16/* private */ struct cw1200_queue_item 17{ 18 struct list_head head; 19 struct sk_buff *skb; 20 u32 packet_id; 21 unsigned long queue_timestamp; 22 unsigned long xmit_timestamp; 23 struct cw1200_txpriv txpriv; 24 u8 generation; 25}; 26 27static inline void __cw1200_queue_lock(struct cw1200_queue *queue) 28{ 29 struct cw1200_queue_stats *stats = queue->stats; 30 if (queue->tx_locked_cnt++ == 0) { 31 pr_debug("[TX] Queue %d is locked.\n", 32 queue->queue_id); 33 ieee80211_stop_queue(stats->priv->hw, queue->queue_id); 34 } 35} 36 37static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) 38{ 39 struct cw1200_queue_stats *stats = queue->stats; 40 BUG_ON(!queue->tx_locked_cnt); 41 if (--queue->tx_locked_cnt == 0) { 42 pr_debug("[TX] Queue %d is unlocked.\n", 43 queue->queue_id); 44 ieee80211_wake_queue(stats->priv->hw, queue->queue_id); 45 } 46} 47 48static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation, 49 u8 *queue_id, u8 *item_generation, 50 u8 *item_id) 51{ 52 *item_id = (packet_id >> 0) & 0xFF; 53 *item_generation = (packet_id >> 8) & 0xFF; 54 *queue_id = (packet_id >> 16) & 0xFF; 55 *queue_generation = (packet_id >> 24) & 0xFF; 56} 57 58static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id, 59 u8 item_generation, u8 item_id) 60{ 61 return ((u32)item_id << 0) | 62 ((u32)item_generation << 8) | 63 ((u32)queue_id << 16) | 64 ((u32)queue_generation << 24); 65} 66 67static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats, 68 struct list_head *gc_list) 69{ 70 struct cw1200_queue_item *item, *tmp; 71 72 list_for_each_entry_safe(item, 
tmp, gc_list, head) { 73 list_del(&item->head); 74 stats->skb_dtor(stats->priv, item->skb, &item->txpriv); 75 kfree(item); 76 } 77} 78 79static void cw1200_queue_register_post_gc(struct list_head *gc_list, 80 struct cw1200_queue_item *item) 81{ 82 struct cw1200_queue_item *gc_item; 83 gc_item = kmemdup(item, sizeof(struct cw1200_queue_item), 84 GFP_ATOMIC); 85 BUG_ON(!gc_item); 86 list_add_tail(&gc_item->head, gc_list); 87} 88 89static void __cw1200_queue_gc(struct cw1200_queue *queue, 90 struct list_head *head, 91 bool unlock) 92{ 93 struct cw1200_queue_stats *stats = queue->stats; 94 struct cw1200_queue_item *item = NULL, *iter, *tmp; 95 bool wakeup_stats = false; 96 97 list_for_each_entry_safe(iter, tmp, &queue->queue, head) { 98 if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) { 99 item = iter; 100 break; 101 } 102 --queue->num_queued; 103 --queue->link_map_cache[iter->txpriv.link_id]; 104 spin_lock_bh(&stats->lock); 105 --stats->num_queued; 106 if (!--stats->link_map_cache[iter->txpriv.link_id]) 107 wakeup_stats = true; 108 spin_unlock_bh(&stats->lock); 109 cw1200_debug_tx_ttl(stats->priv); 110 cw1200_queue_register_post_gc(head, iter); 111 iter->skb = NULL; 112 list_move_tail(&iter->head, &queue->free_pool); 113 } 114 115 if (wakeup_stats) 116 wake_up(&stats->wait_link_id_empty); 117 118 if (queue->overfull) { 119 if (queue->num_queued <= (queue->capacity >> 1)) { 120 queue->overfull = false; 121 if (unlock) 122 __cw1200_queue_unlock(queue); 123 } else if (item) { 124 unsigned long tmo = item->queue_timestamp + queue->ttl; 125 mod_timer(&queue->gc, tmo); 126 cw1200_pm_stay_awake(&stats->priv->pm_state, 127 tmo - jiffies); 128 } 129 } 130} 131 132static void cw1200_queue_gc(struct timer_list *t) 133{ 134 LIST_HEAD(list); 135 struct cw1200_queue *queue = 136 timer_container_of(queue, t, gc); 137 138 spin_lock_bh(&queue->lock); 139 __cw1200_queue_gc(queue, &list, true); 140 spin_unlock_bh(&queue->lock); 141 cw1200_queue_post_gc(queue->stats, 
&list); 142} 143 144int cw1200_queue_stats_init(struct cw1200_queue_stats *stats, 145 size_t map_capacity, 146 cw1200_queue_skb_dtor_t skb_dtor, 147 struct cw1200_common *priv) 148{ 149 memset(stats, 0, sizeof(*stats)); 150 stats->map_capacity = map_capacity; 151 stats->skb_dtor = skb_dtor; 152 stats->priv = priv; 153 spin_lock_init(&stats->lock); 154 init_waitqueue_head(&stats->wait_link_id_empty); 155 156 stats->link_map_cache = kzalloc_objs(int, map_capacity); 157 if (!stats->link_map_cache) 158 return -ENOMEM; 159 160 return 0; 161} 162 163int cw1200_queue_init(struct cw1200_queue *queue, 164 struct cw1200_queue_stats *stats, 165 u8 queue_id, 166 size_t capacity, 167 unsigned long ttl) 168{ 169 size_t i; 170 171 memset(queue, 0, sizeof(*queue)); 172 queue->stats = stats; 173 queue->capacity = capacity; 174 queue->queue_id = queue_id; 175 queue->ttl = ttl; 176 INIT_LIST_HEAD(&queue->queue); 177 INIT_LIST_HEAD(&queue->pending); 178 INIT_LIST_HEAD(&queue->free_pool); 179 spin_lock_init(&queue->lock); 180 timer_setup(&queue->gc, cw1200_queue_gc, 0); 181 182 queue->pool = kzalloc_objs(struct cw1200_queue_item, capacity); 183 if (!queue->pool) 184 return -ENOMEM; 185 186 queue->link_map_cache = kzalloc_objs(int, stats->map_capacity); 187 if (!queue->link_map_cache) { 188 kfree(queue->pool); 189 queue->pool = NULL; 190 return -ENOMEM; 191 } 192 193 for (i = 0; i < capacity; ++i) 194 list_add_tail(&queue->pool[i].head, &queue->free_pool); 195 196 return 0; 197} 198 199int cw1200_queue_clear(struct cw1200_queue *queue) 200{ 201 int i; 202 LIST_HEAD(gc_list); 203 struct cw1200_queue_stats *stats = queue->stats; 204 struct cw1200_queue_item *item, *tmp; 205 206 spin_lock_bh(&queue->lock); 207 queue->generation++; 208 list_splice_tail_init(&queue->queue, &queue->pending); 209 list_for_each_entry_safe(item, tmp, &queue->pending, head) { 210 WARN_ON(!item->skb); 211 cw1200_queue_register_post_gc(&gc_list, item); 212 item->skb = NULL; 213 list_move_tail(&item->head, 
&queue->free_pool); 214 } 215 queue->num_queued = 0; 216 queue->num_pending = 0; 217 218 spin_lock_bh(&stats->lock); 219 for (i = 0; i < stats->map_capacity; ++i) { 220 stats->num_queued -= queue->link_map_cache[i]; 221 stats->link_map_cache[i] -= queue->link_map_cache[i]; 222 queue->link_map_cache[i] = 0; 223 } 224 spin_unlock_bh(&stats->lock); 225 if (queue->overfull) { 226 queue->overfull = false; 227 __cw1200_queue_unlock(queue); 228 } 229 spin_unlock_bh(&queue->lock); 230 wake_up(&stats->wait_link_id_empty); 231 cw1200_queue_post_gc(stats, &gc_list); 232 return 0; 233} 234 235void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats) 236{ 237 kfree(stats->link_map_cache); 238 stats->link_map_cache = NULL; 239} 240 241void cw1200_queue_deinit(struct cw1200_queue *queue) 242{ 243 cw1200_queue_clear(queue); 244 timer_delete_sync(&queue->gc); 245 INIT_LIST_HEAD(&queue->free_pool); 246 kfree(queue->pool); 247 kfree(queue->link_map_cache); 248 queue->pool = NULL; 249 queue->link_map_cache = NULL; 250 queue->capacity = 0; 251} 252 253size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, 254 u32 link_id_map) 255{ 256 size_t ret; 257 int i, bit; 258 size_t map_capacity = queue->stats->map_capacity; 259 260 if (!link_id_map) 261 return 0; 262 263 spin_lock_bh(&queue->lock); 264 if (link_id_map == (u32)-1) { 265 ret = queue->num_queued - queue->num_pending; 266 } else { 267 ret = 0; 268 for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) { 269 if (link_id_map & bit) 270 ret += queue->link_map_cache[i]; 271 } 272 } 273 spin_unlock_bh(&queue->lock); 274 return ret; 275} 276 277int cw1200_queue_put(struct cw1200_queue *queue, 278 struct sk_buff *skb, 279 struct cw1200_txpriv *txpriv) 280{ 281 int ret = 0; 282 struct cw1200_queue_stats *stats = queue->stats; 283 284 if (txpriv->link_id >= queue->stats->map_capacity) 285 return -EINVAL; 286 287 spin_lock_bh(&queue->lock); 288 if (!WARN_ON(list_empty(&queue->free_pool))) { 289 struct cw1200_queue_item *item = 
list_first_entry( 290 &queue->free_pool, struct cw1200_queue_item, head); 291 BUG_ON(item->skb); 292 293 list_move_tail(&item->head, &queue->queue); 294 item->skb = skb; 295 item->txpriv = *txpriv; 296 item->generation = 0; 297 item->packet_id = cw1200_queue_mk_packet_id(queue->generation, 298 queue->queue_id, 299 item->generation, 300 item - queue->pool); 301 item->queue_timestamp = jiffies; 302 303 ++queue->num_queued; 304 ++queue->link_map_cache[txpriv->link_id]; 305 306 spin_lock_bh(&stats->lock); 307 ++stats->num_queued; 308 ++stats->link_map_cache[txpriv->link_id]; 309 spin_unlock_bh(&stats->lock); 310 311 /* TX may happen in parallel sometimes. 312 * Leave extra queue slots so we don't overflow. 313 */ 314 if (queue->overfull == false && 315 queue->num_queued >= 316 (queue->capacity - (num_present_cpus() - 1))) { 317 queue->overfull = true; 318 __cw1200_queue_lock(queue); 319 mod_timer(&queue->gc, jiffies); 320 } 321 } else { 322 ret = -ENOENT; 323 } 324 spin_unlock_bh(&queue->lock); 325 return ret; 326} 327 328int cw1200_queue_get(struct cw1200_queue *queue, 329 u32 link_id_map, 330 struct wsm_tx **tx, 331 struct ieee80211_tx_info **tx_info, 332 const struct cw1200_txpriv **txpriv) 333{ 334 int ret = -ENOENT; 335 struct cw1200_queue_item *item; 336 struct cw1200_queue_stats *stats = queue->stats; 337 bool wakeup_stats = false; 338 339 spin_lock_bh(&queue->lock); 340 list_for_each_entry(item, &queue->queue, head) { 341 if (link_id_map & BIT(item->txpriv.link_id)) { 342 ret = 0; 343 break; 344 } 345 } 346 347 if (!WARN_ON(ret)) { 348 *tx = (struct wsm_tx *)item->skb->data; 349 *tx_info = IEEE80211_SKB_CB(item->skb); 350 *txpriv = &item->txpriv; 351 (*tx)->packet_id = item->packet_id; 352 list_move_tail(&item->head, &queue->pending); 353 ++queue->num_pending; 354 --queue->link_map_cache[item->txpriv.link_id]; 355 item->xmit_timestamp = jiffies; 356 357 spin_lock_bh(&stats->lock); 358 --stats->num_queued; 359 if (!--stats->link_map_cache[item->txpriv.link_id]) 
360 wakeup_stats = true; 361 spin_unlock_bh(&stats->lock); 362 } 363 spin_unlock_bh(&queue->lock); 364 if (wakeup_stats) 365 wake_up(&stats->wait_link_id_empty); 366 return ret; 367} 368 369int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id) 370{ 371 int ret = 0; 372 u8 queue_generation, queue_id, item_generation, item_id; 373 struct cw1200_queue_item *item; 374 struct cw1200_queue_stats *stats = queue->stats; 375 376 cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, 377 &item_generation, &item_id); 378 379 item = &queue->pool[item_id]; 380 381 spin_lock_bh(&queue->lock); 382 BUG_ON(queue_id != queue->queue_id); 383 if (queue_generation != queue->generation) { 384 ret = -ENOENT; 385 } else if (item_id >= (unsigned) queue->capacity) { 386 WARN_ON(1); 387 ret = -EINVAL; 388 } else if (item->generation != item_generation) { 389 WARN_ON(1); 390 ret = -ENOENT; 391 } else { 392 --queue->num_pending; 393 ++queue->link_map_cache[item->txpriv.link_id]; 394 395 spin_lock_bh(&stats->lock); 396 ++stats->num_queued; 397 ++stats->link_map_cache[item->txpriv.link_id]; 398 spin_unlock_bh(&stats->lock); 399 400 item->generation = ++item_generation; 401 item->packet_id = cw1200_queue_mk_packet_id(queue_generation, 402 queue_id, 403 item_generation, 404 item_id); 405 list_move(&item->head, &queue->queue); 406 } 407 spin_unlock_bh(&queue->lock); 408 return ret; 409} 410 411int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id) 412{ 413 int ret = 0; 414 u8 queue_generation, queue_id, item_generation, item_id; 415 struct cw1200_queue_item *item; 416 struct cw1200_queue_stats *stats = queue->stats; 417 struct sk_buff *gc_skb = NULL; 418 struct cw1200_txpriv gc_txpriv; 419 420 cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, 421 &item_generation, &item_id); 422 423 item = &queue->pool[item_id]; 424 425 spin_lock_bh(&queue->lock); 426 BUG_ON(queue_id != queue->queue_id); 427 if (queue_generation != queue->generation) { 428 ret = 
-ENOENT; 429 } else if (item_id >= (unsigned) queue->capacity) { 430 WARN_ON(1); 431 ret = -EINVAL; 432 } else if (item->generation != item_generation) { 433 WARN_ON(1); 434 ret = -ENOENT; 435 } else { 436 gc_txpriv = item->txpriv; 437 gc_skb = item->skb; 438 item->skb = NULL; 439 --queue->num_pending; 440 --queue->num_queued; 441 ++queue->num_sent; 442 ++item->generation; 443 /* Do not use list_move_tail here, but list_move: 444 * try to utilize cache row. 445 */ 446 list_move(&item->head, &queue->free_pool); 447 448 if (queue->overfull && 449 (queue->num_queued <= (queue->capacity >> 1))) { 450 queue->overfull = false; 451 __cw1200_queue_unlock(queue); 452 } 453 } 454 spin_unlock_bh(&queue->lock); 455 456 if (gc_skb) 457 stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv); 458 459 return ret; 460} 461 462int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, 463 struct sk_buff **skb, 464 const struct cw1200_txpriv **txpriv) 465{ 466 int ret = 0; 467 u8 queue_generation, queue_id, item_generation, item_id; 468 struct cw1200_queue_item *item; 469 cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, 470 &item_generation, &item_id); 471 472 item = &queue->pool[item_id]; 473 474 spin_lock_bh(&queue->lock); 475 BUG_ON(queue_id != queue->queue_id); 476 if (queue_generation != queue->generation) { 477 ret = -ENOENT; 478 } else if (item_id >= (unsigned) queue->capacity) { 479 WARN_ON(1); 480 ret = -EINVAL; 481 } else if (item->generation != item_generation) { 482 WARN_ON(1); 483 ret = -ENOENT; 484 } else { 485 *skb = item->skb; 486 *txpriv = &item->txpriv; 487 } 488 spin_unlock_bh(&queue->lock); 489 return ret; 490} 491 492void cw1200_queue_lock(struct cw1200_queue *queue) 493{ 494 spin_lock_bh(&queue->lock); 495 __cw1200_queue_lock(queue); 496 spin_unlock_bh(&queue->lock); 497} 498 499void cw1200_queue_unlock(struct cw1200_queue *queue) 500{ 501 spin_lock_bh(&queue->lock); 502 __cw1200_queue_unlock(queue); 503 spin_unlock_bh(&queue->lock); 504} 505 
506bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue, 507 unsigned long *timestamp, 508 u32 pending_frame_id) 509{ 510 struct cw1200_queue_item *item; 511 bool ret; 512 513 spin_lock_bh(&queue->lock); 514 ret = !list_empty(&queue->pending); 515 if (ret) { 516 list_for_each_entry(item, &queue->pending, head) { 517 if (item->packet_id != pending_frame_id) 518 if (time_before(item->xmit_timestamp, 519 *timestamp)) 520 *timestamp = item->xmit_timestamp; 521 } 522 } 523 spin_unlock_bh(&queue->lock); 524 return ret; 525} 526 527bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats, 528 u32 link_id_map) 529{ 530 bool empty = true; 531 532 spin_lock_bh(&stats->lock); 533 if (link_id_map == (u32)-1) { 534 empty = stats->num_queued == 0; 535 } else { 536 int i; 537 for (i = 0; i < stats->map_capacity; ++i) { 538 if (link_id_map & BIT(i)) { 539 if (stats->link_map_cache[i]) { 540 empty = false; 541 break; 542 } 543 } 544 } 545 } 546 spin_unlock_bh(&stats->lock); 547 548 return empty; 549}