iwlwifi: avoid Tx queue memory allocation in interface down

We used to free all the Tx queues memory when the interface is brought
down and reallocate it again when the interface is brought up. This
requires an order-4 allocation for txq->cmd[]. In situations like
s2ram, this usually leads to an allocation failure in the memory
subsystem. This patch fixes the problem by allocating the Tx queues
memory only the first time. Later, iwl_down/iwl_up only initialize the
queues but don't free and reallocate them. The memory is freed at
device removal time. BTW, we have already done this for the Rx queue.

This fixes bug https://bugzilla.kernel.org/show_bug.cgi?id=15551

Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Acked-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>

authored by Zhu Yi and committed by Reinette Chatre de0f60ea 04f2dec1

+60 -15
+7 -4
drivers/net/wireless/iwlwifi/iwl-core.c
··· 307 308 spin_unlock_irqrestore(&priv->lock, flags); 309 310 - /* Allocate and init all Tx and Command queues */ 311 - ret = iwl_txq_ctx_reset(priv); 312 - if (ret) 313 - return ret; 314 315 set_bit(STATUS_INIT, &priv->status); 316
··· 307 308 spin_unlock_irqrestore(&priv->lock, flags); 309 310 + /* Allocate or reset and init all Tx and Command queues */ 311 + if (!priv->txq) { 312 + ret = iwl_txq_ctx_alloc(priv); 313 + if (ret) 314 + return ret; 315 + } else 316 + iwl_txq_ctx_reset(priv); 317 318 set_bit(STATUS_INIT, &priv->status); 319
+4 -1
drivers/net/wireless/iwlwifi/iwl-core.h
··· 442 /***************************************************** 443 * TX 444 ******************************************************/ 445 - int iwl_txq_ctx_reset(struct iwl_priv *priv); 446 void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); 447 int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, 448 struct iwl_tx_queue *txq, ··· 457 void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 458 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 459 int slots_num, u32 txq_id); 460 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 461 int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); 462 int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
··· 442 /***************************************************** 443 * TX 444 ******************************************************/ 445 + int iwl_txq_ctx_alloc(struct iwl_priv *priv); 446 + void iwl_txq_ctx_reset(struct iwl_priv *priv); 447 void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); 448 int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, 449 struct iwl_tx_queue *txq, ··· 456 void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 457 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 458 int slots_num, u32 txq_id); 459 + void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 460 + int slots_num, u32 txq_id); 461 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 462 int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); 463 int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
+49 -10
drivers/net/wireless/iwlwifi/iwl-tx.c
··· 433 } 434 EXPORT_SYMBOL(iwl_tx_queue_init); 435 436 /** 437 * iwl_hw_txq_ctx_free - Free TXQ Context 438 * ··· 464 465 /* Tx queues */ 466 if (priv->txq) { 467 - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; 468 - txq_id++) 469 if (txq_id == IWL_CMD_QUEUE_NUM) 470 iwl_cmd_queue_free(priv); 471 else ··· 480 EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 481 482 /** 483 - * iwl_txq_ctx_reset - Reset TX queue context 484 - * Destroys all DMA structures and initialize them again 485 * 486 * @param priv 487 * @return error code 488 */ 489 - int iwl_txq_ctx_reset(struct iwl_priv *priv) 490 { 491 - int ret = 0; 492 int txq_id, slots_num; 493 unsigned long flags; 494 ··· 546 return ret; 547 } 548 549 /** 550 - * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory 551 */ 552 void iwl_txq_ctx_stop(struct iwl_priv *priv) 553 { ··· 590 1000); 591 } 592 spin_unlock_irqrestore(&priv->lock, flags); 593 - 594 - /* Deallocate memory for all Tx queues */ 595 - iwl_hw_txq_ctx_free(priv); 596 } 597 EXPORT_SYMBOL(iwl_txq_ctx_stop); 598
··· 433 } 434 EXPORT_SYMBOL(iwl_tx_queue_init); 435 436 + void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 437 + int slots_num, u32 txq_id) 438 + { 439 + int actual_slots = slots_num; 440 + 441 + if (txq_id == IWL_CMD_QUEUE_NUM) 442 + actual_slots++; 443 + 444 + memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); 445 + 446 + txq->need_update = 0; 447 + 448 + /* Initialize queue's high/low-water marks, and head/tail indexes */ 449 + iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); 450 + 451 + /* Tell device where to find queue */ 452 + priv->cfg->ops->lib->txq_init(priv, txq); 453 + } 454 + EXPORT_SYMBOL(iwl_tx_queue_reset); 455 + 456 /** 457 * iwl_hw_txq_ctx_free - Free TXQ Context 458 * ··· 444 445 /* Tx queues */ 446 if (priv->txq) { 447 + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 448 if (txq_id == IWL_CMD_QUEUE_NUM) 449 iwl_cmd_queue_free(priv); 450 else ··· 461 EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 462 463 /** 464 + * iwl_txq_ctx_alloc - allocate TX queue context 465 + * Allocate all Tx DMA structures and initialize them 466 * 467 * @param priv 468 * @return error code 469 */ 470 + int iwl_txq_ctx_alloc(struct iwl_priv *priv) 471 { 472 + int ret; 473 int txq_id, slots_num; 474 unsigned long flags; 475 ··· 527 return ret; 528 } 529 530 + void iwl_txq_ctx_reset(struct iwl_priv *priv) 531 + { 532 + int txq_id, slots_num; 533 + unsigned long flags; 534 + 535 + spin_lock_irqsave(&priv->lock, flags); 536 + 537 + /* Turn off all Tx DMA fifos */ 538 + priv->cfg->ops->lib->txq_set_sched(priv, 0); 539 + 540 + /* Tell NIC where to find the "keep warm" buffer */ 541 + iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 542 + 543 + spin_unlock_irqrestore(&priv->lock, flags); 544 + 545 + /* Alloc and init all Tx queues, including the command queue (#4) */ 546 + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 547 + slots_num = txq_id == IWL_CMD_QUEUE_NUM ? 
548 + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 549 + iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); 550 + } 551 + } 552 + 553 /** 554 + * iwl_txq_ctx_stop - Stop all Tx DMA channels 555 */ 556 void iwl_txq_ctx_stop(struct iwl_priv *priv) 557 { ··· 548 1000); 549 } 550 spin_unlock_irqrestore(&priv->lock, flags); 551 } 552 EXPORT_SYMBOL(iwl_txq_ctx_stop); 553