Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

snic: Fix for missing interrupts

- On posting an IO to the firmware, the adapter generates an interrupt.
Due to hardware issues, sometimes the adapter fails to generate
the interrupt. This failure skips updating the transmit queue
counters, which in turn causes a queue-full condition. The fix
addresses the queue-full condition.

- The fix also reserves a slot in the transmit queue for HBA reset.
When a queue-full condition is observed during IO, there will always be
room to post an HBA reset command.

Signed-off-by: Narsimhulu Musini <nmusini@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Narsimhulu Musini and committed by
Martin K. Petersen
c9747821 58fcf920

+60 -8
+3 -1
drivers/scsi/snic/snic_fwint.h
··· 414 414 /* Payload 88 bytes = 128 - 24 - 16 */ 415 415 #define SNIC_HOST_REQ_PAYLOAD ((int)(SNIC_HOST_REQ_LEN - \ 416 416 sizeof(struct snic_io_hdr) - \ 417 - (2 * sizeof(u64)))) 417 + (2 * sizeof(u64)) - sizeof(ulong))) 418 418 419 419 /* 420 420 * snic_host_req: host -> firmware request ··· 448 448 /* hba reset */ 449 449 struct snic_hba_reset reset; 450 450 } u; 451 + 452 + ulong req_pa; 451 453 }; /* end of snic_host_req structure */ 452 454 453 455
+57 -7
drivers/scsi/snic/snic_io.c
··· 48 48 SNIC_TRC(snic->shost->host_no, 0, 0, 49 49 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0, 50 50 0); 51 - pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE); 51 + 52 52 buf->os_buf = NULL; 53 53 } 54 54 ··· 137 137 return 0; 138 138 } 139 139 140 + static int 141 + snic_wqdesc_avail(struct snic *snic, int q_num, int req_type) 142 + { 143 + int nr_wqdesc = snic->config.wq_enet_desc_count; 144 + 145 + if (q_num > 0) { 146 + /* 147 + * Multi Queue case, additional care is required. 148 + * Per WQ active requests need to be maintained. 149 + */ 150 + SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n"); 151 + SNIC_BUG_ON(q_num > 0); 152 + 153 + return -1; 154 + } 155 + 156 + nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs); 157 + 158 + return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1); 159 + } 160 + 140 161 int 141 162 snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) 142 163 { 143 164 dma_addr_t pa = 0; 144 165 unsigned long flags; 145 166 struct snic_fw_stats *fwstats = &snic->s_stats.fw; 167 + struct snic_host_req *req = (struct snic_host_req *) os_buf; 146 168 long act_reqs; 169 + long desc_avail = 0; 147 170 int q_num = 0; 148 171 149 172 snic_print_desc(__func__, os_buf, len); ··· 179 156 return -ENOMEM; 180 157 } 181 158 159 + req->req_pa = (ulong)pa; 160 + 182 161 q_num = snic_select_wq(snic); 183 162 184 163 spin_lock_irqsave(&snic->wq_lock[q_num], flags); 185 - if (!svnic_wq_desc_avail(snic->wq)) { 164 + desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type); 165 + if (desc_avail <= 0) { 186 166 pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE); 167 + req->req_pa = 0; 187 168 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); 188 169 atomic64_inc(&snic->s_stats.misc.wq_alloc_fail); 189 170 SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no); ··· 196 169 } 197 170 198 171 snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1); 172 + /* 173 
+ * Update stats 174 + * note: when multi queue enabled, fw actv_reqs should be per queue. 175 + */ 176 + act_reqs = atomic64_inc_return(&fwstats->actv_reqs); 199 177 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); 200 178 201 - /* Update stats */ 202 - act_reqs = atomic64_inc_return(&fwstats->actv_reqs); 203 179 if (act_reqs > atomic64_read(&fwstats->max_actv_reqs)) 204 180 atomic64_set(&fwstats->max_actv_reqs, act_reqs); 205 181 ··· 348 318 "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n", 349 319 rqi, rqi->req, rqi->abort_req, rqi->dr_req); 350 320 351 - if (rqi->abort_req) 352 - mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 321 + if (rqi->abort_req) { 322 + if (rqi->abort_req->req_pa) 323 + pci_unmap_single(snic->pdev, 324 + rqi->abort_req->req_pa, 325 + sizeof(struct snic_host_req), 326 + PCI_DMA_TODEVICE); 353 327 354 - if (rqi->dr_req) 328 + mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 329 + } 330 + 331 + if (rqi->dr_req) { 332 + if (rqi->dr_req->req_pa) 333 + pci_unmap_single(snic->pdev, 334 + rqi->dr_req->req_pa, 335 + sizeof(struct snic_host_req), 336 + PCI_DMA_TODEVICE); 337 + 355 338 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 339 + } 340 + 341 + if (rqi->req->req_pa) 342 + pci_unmap_single(snic->pdev, 343 + rqi->req->req_pa, 344 + rqi->req_len, 345 + PCI_DMA_TODEVICE); 356 346 357 347 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]); 358 348 }