/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
				  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
				 struct lpfc_iocbq *pwqeq,
				 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy. Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * a lock held.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
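
/*
 * Usage note (illustrative sketch, not driver logic): despite the
 * parameter's name, callers pass a byte count, typically the queue's
 * entry_size, so a 64-byte WQE is moved as eight 64-bit words:
 *
 *	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
 *
 * On big-endian or 32-bit builds the macro above falls back to
 * lpfc_sli_pcimem_bcopy(), which also swaps each word into SLI
 * (little) endianness.
 */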

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If the queue is full of unprocessed entries this function
 * returns -EBUSY; if @q itself is invalid it returns -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
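
/*
 * Illustrative caller sketch (assumes the caller already owns the
 * hbalock, as lpfc_sli4_wq_put() requires; "wq" and "wqe" are
 * hypothetical locals):
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -EBUSY)
 *		(queue full: WQ_overflow was bumped, retry later)
 */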

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
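
/*
 * Note on the valid-bit protocol (illustrative): when eqav is set,
 * consumed EQEs are not cleared. Instead the expected valid bit flips
 * each time host_index wraps, e.g. for a 4-entry queue:
 *
 *	entries 0..3 consumed while qe_valid == 1
 *	host_index wraps to 0  ->  qe_valid toggles to 0
 *	entries 0..3 consumed while qe_valid == 0, and so on
 *
 * lpfc_sli4_eq_get() compares lpfc_eqe_valid against qe_valid, so stale
 * entries from the previous pass are never mistaken for new work.
 */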

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
							LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
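
/*
 * Sketch of the EQ processing contract (illustrative): queue_claimed is
 * a cmpxchg() gate, so only one context walks a given EQ at a time and
 * a racing caller simply rearms and leaves. A typical call:
 *
 *	count = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_REARM);
 *
 * handles up to max_proc_limit EQEs, releasing consumed-entry counts to
 * the hardware every notify_interval entries and once more (with the
 * requested arm state) on exit.
 */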

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If the queues are full of unprocessed entries this function
 * returns -EBUSY; invalid or mismatched queues return -ENOMEM or -EINVAL.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
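
/*
 * Illustrative pairing sketch: header and data RQEs are posted in
 * lockstep, one buffer pair per call ("hbuf"/"dbuf" are hypothetical
 * DMA buffers):
 *
 *	hrqe.address_lo = putPaddrLow(hbuf->phys);
 *	hrqe.address_hi = putPaddrHigh(hbuf->phys);
 *	drqe.address_lo = putPaddrLow(dbuf->phys);
 *	drqe.address_hi = putPaddrHigh(dbuf->phys);
 *	rc = lpfc_sli4_rq_put(hq, dq, &hrqe, &drqe);
 *
 * A negative return means the pair was not posted (invalid queues,
 * mismatched put indices, or a full queue).
 */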

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold the hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold the hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Lookup did to verify if did is still active on this vport */
	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock internally. It checks if
 * stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL, then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri,
 * < 0 if there was no memory or the ndlp was invalid.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
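
/*
 * Illustrative RRQ lifecycle sketch: after an exchange is aborted the
 * driver quarantines its XRI for RATOV + 1 second by setting the bit,
 * and allocation paths consult the bitmap before reusing that XRI for
 * the same DID:
 *
 *	lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
 *	...
 *	if (lpfc_test_rrq_active(phba, ndlp, xritag))
 *		(pick a different sglq/XRI for this node)
 *
 * lpfc_handle_rrq_active() clears the bit once rrq_stop_time expires.
 */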

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the allocation is
 * successful, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;
	u8 cmnd;

	cmnd = get_job_cmnd(phba, piocbq);

	if (piocbq->cmd_flag & LPFC_IO_FCP) {
		lpfc_cmd = piocbq->io_buf;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->ndlp;
	} else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
		if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->ndlp;
	} else {
		ndlp = piocbq->ndlp;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the allocation is
 * successful, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->cmd_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
		    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
		    sglq->state != SGL_XRI_ABORTED) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			pring = lpfc_phba_elsring(phba);
			/* Check if TXQ queue needs to be serviced */
			if (pring && (!list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
			     LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
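
/*
 * Illustrative allocation sketch: the locked and unlocked variants pair
 * the same way; the iotag survives across uses while every other field
 * is wiped on release:
 *
 *	piocb = lpfc_sli_get_iocbq(phba);	(takes hbalock internally)
 *	if (!piocb)
 *		(pool exhausted, back off)
 *	...
 *	lpfc_sli_release_iocbq(phba, piocb);
 */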

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (piocb->cmd_cmpl) {
			if (piocb->cmd_flag & LPFC_IO_NVME) {
				lpfc_nvme_cancel_iocb(phba, piocb,
						      ulpstatus, ulpWord4);
			} else {
				if (phba->sli_rev == LPFC_SLI_REV4) {
					bf_set(lpfc_wcqe_c_status,
					       &piocb->wcqe_cmpl, ulpstatus);
					piocb->wcqe_cmpl.parameter = ulpWord4;
				} else {
					piocb->iocb.ulpStatus = ulpstatus;
					piocb->iocb.un.ulpWord[4] = ulpWord4;
				}
				(piocb->cmd_cmpl) (phba, piocb, piocb);
			}
		} else {
			lpfc_sli_release_iocbq(phba, piocb);
		}
	}
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
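
/*
 * Illustrative classification examples (values per the switch above):
 *
 *	lpfc_sli_iocb_cmd_type(CMD_FCP_IREAD64_CX)   -> LPFC_SOL_IOCB
 *	lpfc_sli_iocb_cmd_type(CMD_ABORT_XRI_CN)     -> LPFC_ABORT_IOCB
 *	lpfc_sli_iocb_cmd_type(CMD_RCV_ELS_REQ64_CX) -> LPFC_UNSOL_IOCB
 *
 * The ring event handler uses the returned type to pick the completion
 * path for each IOCB.
 */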

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
1706
1707/**
1708 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1709 * @phba: Pointer to HBA context object.
1710 * @pring: Pointer to driver SLI ring object.
1711 * @piocb: Pointer to the driver iocb object.
1712 *
1713 * The driver calls this function with the hbalock held for SLI3 ports or
1714 * the ring lock held for SLI4 ports. The function adds the
1715 * new iocb to txcmplq of the given ring. This function always returns
1716 * 0. If this function is called for ELS ring, this function checks if
1717 * there is a vport associated with the ELS command. This function also
1718 * starts els_tmofunc timer if this is an ELS command.
1719 **/
1720static int
1721lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1722 struct lpfc_iocbq *piocb)
1723{
1724 u32 ulp_command = 0;
1725
1726 BUG_ON(!piocb);
1727 ulp_command = get_job_cmnd(phba, piocb);
1728
1729 list_add_tail(&piocb->list, &pring->txcmplq);
1730 piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
1731 pring->txcmplq_cnt++;
1732 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1733 (ulp_command != CMD_ABORT_XRI_WQE) &&
1734 (ulp_command != CMD_ABORT_XRI_CN) &&
1735 (ulp_command != CMD_CLOSE_XRI_CN)) {
1736 BUG_ON(!piocb->vport);
1737 if (!(piocb->vport->load_flag & FC_UNLOADING))
1738 mod_timer(&piocb->vport->els_tmofunc,
1739 jiffies +
1740 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1741 }
1742
1743 return 0;
1744}
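
/*
 * Editor's sketch, not driver code: the ELS timer above is armed for
 * twice the R_A_TOV value, converted from seconds to jiffies. A
 * hypothetical helper making the arithmetic explicit:
 */
static inline unsigned long
lpfc_example_els_deadline(uint32_t fc_ratov)
{
	/* fc_ratov is in seconds; (fc_ratov << 1) doubles it */
	return jiffies + msecs_to_jiffies(1000 * (fc_ratov << 1));
}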
1745
1746/**
1747 * lpfc_sli_ringtx_get - Get first element of the txq
1748 * @phba: Pointer to HBA context object.
1749 * @pring: Pointer to driver SLI ring object.
1750 *
1751 * This function is called with hbalock held to get next
1752 * iocb in txq of the given ring. If there is any iocb in
1753 * the txq, the function returns first iocb in the list after
1754 * removing the iocb from the list, else it returns NULL.
1755 **/
1756struct lpfc_iocbq *
1757lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1758{
1759 struct lpfc_iocbq *cmd_iocb;
1760
1761 lockdep_assert_held(&phba->hbalock);
1762
1763 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1764 return cmd_iocb;
1765}
1766
1767/**
1768 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1769 * @phba: Pointer to HBA context object.
1770 * @cmdiocb: Pointer to driver command iocb object.
1771 * @rspiocb: Pointer to driver response iocb object.
1772 *
1773 * This routine will inform the driver of any BW adjustments we need
1774 * to make. These changes will be picked up during the next CMF
1775 * timer interrupt. In addition, any BW changes will be logged
1776 * with LOG_CGN_MGMT.
1777 **/
1778static void
1779lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1780 struct lpfc_iocbq *rspiocb)
1781{
1782 union lpfc_wqe128 *wqe;
1783 uint32_t status, info;
1784 struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
1785 uint64_t bw, bwdif, slop;
1786 uint64_t pcent, bwpcent;
1787 int asig, afpin, sigcnt, fpincnt;
1788 int wsigmax, wfpinmax, cg, tdp;
1789 char *s;
1790
1791 /* First check for error */
1792 status = bf_get(lpfc_wcqe_c_status, wcqe);
1793 if (status) {
1794 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1795 "6211 CMF_SYNC_WQE Error "
1796 "req_tag x%x status x%x hwstatus x%x "
1797 "tdatap x%x parm x%x\n",
1798 bf_get(lpfc_wcqe_c_request_tag, wcqe),
1799 bf_get(lpfc_wcqe_c_status, wcqe),
1800 bf_get(lpfc_wcqe_c_hw_status, wcqe),
1801 wcqe->total_data_placed,
1802 wcqe->parameter);
1803 goto out;
1804 }
1805
1806 /* Gather congestion information on a successful cmpl */
1807 info = wcqe->parameter;
1808 phba->cmf_active_info = info;
1809
1810 /* See if firmware info count is valid or has changed */
1811 if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1812 info = 0;
1813 else
1814 phba->cmf_info_per_interval = info;
1815
1816 tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
1817 cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
1818
1819 /* Get BW requirement from firmware */
1820 bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1821 if (!bw) {
1822 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1823 "6212 CMF_SYNC_WQE x%x: NULL bw\n",
1824 bf_get(lpfc_wcqe_c_request_tag, wcqe));
1825 goto out;
1826 }
1827
1828 /* Gather information needed for logging if a BW change is required */
1829 wqe = &cmdiocb->wqe;
1830 asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1831 afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1832 fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1833 sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
1834 if (phba->cmf_max_bytes_per_interval != bw ||
1835 (asig || afpin || sigcnt || fpincnt)) {
1836 /* Are we increasing or decreasing BW */
1837 if (phba->cmf_max_bytes_per_interval < bw) {
1838 bwdif = bw - phba->cmf_max_bytes_per_interval;
1839 s = "Increase";
1840 } else {
1841 bwdif = phba->cmf_max_bytes_per_interval - bw;
1842 s = "Decrease";
1843 }
1844
1845 /* What is the change percentage */
1846 slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
1847 pcent = div64_u64(bwdif * 100 + slop,
1848 phba->cmf_link_byte_count);
1849 bwpcent = div64_u64(bw * 100 + slop,
1850 phba->cmf_link_byte_count);
1851 if (asig) {
1852 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1853 "6237 BW Threshold %lld%% (%lld): "
1854 "%lld%% %s: Signal Alarm: cg:%d "
1855 "Info:%u\n",
1856 bwpcent, bw, pcent, s, cg,
1857 phba->cmf_active_info);
1858 } else if (afpin) {
1859 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1860 "6238 BW Threshold %lld%% (%lld): "
1861 "%lld%% %s: FPIN Alarm: cg:%d "
1862 "Info:%u\n",
1863 bwpcent, bw, pcent, s, cg,
1864 phba->cmf_active_info);
1865 } else if (sigcnt) {
1866 wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
1867 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1868 "6239 BW Threshold %lld%% (%lld): "
1869 "%lld%% %s: Signal Warning: "
1870 "Cnt %d Max %d: cg:%d Info:%u\n",
1871 bwpcent, bw, pcent, s, sigcnt,
1872 wsigmax, cg, phba->cmf_active_info);
1873 } else if (fpincnt) {
1874 wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
1875 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1876 "6240 BW Threshold %lld%% (%lld): "
1877 "%lld%% %s: FPIN Warning: "
1878 "Cnt %d Max %d: cg:%d Info:%u\n",
1879 bwpcent, bw, pcent, s, fpincnt,
1880 wfpinmax, cg, phba->cmf_active_info);
1881 } else {
1882 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1883 "6241 BW Threshold %lld%% (%lld): "
1884 "CMF %lld%% %s: cg:%d Info:%u\n",
1885 bwpcent, bw, pcent, s, cg,
1886 phba->cmf_active_info);
1887 }
1888 } else if (info) {
1889 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1890 "6246 Info Threshold %u\n", info);
1891 }
1892
1893 /* Save BW change to be picked up during next timer interrupt */
1894 phba->cmf_last_sync_bw = bw;
1895out:
1896 lpfc_sli_release_iocbq(phba, cmdiocb);
1897}
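
/*
 * Editor's sketch, not driver code: the percentage math above adds a
 * "slop" of cmf_link_byte_count / 200 (i.e. 0.5% of the whole) before
 * the integer divide, so the result rounds to the nearest whole
 * percent instead of truncating. Standalone form with hypothetical
 * names:
 */
static inline uint64_t
lpfc_example_round_pcent(uint64_t part, uint64_t whole)
{
	uint64_t slop = div_u64(whole, 200);	/* 0.5% of whole */

	return div64_u64(part * 100 + slop, whole);
}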
1898
1899/**
1900 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1901 * @phba: Pointer to HBA context object.
1902 * @ms: interval in milliseconds to set in the WQE; 0 means issue the init op
1903 * @total: Total rcv bytes for this interval
1904 *
1905 * This routine is called every CMF timer interrupt. Its purpose is
1906 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1907 * that may indicate we have congestion (FPINs or Signals). Upon
1908 * completion, the firmware will indicate any BW restrictions the
1909 * driver may need to take.
1910 **/
1911int
1912lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1913{
1914 union lpfc_wqe128 *wqe;
1915 struct lpfc_iocbq *sync_buf;
1916 unsigned long iflags;
1917 u32 ret_val;
1918 u32 atot, wtot, max;
1919
1920 /* First address any alarm / warning activity */
1921 atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1922 wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1923
1924 /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1925 if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1926 phba->link_state == LPFC_LINK_DOWN)
1927 return 0;
1928
1929 spin_lock_irqsave(&phba->hbalock, iflags);
1930 sync_buf = __lpfc_sli_get_iocbq(phba);
1931 if (!sync_buf) {
1932 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1933 "6213 No available WQEs for CMF_SYNC_WQE\n");
1934 ret_val = ENOMEM;
1935 goto out_unlock;
1936 }
1937
1938 wqe = &sync_buf->wqe;
1939
1940 /* WQEs are reused. Clear stale data and set key fields to zero */
1941 memset(wqe, 0, sizeof(*wqe));
1942
1943 /* If this is the very first CMF_SYNC_WQE, issue an init operation */
1944 if (!ms) {
1945 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1946 "6441 CMF Init %d - CMF_SYNC_WQE\n",
1947 phba->fc_eventTag);
1948 bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1949 bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1950 goto initpath;
1951 }
1952
1953 bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1954 bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1955
1956 /* Check for alarms / warnings */
1957 if (atot) {
1958 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1959			/* We hit a Signal alarm condition */
1960 bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1961 } else {
1962 /* We hit a FPIN alarm condition */
1963 bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1964 }
1965 } else if (wtot) {
1966 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1967 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1968			/* We hit a Signal warning condition */
1969 max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
1970 lpfc_acqe_cgn_frequency;
1971 bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1972 bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
1973 } else {
1974 /* We hit a FPIN warning condition */
1975 bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1976 bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1977 }
1978 }
1979
1980 /* Update total read blocks during previous timer interval */
1981 wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
1982
1983initpath:
1984 bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
1985 wqe->cmf_sync.event_tag = phba->fc_eventTag;
1986 bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
1987
1988 /* Setup reqtag to match the wqe completion. */
1989 bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
1990
1991 bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
1992
1993 bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
1994 bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
1995 bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
1996
1997 sync_buf->vport = phba->pport;
1998 sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
1999 sync_buf->cmd_dmabuf = NULL;
2000 sync_buf->rsp_dmabuf = NULL;
2001 sync_buf->bpl_dmabuf = NULL;
2002 sync_buf->sli4_xritag = NO_XRI;
2003
2004 sync_buf->cmd_flag |= LPFC_IO_CMF;
2005 ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2006 if (ret_val)
2007 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2008 "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2009 ret_val);
2010out_unlock:
2011 spin_unlock_irqrestore(&phba->hbalock, iflags);
2012 return ret_val;
2013}
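
/*
 * Editor's note, a worked example with made-up values for the wsigmax
 * computation above, assuming lpfc_fabric_cgn_frequency is the signal
 * period in milliseconds and lpfc_acqe_cgn_frequency the number of
 * signals per period (neither is defined in this section):
 *
 *	LPFC_SEC_TO_MSEC          = 1000
 *	lpfc_fabric_cgn_frequency = 100   (ms per signalling period)
 *	lpfc_acqe_cgn_frequency   = 10    (signals per period)
 *
 *	max = 1000 / 100 * 10 = 100 signals possible per second
 */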
2014
2015/**
2016 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2017 * @phba: Pointer to HBA context object.
2018 * @pring: Pointer to driver SLI ring object.
2019 *
2020 * This function is called with hbalock held and the caller must post the
2021 * iocb without releasing the lock. If the caller releases the lock,
2022 * the iocb slot returned by the function is not guaranteed to be available.
2023 * The function returns a pointer to the next available iocb slot if there
2024 * is an available slot in the ring, else it returns NULL.
2025 * If the get index fetched from the port is out of range, the function
2026 * will post an error attention event to the worker thread to take the
2027 * HBA to offline state.
2028 **/
2029static IOCB_t *
2030lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2031{
2032 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2033 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
2034
2035 lockdep_assert_held(&phba->hbalock);
2036
2037 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2038 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2039 pring->sli.sli3.next_cmdidx = 0;
2040
2041 if (unlikely(pring->sli.sli3.local_getidx ==
2042 pring->sli.sli3.next_cmdidx)) {
2043
2044 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2045
2046 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2048 "0315 Ring %d issue: portCmdGet %d "
2049 "is bigger than cmd ring %d\n",
2050 pring->ringno,
2051 pring->sli.sli3.local_getidx,
2052 max_cmd_idx);
2053
2054 phba->link_state = LPFC_HBA_ERROR;
2055 /*
2056 * All error attention handlers are posted to
2057 * worker thread
2058 */
2059 phba->work_ha |= HA_ERATT;
2060 phba->work_hs = HS_FFER3;
2061
2062 lpfc_worker_wake_up(phba);
2063
2064 return NULL;
2065 }
2066
2067 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2068 return NULL;
2069 }
2070
2071 return lpfc_cmd_iocb(phba, pring);
2072}
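
/*
 * Editor's sketch, not driver code: the index bookkeeping above is a
 * circular ring in which the producer index chases the consumer (port)
 * get index. The generic shape of the "ring full" test, with
 * hypothetical names:
 */
static inline bool
lpfc_example_ring_full(uint32_t putidx, uint32_t getidx, uint32_t entries)
{
	/* one slot is sacrificed to tell a full ring from an empty one */
	return ((putidx + 1) % entries) == getidx;
}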
2073
2074/**
2075 * lpfc_sli_next_iotag - Get an iotag for the iocb
2076 * @phba: Pointer to HBA context object.
2077 * @iocbq: Pointer to driver iocb object.
2078 *
2079 * This function gets an iotag for the iocb. If there is no unused iotag and
2080 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
2081 * array and assigns a new iotag.
2082 * The function returns the allocated iotag if successful, else returns zero.
2083 * Zero is not a valid iotag.
2084 * The caller is not required to hold any lock.
2085 **/
2086uint16_t
2087lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2088{
2089 struct lpfc_iocbq **new_arr;
2090 struct lpfc_iocbq **old_arr;
2091 size_t new_len;
2092 struct lpfc_sli *psli = &phba->sli;
2093 uint16_t iotag;
2094
2095 spin_lock_irq(&phba->hbalock);
2096 iotag = psli->last_iotag;
2097	if (++iotag < psli->iocbq_lookup_len) {
2098 psli->last_iotag = iotag;
2099 psli->iocbq_lookup[iotag] = iocbq;
2100 spin_unlock_irq(&phba->hbalock);
2101 iocbq->iotag = iotag;
2102 return iotag;
2103 } else if (psli->iocbq_lookup_len < (0xffff
2104 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2105 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2106 spin_unlock_irq(&phba->hbalock);
2107 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
2108 GFP_KERNEL);
2109 if (new_arr) {
2110 spin_lock_irq(&phba->hbalock);
2111 old_arr = psli->iocbq_lookup;
2112 if (new_len <= psli->iocbq_lookup_len) {
2113				/* highly improbable case */
2114 kfree(new_arr);
2115 iotag = psli->last_iotag;
2116				if (++iotag < psli->iocbq_lookup_len) {
2117 psli->last_iotag = iotag;
2118 psli->iocbq_lookup[iotag] = iocbq;
2119 spin_unlock_irq(&phba->hbalock);
2120 iocbq->iotag = iotag;
2121 return iotag;
2122 }
2123 spin_unlock_irq(&phba->hbalock);
2124 return 0;
2125 }
2126 if (psli->iocbq_lookup)
2127 memcpy(new_arr, old_arr,
2128 ((psli->last_iotag + 1) *
2129 sizeof (struct lpfc_iocbq *)));
2130 psli->iocbq_lookup = new_arr;
2131 psli->iocbq_lookup_len = new_len;
2132 psli->last_iotag = iotag;
2133 psli->iocbq_lookup[iotag] = iocbq;
2134 spin_unlock_irq(&phba->hbalock);
2135 iocbq->iotag = iotag;
2136 kfree(old_arr);
2137 return iotag;
2138 }
2139 } else
2140 spin_unlock_irq(&phba->hbalock);
2141
2142 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2143 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
2144 psli->last_iotag);
2145
2146 return 0;
2147}
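
/*
 * Editor's sketch, not driver code: lpfc_sli_next_iotag() grows its
 * lookup table with the usual "allocate outside the lock, revalidate
 * inside" pattern, because kcalloc(GFP_KERNEL) may sleep and must not
 * run under hbalock. The bare shape of that pattern, with hypothetical
 * names:
 */
static inline void
lpfc_example_grow_table(spinlock_t *lock, void ***table, size_t *lenp,
			size_t new_len)
{
	void **grown = kcalloc(new_len, sizeof(*grown), GFP_KERNEL);
	void **old;

	if (!grown)
		return;
	spin_lock_irq(lock);
	if (new_len <= *lenp) {
		/* lost the race; another thread already grew the table */
		spin_unlock_irq(lock);
		kfree(grown);
		return;
	}
	old = *table;
	if (old)
		memcpy(grown, old, *lenp * sizeof(*grown));
	*table = grown;		/* publish the new table under the lock */
	*lenp = new_len;
	spin_unlock_irq(lock);
	kfree(old);
}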
2148
2149/**
2150 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2151 * @phba: Pointer to HBA context object.
2152 * @pring: Pointer to driver SLI ring object.
2153 * @iocb: Pointer to iocb slot in the ring.
2154 * @nextiocb: Pointer to driver iocb object which needs to be
2155 * posted to firmware.
2156 *
2157 * This function is called to post a new iocb to the firmware. This
2158 * function copies the new iocb to ring iocb slot and updates the
2159 * ring pointers. It adds the new iocb to the txcmplq if there is
2160 * a completion callback for this iocb; otherwise the function frees the
2161 * iocb object. The hbalock is asserted held in the code path calling
2162 * this routine.
2163 **/
2164static void
2165lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2166 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2167{
2168 /*
2169 * Set up an iotag
2170 */
2171 nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
2172
2173
2174 if (pring->ringno == LPFC_ELS_RING) {
2175 lpfc_debugfs_slow_ring_trc(phba,
2176 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
2177 *(((uint32_t *) &nextiocb->iocb) + 4),
2178 *(((uint32_t *) &nextiocb->iocb) + 6),
2179 *(((uint32_t *) &nextiocb->iocb) + 7));
2180 }
2181
2182 /*
2183 * Issue iocb command to adapter
2184 */
2185 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
2186 wmb();
2187 pring->stats.iocb_cmd++;
2188
2189 /*
2190 * If there is no completion routine to call, we can release the
2191 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2192 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
2193 */
2194 if (nextiocb->cmd_cmpl)
2195 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2196 else
2197 __lpfc_sli_release_iocbq(phba, nextiocb);
2198
2199 /*
2200 * Let the HBA know what IOCB slot will be the next one the
2201 * driver will put a command into.
2202 */
2203 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2204 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2205}
2206
2207/**
2208 * lpfc_sli_update_full_ring - Update the chip attention register
2209 * @phba: Pointer to HBA context object.
2210 * @pring: Pointer to driver SLI ring object.
2211 *
2212 * The caller is not required to hold any lock for calling this function.
2213 * This function updates the chip attention bits for the ring to inform the
2214 * firmware that there is pending work to be done for this ring and requests an
2215 * interrupt when there is space available in the ring. This function is
2216 * called when the driver is unable to post more iocbs to the ring due
2217 * to unavailability of space in the ring.
2218 **/
2219static void
2220lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2221{
2222 int ringno = pring->ringno;
2223
2224 pring->flag |= LPFC_CALL_RING_AVAILABLE;
2225
2226 wmb();
2227
2228 /*
2229 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2230 * The HBA will tell us when an IOCB entry is available.
2231 */
2232 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2233 readl(phba->CAregaddr); /* flush */
2234
2235 pring->stats.iocb_cmd_full++;
2236}
2237
2238/**
2239 * lpfc_sli_update_ring - Update chip attention register
2240 * @phba: Pointer to HBA context object.
2241 * @pring: Pointer to driver SLI ring object.
2242 *
2243 * This function updates the chip attention register bit for the
2244 * given ring to inform HBA that there is more work to be done
2245 * in this ring. The caller is not required to hold any lock.
2246 **/
2247static void
2248lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2249{
2250 int ringno = pring->ringno;
2251
2252 /*
2253 * Tell the HBA that there is work to do in this ring.
2254 */
2255 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2256 wmb();
2257 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2258 readl(phba->CAregaddr); /* flush */
2259 }
2260}
2261
2262/**
2263 * lpfc_sli_resume_iocb - Process iocbs in the txq
2264 * @phba: Pointer to HBA context object.
2265 * @pring: Pointer to driver SLI ring object.
2266 *
2267 * This function is called with hbalock held to post pending iocbs
2268 * in the txq to the firmware. This function is called when driver
2269 * detects space available in the ring.
2270 **/
2271static void
2272lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2273{
2274 IOCB_t *iocb;
2275 struct lpfc_iocbq *nextiocb;
2276
2277 lockdep_assert_held(&phba->hbalock);
2278
2279 /*
2280 * Check to see if:
2281 * (a) there is anything on the txq to send
2282 * (b) link is up
2283 * (c) link attention events can be processed (fcp ring only)
2284 * (d) IOCB processing is not blocked by the outstanding mbox command.
2285 */
2286
2287 if (lpfc_is_link_up(phba) &&
2288 (!list_empty(&pring->txq)) &&
2289 (pring->ringno != LPFC_FCP_RING ||
2290 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2291
2292 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2293 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2294 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2295
2296 if (iocb)
2297 lpfc_sli_update_ring(phba, pring);
2298 else
2299 lpfc_sli_update_full_ring(phba, pring);
2300 }
2301
2302 return;
2303}
2304
2305/**
2306 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2307 * @phba: Pointer to HBA context object.
2308 * @hbqno: HBQ number.
2309 *
2310 * This function is called with hbalock held to get the next
2311 * available slot for the given HBQ. If there is free slot
2312 * available for the HBQ it will return pointer to the next available
2313 * HBQ entry else it will return NULL.
2314 **/
2315static struct lpfc_hbq_entry *
2316lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2317{
2318 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2319
2320 lockdep_assert_held(&phba->hbalock);
2321
2322 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2323 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2324 hbqp->next_hbqPutIdx = 0;
2325
2326 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2327 uint32_t raw_index = phba->hbq_get[hbqno];
2328 uint32_t getidx = le32_to_cpu(raw_index);
2329
2330 hbqp->local_hbqGetIdx = getidx;
2331
2332 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2334 "1802 HBQ %d: local_hbqGetIdx "
2335 "%u is > than hbqp->entry_count %u\n",
2336 hbqno, hbqp->local_hbqGetIdx,
2337 hbqp->entry_count);
2338
2339 phba->link_state = LPFC_HBA_ERROR;
2340 return NULL;
2341 }
2342
2343 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2344 return NULL;
2345 }
2346
2347 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2348 hbqp->hbqPutIdx;
2349}
2350
2351/**
2352 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2353 * @phba: Pointer to HBA context object.
2354 *
2355 * This function is called with no lock held to free all the
2356 * hbq buffers while uninitializing the SLI interface. It also
2357 * frees the HBQ buffers returned by the firmware but not yet
2358 * processed by the upper layers.
2359 **/
2360void
2361lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2362{
2363 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2364 struct hbq_dmabuf *hbq_buf;
2365 unsigned long flags;
2366 int i, hbq_count;
2367
2368 hbq_count = lpfc_sli_hbq_count();
2369 /* Return all memory used by all HBQs */
2370 spin_lock_irqsave(&phba->hbalock, flags);
2371 for (i = 0; i < hbq_count; ++i) {
2372 list_for_each_entry_safe(dmabuf, next_dmabuf,
2373 &phba->hbqs[i].hbq_buffer_list, list) {
2374 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2375 list_del(&hbq_buf->dbuf.list);
2376 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2377 }
2378 phba->hbqs[i].buffer_count = 0;
2379 }
2380
2381 /* Mark the HBQs not in use */
2382 phba->hbq_in_use = 0;
2383 spin_unlock_irqrestore(&phba->hbalock, flags);
2384}
2385
2386/**
2387 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2388 * @phba: Pointer to HBA context object.
2389 * @hbqno: HBQ number.
2390 * @hbq_buf: Pointer to HBQ buffer.
2391 *
2392 * This function is called with the hbalock held to post a
2393 * hbq buffer to the firmware. If the function finds an empty
2394 * slot in the HBQ, it will post the buffer. The function will return
2395 * zero if it successfully posts the buffer, else it will return
2396 * an error.
2397 **/
2398static int
2399lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2400 struct hbq_dmabuf *hbq_buf)
2401{
2402 lockdep_assert_held(&phba->hbalock);
2403 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2404}
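
/*
 * Editor's note: lpfc_sli_hbq_to_firmware() is an indirection point.
 * During setup (outside this section) the driver installs either the
 * SLI-3 or the SLI-4 method defined below, roughly:
 *
 *	phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
 * or
 *	phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
 *
 * so the callers above stay revision-agnostic.
 */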
2405
2406/**
2407 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2408 * @phba: Pointer to HBA context object.
2409 * @hbqno: HBQ number.
2410 * @hbq_buf: Pointer to HBQ buffer.
2411 *
2412 * This function is called with the hbalock held to post a hbq buffer to the
2413 * firmware. If the function finds an empty slot in the HBQ, it will post the
2414 * buffer and place it on the hbq_buffer_list. The function will return zero if
2415 * it successfully posts the buffer, else it will return an error.
2416 **/
2417static int
2418lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2419 struct hbq_dmabuf *hbq_buf)
2420{
2421 struct lpfc_hbq_entry *hbqe;
2422 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2423
2424 lockdep_assert_held(&phba->hbalock);
2425 /* Get next HBQ entry slot to use */
2426 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2427 if (hbqe) {
2428 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2429
2430 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2431 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2432 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2433 hbqe->bde.tus.f.bdeFlags = 0;
2434 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2435 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2436 /* Sync SLIM */
2437 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2438 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2439 /* flush */
2440 readl(phba->hbq_put + hbqno);
2441 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2442 return 0;
2443	}
2444	return -ENOMEM;
2445}
2446
2447/**
2448 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2449 * @phba: Pointer to HBA context object.
2450 * @hbqno: HBQ number.
2451 * @hbq_buf: Pointer to HBQ buffer.
2452 *
2453 * This function is called with the hbalock held to post an RQE to the SLI4
2454 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2455 * the hbq_buffer_list and return zero, otherwise it will return an error.
2456 **/
2457static int
2458lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2459 struct hbq_dmabuf *hbq_buf)
2460{
2461 int rc;
2462 struct lpfc_rqe hrqe;
2463 struct lpfc_rqe drqe;
2464 struct lpfc_queue *hrq;
2465 struct lpfc_queue *drq;
2466
2467 if (hbqno != LPFC_ELS_HBQ)
2468 return 1;
2469 hrq = phba->sli4_hba.hdr_rq;
2470 drq = phba->sli4_hba.dat_rq;
2471
2472 lockdep_assert_held(&phba->hbalock);
2473 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2474 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2475 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2476 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2477 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2478 if (rc < 0)
2479 return rc;
2480 hbq_buf->tag = (rc | (hbqno << 16));
2481 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2482 return 0;
2483}
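
/*
 * Editor's sketch, not driver code: HBQ buffer tags pack the HBQ number
 * into the upper 16 bits and the per-queue index (here, the RQ put
 * index returned by lpfc_sli4_rq_put()) into the lower 16 bits;
 * lpfc_sli_hbqbuf_find() later recovers the queue with "tag >> 16".
 */
static inline uint32_t
lpfc_example_hbq_tag(uint32_t hbqno, uint32_t index)
{
	return index | (hbqno << 16);
}

static inline uint32_t
lpfc_example_hbqno_from_tag(uint32_t tag)
{
	return tag >> 16;
}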
2484
2485/* HBQ for ELS and CT traffic. */
2486static struct lpfc_hbq_init lpfc_els_hbq = {
2487 .rn = 1,
2488 .entry_count = 256,
2489 .mask_count = 0,
2490 .profile = 0,
2491 .ring_mask = (1 << LPFC_ELS_RING),
2492 .buffer_count = 0,
2493 .init_count = 40,
2494 .add_count = 40,
2495};
2496
2497/* Array of HBQs */
2498struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2499 &lpfc_els_hbq,
2500};
2501
2502/**
2503 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2504 * @phba: Pointer to HBA context object.
2505 * @hbqno: HBQ number.
2506 * @count: Number of HBQ buffers to be posted.
2507 *
2508 * This function is called with no lock held to post more hbq buffers to the
2509 * given HBQ. The function returns the number of HBQ buffers successfully
2510 * posted.
2511 **/
2512static int
2513lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2514{
2515 uint32_t i, posted = 0;
2516 unsigned long flags;
2517 struct hbq_dmabuf *hbq_buffer;
2518 LIST_HEAD(hbq_buf_list);
2519 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2520 return 0;
2521
2522 if ((phba->hbqs[hbqno].buffer_count + count) >
2523 lpfc_hbq_defs[hbqno]->entry_count)
2524 count = lpfc_hbq_defs[hbqno]->entry_count -
2525 phba->hbqs[hbqno].buffer_count;
2526 if (!count)
2527 return 0;
2528 /* Allocate HBQ entries */
2529 for (i = 0; i < count; i++) {
2530 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2531 if (!hbq_buffer)
2532 break;
2533 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2534 }
2535 /* Check whether HBQ is still in use */
2536 spin_lock_irqsave(&phba->hbalock, flags);
2537 if (!phba->hbq_in_use)
2538 goto err;
2539 while (!list_empty(&hbq_buf_list)) {
2540 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2541 dbuf.list);
2542 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2543 (hbqno << 16));
2544 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2545 phba->hbqs[hbqno].buffer_count++;
2546 posted++;
2547 } else
2548 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2549 }
2550 spin_unlock_irqrestore(&phba->hbalock, flags);
2551 return posted;
2552err:
2553 spin_unlock_irqrestore(&phba->hbalock, flags);
2554 while (!list_empty(&hbq_buf_list)) {
2555 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2556 dbuf.list);
2557 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2558 }
2559 return 0;
2560}
2561
2562/**
2563 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2564 * @phba: Pointer to HBA context object.
2565 * @qno: HBQ number.
2566 *
2567 * This function posts more buffers to the HBQ. This function
2568 * is called with no lock held. The function returns the number of HBQ entries
2569 * successfully posted.
2570 **/
2571int
2572lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2573{
2574 if (phba->sli_rev == LPFC_SLI_REV4)
2575 return 0;
2576 else
2577 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2578 lpfc_hbq_defs[qno]->add_count);
2579}
2580
2581/**
2582 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2583 * @phba: Pointer to HBA context object.
2584 * @qno: HBQ queue number.
2585 *
2586 * This function is called from SLI initialization code path with
2587 * no lock held to post initial HBQ buffers to firmware. The
2588 * function returns the number of HBQ buffers successfully posted.
2589 **/
2590static int
2591lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2592{
2593 if (phba->sli_rev == LPFC_SLI_REV4)
2594 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2595 lpfc_hbq_defs[qno]->entry_count);
2596 else
2597 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2598 lpfc_hbq_defs[qno]->init_count);
2599}
2600
2601/**
2602 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2603 * @rb_list: Pointer to the hbq buffer list to remove a buffer from.
2604 * This function removes the first hbq buffer on an hbq list and returns a
2605 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2606 **/
2607static struct hbq_dmabuf *
2608lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2609{
2610 struct lpfc_dmabuf *d_buf;
2611
2612 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2613 if (!d_buf)
2614 return NULL;
2615 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2616}
2617
2618/**
2619 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2620 * @phba: Pointer to HBA context object.
2621 * @hrq: Pointer to the header receive queue.
2622 *
2623 * This function removes the first RQ buffer on an RQ buffer list and returns a
2624 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2625 **/
2626static struct rqb_dmabuf *
2627lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2628{
2629 struct lpfc_dmabuf *h_buf;
2630 struct lpfc_rqb *rqbp;
2631
2632 rqbp = hrq->rqbp;
2633 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2634 struct lpfc_dmabuf, list);
2635 if (!h_buf)
2636 return NULL;
2637 rqbp->buffer_count--;
2638 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2639}
2640
2641/**
2642 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2643 * @phba: Pointer to HBA context object.
2644 * @tag: Tag of the hbq buffer.
2645 *
2646 * This function searches for the hbq buffer associated with the given tag in
2647 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2648 * otherwise it returns NULL.
2649 **/
2650static struct hbq_dmabuf *
2651lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2652{
2653 struct lpfc_dmabuf *d_buf;
2654 struct hbq_dmabuf *hbq_buf;
2655 uint32_t hbqno;
2656
2657 hbqno = tag >> 16;
2658 if (hbqno >= LPFC_MAX_HBQS)
2659 return NULL;
2660
2661 spin_lock_irq(&phba->hbalock);
2662 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2663 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2664 if (hbq_buf->tag == tag) {
2665 spin_unlock_irq(&phba->hbalock);
2666 return hbq_buf;
2667 }
2668 }
2669 spin_unlock_irq(&phba->hbalock);
2670 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2671 "1803 Bad hbq tag. Data: x%x x%x\n",
2672 tag, phba->hbqs[tag >> 16].buffer_count);
2673 return NULL;
2674}
2675
2676/**
2677 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2678 * @phba: Pointer to HBA context object.
2679 * @hbq_buffer: Pointer to HBQ buffer.
2680 *
2681 * This function is called with the hbalock held. It gives back
2682 * the hbq buffer to firmware. If the HBQ does not have space to
2683 * post the buffer, it will free the buffer.
2684 **/
2685void
2686lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2687{
2688 uint32_t hbqno;
2689
2690 if (hbq_buffer) {
2691 hbqno = hbq_buffer->tag >> 16;
2692 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2693 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2694 }
2695}
2696
2697/**
2698 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2699 * @mbxCommand: mailbox command code.
2700 *
2701 * This function is called by the mailbox event handler function to verify
2702 * that the completed mailbox command is a legitimate mailbox command. If the
2703 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2704 * and the mailbox event handler will take the HBA offline.
2705 **/
2706static int
2707lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2708{
2709 uint8_t ret;
2710
2711 switch (mbxCommand) {
2712 case MBX_LOAD_SM:
2713 case MBX_READ_NV:
2714 case MBX_WRITE_NV:
2715 case MBX_WRITE_VPARMS:
2716 case MBX_RUN_BIU_DIAG:
2717 case MBX_INIT_LINK:
2718 case MBX_DOWN_LINK:
2719 case MBX_CONFIG_LINK:
2720 case MBX_CONFIG_RING:
2721 case MBX_RESET_RING:
2722 case MBX_READ_CONFIG:
2723 case MBX_READ_RCONFIG:
2724 case MBX_READ_SPARM:
2725 case MBX_READ_STATUS:
2726 case MBX_READ_RPI:
2727 case MBX_READ_XRI:
2728 case MBX_READ_REV:
2729 case MBX_READ_LNK_STAT:
2730 case MBX_REG_LOGIN:
2731 case MBX_UNREG_LOGIN:
2732 case MBX_CLEAR_LA:
2733 case MBX_DUMP_MEMORY:
2734 case MBX_DUMP_CONTEXT:
2735 case MBX_RUN_DIAGS:
2736 case MBX_RESTART:
2737 case MBX_UPDATE_CFG:
2738 case MBX_DOWN_LOAD:
2739 case MBX_DEL_LD_ENTRY:
2740 case MBX_RUN_PROGRAM:
2741 case MBX_SET_MASK:
2742 case MBX_SET_VARIABLE:
2743 case MBX_UNREG_D_ID:
2744 case MBX_KILL_BOARD:
2745 case MBX_CONFIG_FARP:
2746 case MBX_BEACON:
2747 case MBX_LOAD_AREA:
2748 case MBX_RUN_BIU_DIAG64:
2749 case MBX_CONFIG_PORT:
2750 case MBX_READ_SPARM64:
2751 case MBX_READ_RPI64:
2752 case MBX_REG_LOGIN64:
2753 case MBX_READ_TOPOLOGY:
2754 case MBX_WRITE_WWN:
2755 case MBX_SET_DEBUG:
2756 case MBX_LOAD_EXP_ROM:
2757 case MBX_ASYNCEVT_ENABLE:
2758 case MBX_REG_VPI:
2759 case MBX_UNREG_VPI:
2760 case MBX_HEARTBEAT:
2761 case MBX_PORT_CAPABILITIES:
2762 case MBX_PORT_IOV_CONTROL:
2763 case MBX_SLI4_CONFIG:
2764 case MBX_SLI4_REQ_FTRS:
2765 case MBX_REG_FCFI:
2766 case MBX_UNREG_FCFI:
2767 case MBX_REG_VFI:
2768 case MBX_UNREG_VFI:
2769 case MBX_INIT_VPI:
2770 case MBX_INIT_VFI:
2771 case MBX_RESUME_RPI:
2772 case MBX_READ_EVENT_LOG_STATUS:
2773 case MBX_READ_EVENT_LOG:
2774 case MBX_SECURITY_MGMT:
2775 case MBX_AUTH_PORT:
2776 case MBX_ACCESS_VDATA:
2777 ret = mbxCommand;
2778 break;
2779 default:
2780 ret = MBX_SHUTDOWN;
2781 break;
2782 }
2783 return ret;
2784}
2785
2786/**
2787 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2788 * @phba: Pointer to HBA context object.
2789 * @pmboxq: Pointer to mailbox command.
2790 *
2791 * This is the completion handler function for mailbox commands issued
2792 * from the lpfc_sli_issue_mbox_wait function. It is called by the
2793 * mailbox event handler function with no lock held. This function
2794 * will wake up the thread waiting on the completion pointed to by
2795 * context3 of the mailbox.
2796 **/
2797void
2798lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2799{
2800 unsigned long drvr_flag;
2801 struct completion *pmbox_done;
2802
2803 /*
2804 * If pmbox_done is empty, the driver thread gave up waiting and
2805 * continued running.
2806 */
2807 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2808 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2809 pmbox_done = (struct completion *)pmboxq->context3;
2810 if (pmbox_done)
2811 complete(pmbox_done);
2812 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2813 return;
2814}
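
/*
 * Editor's sketch, not driver code: the waiter side that pairs with the
 * wake-up above. lpfc_sli_issue_mbox_wait() (not shown in this section)
 * stores a struct completion in context3 and blocks on it; the shape of
 * that handshake, with hypothetical names and the mailbox issue step
 * elided:
 */
static inline void
lpfc_example_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
		       unsigned long timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(mbox_done);

	pmboxq->context3 = &mbox_done;
	/* ... issue the mailbox with MBX_NOWAIT here ... */
	wait_for_completion_timeout(&mbox_done,
				    msecs_to_jiffies(timeout_ms));
	/* Clear context3 under hbalock so a late wake-up sees NULL */
	spin_lock_irq(&phba->hbalock);
	pmboxq->context3 = NULL;
	spin_unlock_irq(&phba->hbalock);
}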
2815
2816static void
2817__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2818{
2819 unsigned long iflags;
2820
2821 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2822 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2823 spin_lock_irqsave(&ndlp->lock, iflags);
2824 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2825 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2826 spin_unlock_irqrestore(&ndlp->lock, iflags);
2827 }
2828 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2829}
2830
2831void
2832lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2833{
2834 __lpfc_sli_rpi_release(vport, ndlp);
2835}
2836
2837/**
2838 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2839 * @phba: Pointer to HBA context object.
2840 * @pmb: Pointer to mailbox object.
2841 *
2842 * This function is the default mailbox completion handler. It
2843 * frees the memory resources associated with the completed mailbox
2844 * command. If the completed command is a REG_LOGIN mailbox command,
2845 * this function will issue a UREG_LOGIN to re-claim the RPI.
2846 **/
2847void
2848lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2849{
2850 struct lpfc_vport *vport = pmb->vport;
2851 struct lpfc_nodelist *ndlp;
2852 struct Scsi_Host *shost;
2853 uint16_t rpi, vpi;
2854 int rc;
2855
2856 /*
2857 * If a REG_LOGIN succeeded after the node was destroyed or the node
2858 * is in re-discovery, the driver needs to clean up the RPI.
2859 */
2860 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2861 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2862 !pmb->u.mb.mbxStatus) {
2863 rpi = pmb->u.mb.un.varWords[0];
2864 vpi = pmb->u.mb.un.varRegLogin.vpi;
2865 if (phba->sli_rev == LPFC_SLI_REV4)
2866 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2867 lpfc_unreg_login(phba, vpi, rpi, pmb);
2868 pmb->vport = vport;
2869 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2870 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2871 if (rc != MBX_NOT_FINISHED)
2872 return;
2873 }
2874
2875 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2876 !(phba->pport->load_flag & FC_UNLOADING) &&
2877 !pmb->u.mb.mbxStatus) {
2878 shost = lpfc_shost_from_vport(vport);
2879 spin_lock_irq(shost->host_lock);
2880 vport->vpi_state |= LPFC_VPI_REGISTERED;
2881 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2882 spin_unlock_irq(shost->host_lock);
2883 }
2884
2885 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2886 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2887 lpfc_nlp_put(ndlp);
2888 }
2889
2890 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2891 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2892
2893 /* Check to see if there are any deferred events to process */
2894 if (ndlp) {
2895 lpfc_printf_vlog(
2896 vport,
2897 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2898 "1438 UNREG cmpl deferred mbox x%x "
2899 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2900 ndlp->nlp_rpi, ndlp->nlp_DID,
2901 ndlp->nlp_flag, ndlp->nlp_defer_did,
2902 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2903
2904 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2905 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2906 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2907 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2908 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2909 } else {
2910 __lpfc_sli_rpi_release(vport, ndlp);
2911 }
2912
2913 /* The unreg_login mailbox is complete and had a
2914 * reference that has to be released. The PLOGI
2915 * got its own ref.
2916 */
2917 lpfc_nlp_put(ndlp);
2918 pmb->ctx_ndlp = NULL;
2919 }
2920 }
2921
2922 /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2923 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2924 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2925 lpfc_nlp_put(ndlp);
2926 }
2927
2928 /* Check security permission status on INIT_LINK mailbox command */
2929 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2930 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2931 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2932 "2860 SLI authentication is required "
2933 "for INIT_LINK but has not done yet\n");
2934
2935 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2936 lpfc_sli4_mbox_cmd_free(phba, pmb);
2937 else
2938 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2939}
2940/**
2941 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2942 * @phba: Pointer to HBA context object.
2943 * @pmb: Pointer to mailbox object.
2944 *
2945 * This function is the unreg rpi mailbox completion handler. It
2946 * frees the memory resources associated with the completed mailbox
2947 * command. An additional reference is put on the ndlp to prevent
2948 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2949 * the unreg mailbox command completes; this routine puts the
2950 * reference back.
2951 *
2952 **/
2953void
2954lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2955{
2956 struct lpfc_vport *vport = pmb->vport;
2957 struct lpfc_nodelist *ndlp;
2958
2959 ndlp = pmb->ctx_ndlp;
2960 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2961 if (phba->sli_rev == LPFC_SLI_REV4 &&
2962 (bf_get(lpfc_sli_intf_if_type,
2963 &phba->sli4_hba.sli_intf) >=
2964 LPFC_SLI_INTF_IF_TYPE_2)) {
2965 if (ndlp) {
2966 lpfc_printf_vlog(
2967 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2968 "0010 UNREG_LOGIN vpi:%x "
2969 "rpi:%x DID:%x defer x%x flg x%x "
2970 "x%px\n",
2971 vport->vpi, ndlp->nlp_rpi,
2972 ndlp->nlp_DID, ndlp->nlp_defer_did,
2973 ndlp->nlp_flag,
2974 ndlp);
2975 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2976
2977 /* Check to see if there are any deferred
2978 * events to process
2979 */
2980 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2981 (ndlp->nlp_defer_did !=
2982 NLP_EVT_NOTHING_PENDING)) {
2983 lpfc_printf_vlog(
2984 vport, KERN_INFO, LOG_DISCOVERY,
2985 "4111 UNREG cmpl deferred "
2986 "clr x%x on "
2987 "NPort x%x Data: x%x x%px\n",
2988 ndlp->nlp_rpi, ndlp->nlp_DID,
2989 ndlp->nlp_defer_did, ndlp);
2990 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2991 ndlp->nlp_defer_did =
2992 NLP_EVT_NOTHING_PENDING;
2993 lpfc_issue_els_plogi(
2994 vport, ndlp->nlp_DID, 0);
2995 } else {
2996 __lpfc_sli_rpi_release(vport, ndlp);
2997 }
2998 lpfc_nlp_put(ndlp);
2999 }
3000 }
3001 }
3002
3003 mempool_free(pmb, phba->mbox_mem_pool);
3004}
3005
3006/**
3007 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3008 * @phba: Pointer to HBA context object.
3009 *
3010 * This function is called with no lock held. This function processes all
3011 * the completed mailbox commands and gives them to upper layers. The interrupt
3012 * service routine processes the mailbox completion interrupt, adds completed
3013 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
3014 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
3015 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
3016 * function returns the mailbox commands to the upper layer by calling the
3017 * completion handler function of each mailbox.
3018 **/
3019int
3020lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3021{
3022 MAILBOX_t *pmbox;
3023 LPFC_MBOXQ_t *pmb;
3024 int rc;
3025 LIST_HEAD(cmplq);
3026
3027 phba->sli.slistat.mbox_event++;
3028
3029	/* Get all completed mailbox buffers into the cmplq */
3030 spin_lock_irq(&phba->hbalock);
3031 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3032 spin_unlock_irq(&phba->hbalock);
3033
3034 /* Get a Mailbox buffer to setup mailbox commands for callback */
3035 do {
3036 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3037 if (pmb == NULL)
3038 break;
3039
3040 pmbox = &pmb->u.mb;
3041
3042 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3043 if (pmb->vport) {
3044 lpfc_debugfs_disc_trc(pmb->vport,
3045 LPFC_DISC_TRC_MBOX_VPORT,
3046 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3047 (uint32_t)pmbox->mbxCommand,
3048 pmbox->un.varWords[0],
3049 pmbox->un.varWords[1]);
3050 }
3051 else {
3052 lpfc_debugfs_disc_trc(phba->pport,
3053 LPFC_DISC_TRC_MBOX,
3054 "MBOX cmpl: cmd:x%x mb:x%x x%x",
3055 (uint32_t)pmbox->mbxCommand,
3056 pmbox->un.varWords[0],
3057 pmbox->un.varWords[1]);
3058 }
3059 }
3060
3061 /*
3062		 * It is a fatal error if an unknown mailbox command completes.
3063 */
3064 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3065 MBX_SHUTDOWN) {
3066 /* Unknown mailbox command compl */
3067 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3068 "(%d):0323 Unknown Mailbox command "
3069 "x%x (x%x/x%x) Cmpl\n",
3070 pmb->vport ? pmb->vport->vpi :
3071 LPFC_VPORT_UNKNOWN,
3072 pmbox->mbxCommand,
3073 lpfc_sli_config_mbox_subsys_get(phba,
3074 pmb),
3075 lpfc_sli_config_mbox_opcode_get(phba,
3076 pmb));
3077 phba->link_state = LPFC_HBA_ERROR;
3078 phba->work_hs = HS_FFER3;
3079 lpfc_handle_eratt(phba);
3080 continue;
3081 }
3082
3083 if (pmbox->mbxStatus) {
3084 phba->sli.slistat.mbox_stat_err++;
3085 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3086 /* Mbox cmd cmpl error - RETRYing */
3087 lpfc_printf_log(phba, KERN_INFO,
3088 LOG_MBOX | LOG_SLI,
3089 "(%d):0305 Mbox cmd cmpl "
3090 "error - RETRYing Data: x%x "
3091 "(x%x/x%x) x%x x%x x%x\n",
3092 pmb->vport ? pmb->vport->vpi :
3093 LPFC_VPORT_UNKNOWN,
3094 pmbox->mbxCommand,
3095 lpfc_sli_config_mbox_subsys_get(phba,
3096 pmb),
3097 lpfc_sli_config_mbox_opcode_get(phba,
3098 pmb),
3099 pmbox->mbxStatus,
3100 pmbox->un.varWords[0],
3101 pmb->vport ? pmb->vport->port_state :
3102 LPFC_VPORT_UNKNOWN);
3103 pmbox->mbxStatus = 0;
3104 pmbox->mbxOwner = OWN_HOST;
3105 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3106 if (rc != MBX_NOT_FINISHED)
3107 continue;
3108 }
3109 }
3110
3111 /* Mailbox cmd <cmd> Cmpl <cmpl> */
3112 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3113 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3114 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3115 "x%x x%x x%x\n",
3116 pmb->vport ? pmb->vport->vpi : 0,
3117 pmbox->mbxCommand,
3118 lpfc_sli_config_mbox_subsys_get(phba, pmb),
3119 lpfc_sli_config_mbox_opcode_get(phba, pmb),
3120 pmb->mbox_cmpl,
3121 *((uint32_t *) pmbox),
3122 pmbox->un.varWords[0],
3123 pmbox->un.varWords[1],
3124 pmbox->un.varWords[2],
3125 pmbox->un.varWords[3],
3126 pmbox->un.varWords[4],
3127 pmbox->un.varWords[5],
3128 pmbox->un.varWords[6],
3129 pmbox->un.varWords[7],
3130 pmbox->un.varWords[8],
3131 pmbox->un.varWords[9],
3132 pmbox->un.varWords[10]);
3133
3134 if (pmb->mbox_cmpl)
3135			pmb->mbox_cmpl(phba, pmb);
3136 } while (1);
3137 return 0;
3138}
3139
3140/**
3141 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3142 * @phba: Pointer to HBA context object.
3143 * @pring: Pointer to driver SLI ring object.
3144 * @tag: buffer tag.
3145 *
3146 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
3147 * is set in the tag, the buffer was posted for a particular exchange and
3148 * the function will return the buffer without replacing it.
3149 * If the buffer is for unsolicited ELS or CT traffic, this function
3150 * returns the buffer and also posts another buffer to the firmware.
3151 **/
3152static struct lpfc_dmabuf *
3153lpfc_sli_get_buff(struct lpfc_hba *phba,
3154 struct lpfc_sli_ring *pring,
3155 uint32_t tag)
3156{
3157 struct hbq_dmabuf *hbq_entry;
3158
3159 if (tag & QUE_BUFTAG_BIT)
3160 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3161 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3162 if (!hbq_entry)
3163 return NULL;
3164 return &hbq_entry->dbuf;
3165}
3166
3167/**
3168 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3169 * containing an NVME LS request.
3170 * @phba: pointer to lpfc hba data structure.
3171 * @piocb: pointer to the iocbq struct representing the sequence starting
3172 * frame.
3173 *
3174 * This routine initially validates the NVME LS, validates there is a login
3175 * with the port that sent the LS, and then calls the appropriate nvme host
3176 * or target LS request handler.
3177 **/
3178static void
3179lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3180{
3181 struct lpfc_nodelist *ndlp;
3182 struct lpfc_dmabuf *d_buf;
3183 struct hbq_dmabuf *nvmebuf;
3184 struct fc_frame_header *fc_hdr;
3185 struct lpfc_async_xchg_ctx *axchg = NULL;
3186 char *failwhy = NULL;
3187 uint32_t oxid, sid, did, fctl, size;
3188 int ret = 1;
3189
3190 d_buf = piocb->cmd_dmabuf;
3191
3192 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3193 fc_hdr = nvmebuf->hbuf.virt;
3194 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3195 sid = sli4_sid_from_fc_hdr(fc_hdr);
3196 did = sli4_did_from_fc_hdr(fc_hdr);
3197 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3198 fc_hdr->fh_f_ctl[1] << 8 |
3199 fc_hdr->fh_f_ctl[2]);
3200 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3201
3202 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
3203 oxid, size, sid);
3204
3205 if (phba->pport->load_flag & FC_UNLOADING) {
3206 failwhy = "Driver Unloading";
3207 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3208 failwhy = "NVME FC4 Disabled";
3209 } else if (!phba->nvmet_support && !phba->pport->localport) {
3210 failwhy = "No Localport";
3211 } else if (phba->nvmet_support && !phba->targetport) {
3212 failwhy = "No Targetport";
3213 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3214 failwhy = "Bad NVME LS R_CTL";
3215 } else if (unlikely((fctl & 0x00FF0000) !=
3216 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3217 failwhy = "Bad NVME LS F_CTL";
3218 } else {
3219 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3220 if (!axchg)
3221 failwhy = "No CTX memory";
3222 }
3223
3224 if (unlikely(failwhy)) {
3225 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3226 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3227 sid, oxid, failwhy);
3228 goto out_fail;
3229 }
3230
3231 /* validate the source of the LS is logged in */
3232 ndlp = lpfc_findnode_did(phba->pport, sid);
3233 if (!ndlp ||
3234 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3235 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3236 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3237 "6216 NVME Unsol rcv: No ndlp: "
3238 "NPort_ID x%x oxid x%x\n",
3239 sid, oxid);
3240 goto out_fail;
3241 }
3242
3243 axchg->phba = phba;
3244 axchg->ndlp = ndlp;
3245 axchg->size = size;
3246 axchg->oxid = oxid;
3247 axchg->sid = sid;
3248 axchg->wqeq = NULL;
3249 axchg->state = LPFC_NVME_STE_LS_RCV;
3250 axchg->entry_cnt = 1;
3251 axchg->rqb_buffer = (void *)nvmebuf;
3252 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3253 axchg->payload = nvmebuf->dbuf.virt;
3254 INIT_LIST_HEAD(&axchg->list);
3255
3256 if (phba->nvmet_support) {
3257 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3258 spin_lock_irq(&ndlp->lock);
3259 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3260 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3261 spin_unlock_irq(&ndlp->lock);
3262
3263 /* This reference is a single occurrence to hold the
3264 * node valid until the nvmet transport calls
3265 * host_release.
3266 */
3267 if (!lpfc_nlp_get(ndlp))
3268 goto out_fail;
3269
3270 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3271 "6206 NVMET unsol ls_req ndlp x%px "
3272 "DID x%x xflags x%x refcnt %d\n",
3273 ndlp, ndlp->nlp_DID,
3274 ndlp->fc4_xpt_flags,
3275 kref_read(&ndlp->kref));
3276 } else {
3277 spin_unlock_irq(&ndlp->lock);
3278 }
3279 } else {
3280 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3281 }
3282
3283 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3284 if (!ret)
3285 return;
3286
3287out_fail:
3288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3289 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3290 "NVMe%s handler failed %d\n",
3291 did, sid, oxid,
3292 (phba->nvmet_support) ? "T" : "I", ret);
3293
3294 /* recycle receive buffer */
3295 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3296
3297 /* If start of new exchange, abort it */
3298 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3299 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3300
3301 if (ret)
3302 kfree(axchg);
3303}
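
/*
 * Editor's sketch, not driver code: the F_CTL validation above first
 * reassembles the 24-bit F_CTL field from its three header bytes, then
 * requires a complete single-sequence frame with sequence initiative
 * transferred. Standalone form with a hypothetical name:
 */
static inline bool
lpfc_example_ls_fctl_ok(const struct fc_frame_header *fc_hdr)
{
	uint32_t fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			 fc_hdr->fh_f_ctl[1] << 8 |
			 fc_hdr->fh_f_ctl[2]);

	return (fctl & 0x00FF0000) ==
	       (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT);
}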
3304
3305/**
3306 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3307 * @phba: Pointer to HBA context object.
3308 * @pring: Pointer to driver SLI ring object.
3309 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3310 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3311 * @fch_type: the type for the first frame of the sequence.
3312 *
3313 * This function is called with no lock held. This function uses the r_ctl and
3314 * type of the received sequence to find the correct callback function to call
3315 * to process the sequence.
3316 **/
3317static int
3318lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3319 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3320 uint32_t fch_type)
3321{
3322 int i;
3323
3324 switch (fch_type) {
3325 case FC_TYPE_NVME:
3326 lpfc_nvme_unsol_ls_handler(phba, saveq);
3327 return 1;
3328 default:
3329 break;
3330 }
3331
3332	/* Unsolicited responses */
3333 if (pring->prt[0].profile) {
3334 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3335 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3336 saveq);
3337 return 1;
3338 }
3339	/* We must search, based on rctl / type,
3340	 * for the right routine */
3341 for (i = 0; i < pring->num_mask; i++) {
3342 if ((pring->prt[i].rctl == fch_r_ctl) &&
3343 (pring->prt[i].type == fch_type)) {
3344 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3345 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3346 (phba, pring, saveq);
3347 return 1;
3348 }
3349 }
3350 return 0;
3351}
3352
3353static void
3354lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3355 struct lpfc_iocbq *saveq)
3356{
3357 IOCB_t *irsp;
3358 union lpfc_wqe128 *wqe;
3359 u16 i = 0;
3360
3361 irsp = &saveq->iocb;
3362 wqe = &saveq->wqe;
3363
3364 /* Fill wcqe with the IOCB status fields */
3365 bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3366 saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3367 saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3368 saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3369
3370 /* Source ID */
3371 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3372
3373 /* rx-id of the response frame */
3374 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3375
3376 /* ox-id of the frame */
3377 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3378 irsp->unsli3.rcvsli3.ox_id);
3379
3380 /* DID */
3381 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3382 irsp->un.rcvels.remoteID);
3383
3384 /* unsol data len */
3385 for (i = 0; i < irsp->ulpBdeCount; i++) {
3386 struct lpfc_hbq_entry *hbqe = NULL;
3387
3388 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3389 if (i == 0) {
3390 hbqe = (struct lpfc_hbq_entry *)
3391 &irsp->un.ulpWord[0];
3392 saveq->wqe.gen_req.bde.tus.f.bdeSize =
3393 hbqe->bde.tus.f.bdeSize;
3394 } else if (i == 1) {
3395 hbqe = (struct lpfc_hbq_entry *)
3396 &irsp->unsli3.sli3Words[4];
3397 saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3398 }
3399 }
3400 }
3401}
3402
3403/**
3404 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3405 * @phba: Pointer to HBA context object.
3406 * @pring: Pointer to driver SLI ring object.
3407 * @saveq: Pointer to the unsolicited iocb.
3408 *
3409 * This function is called with no lock held by the ring event handler
3410 * when there is an unsolicited iocb posted to the response ring by the
3411 * firmware. This function gets the buffer associated with the iocbs
3412 * and calls the event handler for the ring. This function handles both
3413 * qring buffers and hbq buffers.
3414 * When the function returns 1, the caller can free the iocb object;
3415 * otherwise upper layer functions will free it.
3416 **/
3417static int
3418lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3419 struct lpfc_iocbq *saveq)
3420{
3421	IOCB_t *irsp;
3422	WORD5 *w5p;
3423 dma_addr_t paddr;
3424 uint32_t Rctl, Type;
3425 struct lpfc_iocbq *iocbq;
3426 struct lpfc_dmabuf *dmzbuf;
3427
3428 irsp = &saveq->iocb;
3429 saveq->vport = phba->pport;
3430
3431 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3432 if (pring->lpfc_sli_rcv_async_status)
3433 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3434 else
3435 lpfc_printf_log(phba,
3436 KERN_WARNING,
3437 LOG_SLI,
3438 "0316 Ring %d handler: unexpected "
3439 "ASYNC_STATUS iocb received evt_code "
3440 "0x%x\n",
3441 pring->ringno,
3442 irsp->un.asyncstat.evt_code);
3443 return 1;
3444 }
3445
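	/* A RET_XRI iocb hands back HBQ buffers the port no longer needs;
	 * free each returned buffer (up to three BDEs) and consume the iocb.
	 */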
3446 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3447 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3448 if (irsp->ulpBdeCount > 0) {
3449 dmzbuf = lpfc_sli_get_buff(phba, pring,
3450 irsp->un.ulpWord[3]);
3451 lpfc_in_buf_free(phba, dmzbuf);
3452 }
3453
3454 if (irsp->ulpBdeCount > 1) {
3455 dmzbuf = lpfc_sli_get_buff(phba, pring,
3456 irsp->unsli3.sli3Words[3]);
3457 lpfc_in_buf_free(phba, dmzbuf);
3458 }
3459
3460 if (irsp->ulpBdeCount > 2) {
3461 dmzbuf = lpfc_sli_get_buff(phba, pring,
3462 irsp->unsli3.sli3Words[7]);
3463 lpfc_in_buf_free(phba, dmzbuf);
3464 }
3465
3466 return 1;
3467 }
3468
3469 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3470 if (irsp->ulpBdeCount != 0) {
3471 saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
3472 irsp->un.ulpWord[3]);
3473 if (!saveq->cmd_dmabuf)
3474 lpfc_printf_log(phba,
3475 KERN_ERR,
3476 LOG_SLI,
3477 "0341 Ring %d Cannot find buffer for "
3478 "an unsolicited iocb. tag 0x%x\n",
3479 pring->ringno,
3480 irsp->un.ulpWord[3]);
3481 }
3482 if (irsp->ulpBdeCount == 2) {
3483 saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
3484 irsp->unsli3.sli3Words[7]);
3485 if (!saveq->bpl_dmabuf)
3486 lpfc_printf_log(phba,
3487 KERN_ERR,
3488 LOG_SLI,
3489 "0342 Ring %d Cannot find buffer for an"
3490 " unsolicited iocb. tag 0x%x\n",
3491 pring->ringno,
3492 irsp->unsli3.sli3Words[7]);
3493 }
3494 list_for_each_entry(iocbq, &saveq->list, list) {
3495 irsp = &iocbq->iocb;
3496 if (irsp->ulpBdeCount != 0) {
3497 iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
3498 pring,
3499 irsp->un.ulpWord[3]);
3500 if (!iocbq->cmd_dmabuf)
3501 lpfc_printf_log(phba,
3502 KERN_ERR,
3503 LOG_SLI,
3504 "0343 Ring %d Cannot find "
3505 "buffer for an unsolicited iocb"
3506 ". tag 0x%x\n", pring->ringno,
3507 irsp->un.ulpWord[3]);
3508 }
3509 if (irsp->ulpBdeCount == 2) {
3510 iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
3511 pring,
3512 irsp->unsli3.sli3Words[7]);
3513 if (!iocbq->bpl_dmabuf)
3514 lpfc_printf_log(phba,
3515 KERN_ERR,
3516 LOG_SLI,
3517 "0344 Ring %d Cannot find "
3518 "buffer for an unsolicited "
3519 "iocb. tag 0x%x\n",
3520 pring->ringno,
3521 irsp->unsli3.sli3Words[7]);
3522 }
3523 }
3524 } else {
3525 paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3526 irsp->un.cont64[0].addrLow);
3527 saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
3528 paddr);
3529 if (irsp->ulpBdeCount == 2) {
3530 paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3531 irsp->un.cont64[1].addrLow);
3532 saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
3533 pring,
3534 paddr);
3535 }
3536 }
3537
3538 if (irsp->ulpBdeCount != 0 &&
3539 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3540 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3541 int found = 0;
3542
3543		/* Search the continuation save queue for the same XRI (ox_id) */
3544 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3545 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3546 saveq->iocb.unsli3.rcvsli3.ox_id) {
3547 list_add_tail(&saveq->list, &iocbq->list);
3548 found = 1;
3549 break;
3550 }
3551 }
3552 if (!found)
3553 list_add_tail(&saveq->clist,
3554 &pring->iocb_continue_saveq);
3555
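		/* A non-intermediate status marks the final frame of the
		 * sequence: pull the accumulated list off the save queue and
		 * process it from its head. Intermediate frames stay queued.
		 */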
3556 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3557 list_del_init(&iocbq->clist);
3558 saveq = iocbq;
3559 irsp = &saveq->iocb;
3560 } else {
3561 return 0;
3562 }
3563 }
3564 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3565 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3566 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3567 Rctl = FC_RCTL_ELS_REQ;
3568 Type = FC_TYPE_ELS;
3569 } else {
3570 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3571 Rctl = w5p->hcsw.Rctl;
3572 Type = w5p->hcsw.Type;
3573
3574 /* Firmware Workaround */
3575 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3576 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3577 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3578 Rctl = FC_RCTL_ELS_REQ;
3579 Type = FC_TYPE_ELS;
3580 w5p->hcsw.Rctl = Rctl;
3581 w5p->hcsw.Type = Type;
3582 }
3583 }
3584
3585 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3586 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3587 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3588 if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3589 saveq->vport = phba->pport;
3590 else
3591 saveq->vport = lpfc_find_vport_by_vpid(phba,
3592 irsp->unsli3.rcvsli3.vpi);
3593 }
3594
3595 /* Prepare WQE with Unsol frame */
3596 lpfc_sli_prep_unsol_wqe(phba, saveq);
3597
3598 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3599 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3600 "0313 Ring %d handler: unexpected Rctl x%x "
3601 "Type x%x received\n",
3602 pring->ringno, Rctl, Type);
3603
3604 return 1;
3605}
3606
3607/**
3608 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3609 * @phba: Pointer to HBA context object.
3610 * @pring: Pointer to driver SLI ring object.
3611 * @prspiocb: Pointer to response iocb object.
3612 *
3613 * This function looks up the iocb_lookup table to get the command iocb
3614 * corresponding to the given response iocb using the iotag of the
3615 * response iocb. The driver calls this function with the hbalock held
3616 * for SLI3 ports or the ring lock held for SLI4 ports.
3617 * This function returns the command iocb object if it finds the command
3618 * iocb else returns NULL.
3619 **/
3620static struct lpfc_iocbq *
3621lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3622 struct lpfc_sli_ring *pring,
3623 struct lpfc_iocbq *prspiocb)
3624{
3625 struct lpfc_iocbq *cmd_iocb = NULL;
3626 u16 iotag;
3627
3628 if (phba->sli_rev == LPFC_SLI_REV4)
3629 iotag = get_wqe_reqtag(prspiocb);
3630 else
3631 iotag = prspiocb->iocb.ulpIoTag;
3632
3633 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3634 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3635 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3636 /* remove from txcmpl queue list */
3637 list_del_init(&cmd_iocb->list);
3638 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3639 pring->txcmplq_cnt--;
3640 return cmd_iocb;
3641 }
3642 }
3643
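	/* Reached when the iotag is out of range or the command iocb is
	 * no longer on the txcmplq (already completed or aborted).
	 */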
3644 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3645 "0317 iotag x%x is out of "
3646 "range: max iotag x%x\n",
3647 iotag, phba->sli.last_iotag);
3648 return NULL;
3649}
3650
3651/**
3652 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3653 * @phba: Pointer to HBA context object.
3654 * @pring: Pointer to driver SLI ring object.
3655 * @iotag: IOCB tag.
3656 *
3657 * This function looks up the iocb_lookup table to get the command iocb
3658 * corresponding to the given iotag. The driver calls this function with
3659 * the ring lock held because this function is an SLI4 port only helper.
3660 * This function returns the command iocb object if it finds the command
3661 * iocb else returns NULL.
3662 **/
3663static struct lpfc_iocbq *
3664lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3665 struct lpfc_sli_ring *pring, uint16_t iotag)
3666{
3667 struct lpfc_iocbq *cmd_iocb = NULL;
3668
3669 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3670 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3671 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3672 /* remove from txcmpl queue list */
3673 list_del_init(&cmd_iocb->list);
3674 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3675 pring->txcmplq_cnt--;
3676 return cmd_iocb;
3677 }
3678 }
3679
3680 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3681 "0372 iotag x%x lookup error: max iotag (x%x) "
3682 "cmd_flag x%x\n",
3683 iotag, phba->sli.last_iotag,
3684 cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3685 return NULL;
3686}
3687
3688/**
3689 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3690 * @phba: Pointer to HBA context object.
3691 * @pring: Pointer to driver SLI ring object.
3692 * @saveq: Pointer to the response iocb to be processed.
3693 *
3694 * This function is called by the ring event handler for non-fcp
3695 * rings when there is a new response iocb in the response ring.
3696 * The caller is not required to hold any locks. This function
3697 * gets the command iocb associated with the response iocb and
3698 * calls the completion handler for the command iocb. If there
3699 * is no completion handler, the function will free the resources
3700 * associated with command iocb. If the response iocb is for
3701 * an already aborted command iocb, the status of the completion
3702 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3703 * This function always returns 1.
3704 **/
3705static int
3706lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3707 struct lpfc_iocbq *saveq)
3708{
3709 struct lpfc_iocbq *cmdiocbp;
3710 unsigned long iflag;
3711 u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3712
3713 if (phba->sli_rev == LPFC_SLI_REV4)
3714 spin_lock_irqsave(&pring->ring_lock, iflag);
3715 else
3716 spin_lock_irqsave(&phba->hbalock, iflag);
3717 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3718 if (phba->sli_rev == LPFC_SLI_REV4)
3719 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3720 else
3721 spin_unlock_irqrestore(&phba->hbalock, iflag);
3722
3723 ulp_command = get_job_cmnd(phba, saveq);
3724 ulp_status = get_job_ulpstatus(phba, saveq);
3725 ulp_word4 = get_job_word4(phba, saveq);
3726 ulp_context = get_job_ulpcontext(phba, saveq);
3727 if (phba->sli_rev == LPFC_SLI_REV4)
3728 iotag = get_wqe_reqtag(saveq);
3729 else
3730 iotag = saveq->iocb.ulpIoTag;
3731
3732 if (cmdiocbp) {
3733 ulp_command = get_job_cmnd(phba, cmdiocbp);
3734 if (cmdiocbp->cmd_cmpl) {
3735 /*
3736 * If an ELS command failed send an event to mgmt
3737 * application.
3738 */
3739 if (ulp_status &&
3740 (pring->ringno == LPFC_ELS_RING) &&
3741 (ulp_command == CMD_ELS_REQUEST64_CR))
3742 lpfc_send_els_failure_event(phba,
3743 cmdiocbp, saveq);
3744
3745 /*
3746 * Post all ELS completions to the worker thread.
3747 * All other are passed to the completion callback.
3748 */
3749 if (pring->ringno == LPFC_ELS_RING) {
3750 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3751 (cmdiocbp->cmd_flag &
3752 LPFC_DRIVER_ABORTED)) {
3753 spin_lock_irqsave(&phba->hbalock,
3754 iflag);
3755 cmdiocbp->cmd_flag &=
3756 ~LPFC_DRIVER_ABORTED;
3757 spin_unlock_irqrestore(&phba->hbalock,
3758 iflag);
3759 saveq->iocb.ulpStatus =
3760 IOSTAT_LOCAL_REJECT;
3761 saveq->iocb.un.ulpWord[4] =
3762 IOERR_SLI_ABORTED;
3763
3764 /* Firmware could still be in progress
3765 * of DMAing payload, so don't free data
3766 * buffer till after a hbeat.
3767 */
3768 spin_lock_irqsave(&phba->hbalock,
3769 iflag);
3770 saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3771 spin_unlock_irqrestore(&phba->hbalock,
3772 iflag);
3773 }
3774 if (phba->sli_rev == LPFC_SLI_REV4) {
3775 if (saveq->cmd_flag &
3776 LPFC_EXCHANGE_BUSY) {
3777 /* Set cmdiocb flag for the
3778 * exchange busy so sgl (xri)
3779 * will not be released until
3780 * the abort xri is received
3781 * from hba.
3782 */
3783 spin_lock_irqsave(
3784 &phba->hbalock, iflag);
3785 cmdiocbp->cmd_flag |=
3786 LPFC_EXCHANGE_BUSY;
3787 spin_unlock_irqrestore(
3788 &phba->hbalock, iflag);
3789 }
3790 if (cmdiocbp->cmd_flag &
3791 LPFC_DRIVER_ABORTED) {
3792 /*
3793 * Clear LPFC_DRIVER_ABORTED
3794 * bit in case it was driver
3795 * initiated abort.
3796 */
3797 spin_lock_irqsave(
3798 &phba->hbalock, iflag);
3799 cmdiocbp->cmd_flag &=
3800 ~LPFC_DRIVER_ABORTED;
3801 spin_unlock_irqrestore(
3802 &phba->hbalock, iflag);
3803 set_job_ulpstatus(cmdiocbp,
3804 IOSTAT_LOCAL_REJECT);
3805 set_job_ulpword4(cmdiocbp,
3806 IOERR_ABORT_REQUESTED);
3807 /*
3808 * For SLI4, irsiocb contains
3809 * NO_XRI in sli_xritag, it
3810 * shall not affect releasing
3811 * sgl (xri) process.
3812 */
3813 set_job_ulpstatus(saveq,
3814 IOSTAT_LOCAL_REJECT);
3815 set_job_ulpword4(saveq,
3816 IOERR_SLI_ABORTED);
3817 spin_lock_irqsave(
3818 &phba->hbalock, iflag);
3819 saveq->cmd_flag |=
3820 LPFC_DELAY_MEM_FREE;
3821 spin_unlock_irqrestore(
3822 &phba->hbalock, iflag);
3823 }
3824 }
3825 }
3826 (cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq);
3827 } else
3828 lpfc_sli_release_iocbq(phba, cmdiocbp);
3829 } else {
3830 /*
3831 * Unknown initiating command based on the response iotag.
3832 * This could be the case on the ELS ring because of
3833 * lpfc_els_abort().
3834 */
3835 if (pring->ringno != LPFC_ELS_RING) {
3836 /*
3837 * Ring <ringno> handler: unexpected completion IoTag
3838 * <IoTag>
3839 */
3840 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3841 "0322 Ring %d handler: "
3842 "unexpected completion IoTag x%x "
3843 "Data: x%x x%x x%x x%x\n",
3844 pring->ringno, iotag, ulp_status,
3845 ulp_word4, ulp_command, ulp_context);
3846 }
3847 }
3848
3849 return 1;
3850}
3851
3852/**
3853 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3854 * @phba: Pointer to HBA context object.
3855 * @pring: Pointer to driver SLI ring object.
3856 *
3857 * This function is called from the iocb ring event handlers when the
3858 * put pointer is ahead of the get pointer for a ring. It signals an
3859 * error attention condition to the worker thread, which will then
3860 * transition the HBA to the offline state.
3861 **/
3862static void
3863lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3864{
3865 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3866 /*
3867 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3868 * rsp ring <portRspMax>
3869 */
3870 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3871 "0312 Ring %d handler: portRspPut %d "
3872 "is bigger than rsp ring %d\n",
3873 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3874 pring->sli.sli3.numRiocb);
3875
3876 phba->link_state = LPFC_HBA_ERROR;
3877
3878 /*
3879 * All error attention handlers are posted to
3880 * worker thread
3881 */
3882 phba->work_ha |= HA_ERATT;
3883 phba->work_hs = HS_FFER3;
3884
3885 lpfc_worker_wake_up(phba);
3886
3887 return;
3888}
3889
3890/**
3891 * lpfc_poll_eratt - Error attention polling timer timeout handler
3892 * @t: Pointer to the eratt polling timer embedded in the HBA context object.
3893 *
3894 * This function is invoked by the Error Attention polling timer when the
3895 * timer times out. It will check the SLI Error Attention register for
3896 * possible attention events. If so, it will post an Error Attention event
3897 * and wake up worker thread to process it. Otherwise, it will set up the
3898 * Error Attention polling timer for the next poll.
3899 **/
3900void lpfc_poll_eratt(struct timer_list *t)
3901{
3902 struct lpfc_hba *phba;
3903 uint32_t eratt = 0;
3904 uint64_t sli_intr, cnt;
3905
3906 phba = from_timer(phba, t, eratt_poll);
3907
3908 /* Here we will also keep track of interrupts per sec of the hba */
3909 sli_intr = phba->sli.slistat.sli_intr;
3910
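	/* The interrupt counter may have wrapped since the last poll;
	 * account for the rollover when computing the per-interval delta.
	 */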
3911 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3912 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3913 sli_intr);
3914 else
3915 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3916
3917 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3918 do_div(cnt, phba->eratt_poll_interval);
3919 phba->sli.slistat.sli_ips = cnt;
3920
3921 phba->sli.slistat.sli_prev_intr = sli_intr;
3922
3923 /* Check chip HA register for error event */
3924 eratt = lpfc_sli_check_eratt(phba);
3925
3926 if (eratt)
3927 /* Tell the worker thread there is work to do */
3928 lpfc_worker_wake_up(phba);
3929 else
3930 /* Restart the timer for next eratt poll */
3931 mod_timer(&phba->eratt_poll,
3932 jiffies +
3933 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3934 return;
3935}
3936
3938/**
3939 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3940 * @phba: Pointer to HBA context object.
3941 * @pring: Pointer to driver SLI ring object.
3942 * @mask: Host attention register mask for this ring.
3943 *
3944 * This function is called from the interrupt context when there is a ring
3945 * event for the fcp ring. The caller does not hold any lock.
3946 * The function processes each response iocb in the response ring until it
3947 * finds an iocb with the LE bit set, chaining all the iocbs up to that
3948 * point. The function calls the completion handler of the command iocb
3949 * if the response iocb indicates the completion of a command iocb or an
3950 * abort completion, and calls lpfc_sli_process_unsol_iocb
3951 * if this is an unsolicited iocb.
3952 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3953 * to check it explicitly.
3954 **/
3955int
3956lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3957 struct lpfc_sli_ring *pring, uint32_t mask)
3958{
3959 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3960 IOCB_t *irsp = NULL;
3961 IOCB_t *entry = NULL;
3962 struct lpfc_iocbq *cmdiocbq = NULL;
3963 struct lpfc_iocbq rspiocbq;
3964 uint32_t status;
3965 uint32_t portRspPut, portRspMax;
3966 int rc = 1;
3967 lpfc_iocb_type type;
3968 unsigned long iflag;
3969 uint32_t rsp_cmpl = 0;
3970
3971 spin_lock_irqsave(&phba->hbalock, iflag);
3972 pring->stats.iocb_event++;
3973
3974 /*
3975 * The next available response entry should never exceed the maximum
3976 * entries. If it does, treat it as an adapter hardware error.
3977 */
3978 portRspMax = pring->sli.sli3.numRiocb;
3979 portRspPut = le32_to_cpu(pgp->rspPutInx);
3980 if (unlikely(portRspPut >= portRspMax)) {
3981 lpfc_sli_rsp_pointers_error(phba, pring);
3982 spin_unlock_irqrestore(&phba->hbalock, iflag);
3983 return 1;
3984 }
3985 if (phba->fcp_ring_in_use) {
3986 spin_unlock_irqrestore(&phba->hbalock, iflag);
3987 return 1;
3988	}
3989	phba->fcp_ring_in_use = 1;
3990
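	/* Read barrier: ensure the portRspPut value read above is not
	 * reordered with the ring-entry reads that follow.
	 */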
3991 rmb();
3992 while (pring->sli.sli3.rspidx != portRspPut) {
3993 /*
3994 * Fetch an entry off the ring and copy it into a local data
3995 * structure. The copy involves a byte-swap since the
3996 * network byte order and pci byte orders are different.
3997 */
3998 entry = lpfc_resp_iocb(phba, pring);
3999 phba->last_completion_time = jiffies;
4000
4001 if (++pring->sli.sli3.rspidx >= portRspMax)
4002 pring->sli.sli3.rspidx = 0;
4003
4004 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4005 (uint32_t *) &rspiocbq.iocb,
4006 phba->iocb_rsp_size);
4007 INIT_LIST_HEAD(&(rspiocbq.list));
4008 irsp = &rspiocbq.iocb;
4009
4010 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4011 pring->stats.iocb_rsp++;
4012 rsp_cmpl++;
4013
4014 if (unlikely(irsp->ulpStatus)) {
4015 /*
4016 * If resource errors reported from HBA, reduce
4017 * queuedepths of the SCSI device.
4018 */
4019 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4020 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4021 IOERR_NO_RESOURCES)) {
4022 spin_unlock_irqrestore(&phba->hbalock, iflag);
4023 phba->lpfc_rampdown_queue_depth(phba);
4024 spin_lock_irqsave(&phba->hbalock, iflag);
4025 }
4026
4027 /* Rsp ring <ringno> error: IOCB */
4028 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4029 "0336 Rsp Ring %d error: IOCB Data: "
4030 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
4031 pring->ringno,
4032 irsp->un.ulpWord[0],
4033 irsp->un.ulpWord[1],
4034 irsp->un.ulpWord[2],
4035 irsp->un.ulpWord[3],
4036 irsp->un.ulpWord[4],
4037 irsp->un.ulpWord[5],
4038 *(uint32_t *)&irsp->un1,
4039 *((uint32_t *)&irsp->un1 + 1));
4040 }
4041
4042 switch (type) {
4043 case LPFC_ABORT_IOCB:
4044 case LPFC_SOL_IOCB:
4045 /*
4046 * Idle exchange closed via ABTS from port. No iocb
4047 * resources need to be recovered.
4048 */
4049 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4050 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4051 "0333 IOCB cmd 0x%x"
4052 " processed. Skipping"
4053 " completion\n",
4054 irsp->ulpCommand);
4055 break;
4056 }
4057
4058 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4059 &rspiocbq);
4060 if (unlikely(!cmdiocbq))
4061 break;
4062 if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4063 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4064 if (cmdiocbq->cmd_cmpl) {
4065 spin_unlock_irqrestore(&phba->hbalock, iflag);
4066 (cmdiocbq->cmd_cmpl)(phba, cmdiocbq,
4067 &rspiocbq);
4068 spin_lock_irqsave(&phba->hbalock, iflag);
4069 }
4070 break;
4071 case LPFC_UNSOL_IOCB:
4072 spin_unlock_irqrestore(&phba->hbalock, iflag);
4073 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4074 spin_lock_irqsave(&phba->hbalock, iflag);
4075 break;
4076 default:
4077 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4078 char adaptermsg[LPFC_MAX_ADPTMSG];
4079 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4080 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4081 MAX_MSG_DATA);
4082 dev_warn(&((phba->pcidev)->dev),
4083 "lpfc%d: %s\n",
4084 phba->brd_no, adaptermsg);
4085 } else {
4086 /* Unknown IOCB command */
4087 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4088 "0334 Unknown IOCB command "
4089 "Data: x%x, x%x x%x x%x x%x\n",
4090 type, irsp->ulpCommand,
4091 irsp->ulpStatus,
4092 irsp->ulpIoTag,
4093 irsp->ulpContext);
4094 }
4095 break;
4096 }
4097
4098 /*
4099 * The response IOCB has been processed. Update the ring
4100 * pointer in SLIM. If the port response put pointer has not
4101 * been updated, sync the pgp->rspPutInx and fetch the new port
4102 * response put pointer.
4103 */
4104 writel(pring->sli.sli3.rspidx,
4105 &phba->host_gp[pring->ringno].rspGetInx);
4106
4107 if (pring->sli.sli3.rspidx == portRspPut)
4108 portRspPut = le32_to_cpu(pgp->rspPutInx);
4109 }
4110
4111 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4112 pring->stats.iocb_rsp_full++;
4113 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4114 writel(status, phba->CAregaddr);
4115 readl(phba->CAregaddr);
4116 }
4117 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4118 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4119 pring->stats.iocb_cmd_empty++;
4120
4121 /* Force update of the local copy of cmdGetInx */
4122 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4123 lpfc_sli_resume_iocb(phba, pring);
4124
4125 if ((pring->lpfc_sli_cmd_available))
4126 (pring->lpfc_sli_cmd_available) (phba, pring);
4127
4128 }
4129
4130 phba->fcp_ring_in_use = 0;
4131 spin_unlock_irqrestore(&phba->hbalock, iflag);
4132 return rc;
4133}
4134
4135/**
4136 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4137 * @phba: Pointer to HBA context object.
4138 * @pring: Pointer to driver SLI ring object.
4139 * @rspiocbp: Pointer to driver response IOCB object.
4140 *
4141 * This function is called from the worker thread when there is a slow-path
4142 * response IOCB to process. This function chains all the response iocbs until
4143 * seeing the iocb with the LE bit set. The function will call
4144 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4145 * completion of a command iocb. The function will call the
4146 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4147 * The function frees the resources or calls the completion handler if this
4148 * iocb is an abort completion. The function returns NULL when the response
4149 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4150 * this function shall chain the iocb on to the iocb_continueq and return the
4151 * response iocb passed in.
4152 **/
4153static struct lpfc_iocbq *
4154lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4155 struct lpfc_iocbq *rspiocbp)
4156{
4157 struct lpfc_iocbq *saveq;
4158 struct lpfc_iocbq *cmdiocb;
4159 struct lpfc_iocbq *next_iocb;
4160 IOCB_t *irsp;
4161 uint32_t free_saveq;
4162 u8 cmd_type;
4163 lpfc_iocb_type type;
4164 unsigned long iflag;
4165 u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4166 u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4167 u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4168 int rc;
4169
4170 spin_lock_irqsave(&phba->hbalock, iflag);
4171	/* First add the response iocb to the continueq list */
4172 list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4173 pring->iocb_continueq_cnt++;
4174
4175 /*
4176 * By default, the driver expects to free all resources
4177 * associated with this iocb completion.
4178 */
4179 free_saveq = 1;
4180 saveq = list_get_first(&pring->iocb_continueq,
4181 struct lpfc_iocbq, list);
4182 list_del_init(&pring->iocb_continueq);
4183 pring->iocb_continueq_cnt = 0;
4184
4185 pring->stats.iocb_rsp++;
4186
4187 /*
4188 * If resource errors reported from HBA, reduce
4189 * queuedepths of the SCSI device.
4190 */
4191 if (ulp_status == IOSTAT_LOCAL_REJECT &&
4192 ((ulp_word4 & IOERR_PARAM_MASK) ==
4193 IOERR_NO_RESOURCES)) {
4194 spin_unlock_irqrestore(&phba->hbalock, iflag);
4195 phba->lpfc_rampdown_queue_depth(phba);
4196 spin_lock_irqsave(&phba->hbalock, iflag);
4197 }
4198
4199 if (ulp_status) {
4200 /* Rsp ring <ringno> error: IOCB */
4201 if (phba->sli_rev < LPFC_SLI_REV4) {
4202 irsp = &rspiocbp->iocb;
4203 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4204 "0328 Rsp Ring %d error: ulp_status x%x "
4205 "IOCB Data: "
4206 "x%08x x%08x x%08x x%08x "
4207 "x%08x x%08x x%08x x%08x "
4208 "x%08x x%08x x%08x x%08x "
4209 "x%08x x%08x x%08x x%08x\n",
4210 pring->ringno, ulp_status,
4211 get_job_ulpword(rspiocbp, 0),
4212 get_job_ulpword(rspiocbp, 1),
4213 get_job_ulpword(rspiocbp, 2),
4214 get_job_ulpword(rspiocbp, 3),
4215 get_job_ulpword(rspiocbp, 4),
4216 get_job_ulpword(rspiocbp, 5),
4217 *(((uint32_t *)irsp) + 6),
4218 *(((uint32_t *)irsp) + 7),
4219 *(((uint32_t *)irsp) + 8),
4220 *(((uint32_t *)irsp) + 9),
4221 *(((uint32_t *)irsp) + 10),
4222 *(((uint32_t *)irsp) + 11),
4223 *(((uint32_t *)irsp) + 12),
4224 *(((uint32_t *)irsp) + 13),
4225 *(((uint32_t *)irsp) + 14),
4226 *(((uint32_t *)irsp) + 15));
4227 } else {
4228 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4229 "0321 Rsp Ring %d error: "
4230 "IOCB Data: "
4231 "x%x x%x x%x x%x\n",
4232 pring->ringno,
4233 rspiocbp->wcqe_cmpl.word0,
4234 rspiocbp->wcqe_cmpl.total_data_placed,
4235 rspiocbp->wcqe_cmpl.parameter,
4236 rspiocbp->wcqe_cmpl.word3);
4237 }
4238 }
4239
4241 /*
4242 * Fetch the iocb command type and call the correct completion
4243 * routine. Solicited and Unsolicited IOCBs on the ELS ring
4244 * get freed back to the lpfc_iocb_list by the discovery
4245 * kernel thread.
4246 */
4247 cmd_type = ulp_command & CMD_IOCB_MASK;
4248 type = lpfc_sli_iocb_cmd_type(cmd_type);
4249 switch (type) {
4250 case LPFC_SOL_IOCB:
4251 spin_unlock_irqrestore(&phba->hbalock, iflag);
4252 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4253 spin_lock_irqsave(&phba->hbalock, iflag);
4254 break;
4255 case LPFC_UNSOL_IOCB:
4256 spin_unlock_irqrestore(&phba->hbalock, iflag);
4257 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4258 spin_lock_irqsave(&phba->hbalock, iflag);
4259 if (!rc)
4260 free_saveq = 0;
4261 break;
4262 case LPFC_ABORT_IOCB:
4263 cmdiocb = NULL;
4264 if (ulp_command != CMD_XRI_ABORTED_CX)
4265 cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4266 saveq);
4267 if (cmdiocb) {
4268 /* Call the specified completion routine */
4269 if (cmdiocb->cmd_cmpl) {
4270 spin_unlock_irqrestore(&phba->hbalock, iflag);
4271 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4272 spin_lock_irqsave(&phba->hbalock, iflag);
4273 } else {
4274 __lpfc_sli_release_iocbq(phba, cmdiocb);
4275 }
4276 }
4277 break;
4278 case LPFC_UNKNOWN_IOCB:
4279 if (ulp_command == CMD_ADAPTER_MSG) {
4280 char adaptermsg[LPFC_MAX_ADPTMSG];
4281
4282 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4283 memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4284 MAX_MSG_DATA);
4285 dev_warn(&((phba->pcidev)->dev),
4286 "lpfc%d: %s\n",
4287 phba->brd_no, adaptermsg);
4288 } else {
4289 /* Unknown command */
4290 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4291 "0335 Unknown IOCB "
4292 "command Data: x%x "
4293 "x%x x%x x%x\n",
4294 ulp_command,
4295 ulp_status,
4296 get_wqe_reqtag(rspiocbp),
4297 get_job_ulpcontext(phba, rspiocbp));
4298 }
4299 break;
4300 }
4301
4302 if (free_saveq) {
4303 list_for_each_entry_safe(rspiocbp, next_iocb,
4304 &saveq->list, list) {
4305 list_del_init(&rspiocbp->list);
4306 __lpfc_sli_release_iocbq(phba, rspiocbp);
4307 }
4308 __lpfc_sli_release_iocbq(phba, saveq);
4309 }
4310 rspiocbp = NULL;
4311 spin_unlock_irqrestore(&phba->hbalock, iflag);
4312 return rspiocbp;
4313}
4314
4315/**
4316 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4317 * @phba: Pointer to HBA context object.
4318 * @pring: Pointer to driver SLI ring object.
4319 * @mask: Host attention register mask for this ring.
4320 *
4321 * This routine wraps the actual slow_ring event process routine from the
4322 * API jump table function pointer from the lpfc_hba struct.
4323 **/
4324void
4325lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4326 struct lpfc_sli_ring *pring, uint32_t mask)
4327{
4328 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4329}
4330
4331/**
4332 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4333 * @phba: Pointer to HBA context object.
4334 * @pring: Pointer to driver SLI ring object.
4335 * @mask: Host attention register mask for this ring.
4336 *
4337 * This function is called from the worker thread when there is a ring event
4338 * for non-fcp rings. The caller does not hold any lock. The function will
4339 * remove each response iocb in the response ring and calls the handle
4340 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4341 **/
4342static void
4343lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4344 struct lpfc_sli_ring *pring, uint32_t mask)
4345{
4346 struct lpfc_pgp *pgp;
4347 IOCB_t *entry;
4348 IOCB_t *irsp = NULL;
4349 struct lpfc_iocbq *rspiocbp = NULL;
4350 uint32_t portRspPut, portRspMax;
4351 unsigned long iflag;
4352 uint32_t status;
4353
4354 pgp = &phba->port_gp[pring->ringno];
4355 spin_lock_irqsave(&phba->hbalock, iflag);
4356 pring->stats.iocb_event++;
4357
4358 /*
4359 * The next available response entry should never exceed the maximum
4360 * entries. If it does, treat it as an adapter hardware error.
4361 */
4362 portRspMax = pring->sli.sli3.numRiocb;
4363 portRspPut = le32_to_cpu(pgp->rspPutInx);
4364 if (portRspPut >= portRspMax) {
4365 /*
4366 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4367 * rsp ring <portRspMax>
4368 */
4369 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4370 "0303 Ring %d handler: portRspPut %d "
4371 "is bigger than rsp ring %d\n",
4372 pring->ringno, portRspPut, portRspMax);
4373
4374 phba->link_state = LPFC_HBA_ERROR;
4375 spin_unlock_irqrestore(&phba->hbalock, iflag);
4376
4377 phba->work_hs = HS_FFER3;
4378 lpfc_handle_eratt(phba);
4379
4380 return;
4381 }
4382
4383 rmb();
4384 while (pring->sli.sli3.rspidx != portRspPut) {
4385 /*
4386 * Build a completion list and call the appropriate handler.
4387 * The process is to get the next available response iocb, get
4388 * a free iocb from the list, copy the response data into the
4389 * free iocb, insert to the continuation list, and update the
4390 * next response index to slim. This process makes response
4391 * iocb's in the ring available to DMA as fast as possible but
4392 * pays a penalty for a copy operation. Since the iocb is
4393 * only 32 bytes, this penalty is considered small relative to
4394 * the PCI reads for register values and a slim write. When
4395 * the ulpLe field is set, the entire Command has been
4396 * received.
4397 */
4398 entry = lpfc_resp_iocb(phba, pring);
4399
4400 phba->last_completion_time = jiffies;
4401 rspiocbp = __lpfc_sli_get_iocbq(phba);
4402 if (rspiocbp == NULL) {
4403 printk(KERN_ERR "%s: out of buffers! Failing "
4404 "completion.\n", __func__);
4405 break;
4406 }
4407
4408 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4409 phba->iocb_rsp_size);
4410 irsp = &rspiocbp->iocb;
4411
4412 if (++pring->sli.sli3.rspidx >= portRspMax)
4413 pring->sli.sli3.rspidx = 0;
4414
4415 if (pring->ringno == LPFC_ELS_RING) {
4416 lpfc_debugfs_slow_ring_trc(phba,
4417 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4418 *(((uint32_t *) irsp) + 4),
4419 *(((uint32_t *) irsp) + 6),
4420 *(((uint32_t *) irsp) + 7));
4421 }
4422
4423 writel(pring->sli.sli3.rspidx,
4424 &phba->host_gp[pring->ringno].rspGetInx);
4425
4426 spin_unlock_irqrestore(&phba->hbalock, iflag);
4427 /* Handle the response IOCB */
4428 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4429 spin_lock_irqsave(&phba->hbalock, iflag);
4430
4431 /*
4432 * If the port response put pointer has not been updated, sync
4433		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4434 * response put pointer.
4435 */
4436 if (pring->sli.sli3.rspidx == portRspPut) {
4437 portRspPut = le32_to_cpu(pgp->rspPutInx);
4438 }
4439 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4440
4441 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4442 /* At least one response entry has been freed */
4443 pring->stats.iocb_rsp_full++;
4444 /* SET RxRE_RSP in Chip Att register */
4445 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4446 writel(status, phba->CAregaddr);
4447 readl(phba->CAregaddr); /* flush */
4448 }
4449 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4450 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4451 pring->stats.iocb_cmd_empty++;
4452
4453 /* Force update of the local copy of cmdGetInx */
4454 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4455 lpfc_sli_resume_iocb(phba, pring);
4456
4457 if ((pring->lpfc_sli_cmd_available))
4458 (pring->lpfc_sli_cmd_available) (phba, pring);
4459
4460 }
4461
4462 spin_unlock_irqrestore(&phba->hbalock, iflag);
4463 return;
4464}
4465
4466/**
4467 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4468 * @phba: Pointer to HBA context object.
4469 * @pring: Pointer to driver SLI ring object.
4470 * @mask: Host attention register mask for this ring.
4471 *
4472 * This function is called from the worker thread when there is a pending
4473 * ELS response iocb on the driver internal slow-path response iocb worker
4474 * queue. The caller does not hold any lock. The function will remove each
4475 * response iocb from the response worker queue and calls the handle
4476 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4477 **/
4478static void
4479lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4480 struct lpfc_sli_ring *pring, uint32_t mask)
4481{
4482 struct lpfc_iocbq *irspiocbq;
4483 struct hbq_dmabuf *dmabuf;
4484 struct lpfc_cq_event *cq_event;
4485 unsigned long iflag;
4486 int count = 0;
4487
4488 spin_lock_irqsave(&phba->hbalock, iflag);
4489 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4490 spin_unlock_irqrestore(&phba->hbalock, iflag);
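	/* HBA_SP_QUEUE_EVT was cleared above so that events queued while
	 * we drain the list will re-arm the worker for another pass.
	 */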
4491 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4492 /* Get the response iocb from the head of work queue */
4493 spin_lock_irqsave(&phba->hbalock, iflag);
4494 list_remove_head(&phba->sli4_hba.sp_queue_event,
4495 cq_event, struct lpfc_cq_event, list);
4496 spin_unlock_irqrestore(&phba->hbalock, iflag);
4497
4498 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4499 case CQE_CODE_COMPL_WQE:
4500 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4501 cq_event);
4502 /* Translate ELS WCQE to response IOCBQ */
4503 irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4504 irspiocbq);
4505 if (irspiocbq)
4506 lpfc_sli_sp_handle_rspiocb(phba, pring,
4507 irspiocbq);
4508 count++;
4509 break;
4510 case CQE_CODE_RECEIVE:
4511 case CQE_CODE_RECEIVE_V1:
4512 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4513 cq_event);
4514 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4515 count++;
4516 break;
4517 default:
4518 break;
4519 }
4520
4521 /* Limit the number of events to 64 to avoid soft lockups */
4522 if (count == 64)
4523 break;
4524 }
4525}
4526
4527/**
4528 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4529 * @phba: Pointer to HBA context object.
4530 * @pring: Pointer to driver SLI ring object.
4531 *
4532 * This function aborts all iocbs in the given ring and frees all the iocb
4533 * objects in txq. This function issues an abort iocb for all the iocb commands
4534 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4535 * the return of this function. The caller is not required to hold any locks.
4536 **/
4537void
4538lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4539{
4540 LIST_HEAD(tx_completions);
4541 LIST_HEAD(txcmplq_completions);
4542 struct lpfc_iocbq *iocb, *next_iocb;
4543 int offline;
4544
4545 if (pring->ringno == LPFC_ELS_RING) {
4546 lpfc_fabric_abort_hba(phba);
4547 }
4548 offline = pci_channel_offline(phba->pcidev);
4549
4550 /* Error everything on txq and txcmplq
4551 * First do the txq.
4552 */
4553 if (phba->sli_rev >= LPFC_SLI_REV4) {
4554 spin_lock_irq(&pring->ring_lock);
4555 list_splice_init(&pring->txq, &tx_completions);
4556 pring->txq_cnt = 0;
4557
4558 if (offline) {
4559 list_splice_init(&pring->txcmplq,
4560 &txcmplq_completions);
4561 } else {
4562 /* Next issue ABTS for everything on the txcmplq */
4563 list_for_each_entry_safe(iocb, next_iocb,
4564 &pring->txcmplq, list)
4565 lpfc_sli_issue_abort_iotag(phba, pring,
4566 iocb, NULL);
4567 }
4568 spin_unlock_irq(&pring->ring_lock);
4569 } else {
4570 spin_lock_irq(&phba->hbalock);
4571 list_splice_init(&pring->txq, &tx_completions);
4572 pring->txq_cnt = 0;
4573
4574 if (offline) {
4575 list_splice_init(&pring->txcmplq, &txcmplq_completions);
4576 } else {
4577 /* Next issue ABTS for everything on the txcmplq */
4578 list_for_each_entry_safe(iocb, next_iocb,
4579 &pring->txcmplq, list)
4580 lpfc_sli_issue_abort_iotag(phba, pring,
4581 iocb, NULL);
4582 }
4583 spin_unlock_irq(&phba->hbalock);
4584 }
4585
4586 if (offline) {
4587 /* Cancel all the IOCBs from the completions list */
4588 lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
4589 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
4590 } else {
4591 /* Make sure HBA is alive */
4592 lpfc_issue_hb_tmo(phba);
4593 }
4594 /* Cancel all the IOCBs from the completions list */
4595 lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4596 IOERR_SLI_ABORTED);
4597}
4598
4599/**
4600 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4601 * @phba: Pointer to HBA context object.
4602 *
4603 * This function aborts all iocbs in FCP rings and frees all the iocb
4604 * objects in txq. This function issues an abort iocb for all the iocb commands
4605 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4606 * the return of this function. The caller is not required to hold any locks.
4607 **/
4608void
4609lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4610{
4611 struct lpfc_sli *psli = &phba->sli;
4612 struct lpfc_sli_ring *pring;
4613 uint32_t i;
4614
4615 /* Look on all the FCP Rings for the iotag */
4616 if (phba->sli_rev >= LPFC_SLI_REV4) {
4617 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4618 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4619 lpfc_sli_abort_iocb_ring(phba, pring);
4620 }
4621 } else {
4622 pring = &psli->sli3_ring[LPFC_FCP_RING];
4623 lpfc_sli_abort_iocb_ring(phba, pring);
4624 }
4625}
4626
4627/**
4628 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4629 * @phba: Pointer to HBA context object.
4630 *
4631 * This function flushes all iocbs in the IO ring and frees all the iocb
4632 * objects in txq and txcmplq. This function does not issue abort iocbs
4633 * for the iocb commands in txcmplq; they are simply completed with
4634 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4635 * slot has been permanently disabled.
4636 **/
4637void
4638lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4639{
4640 LIST_HEAD(txq);
4641 LIST_HEAD(txcmplq);
4642 struct lpfc_sli *psli = &phba->sli;
4643 struct lpfc_sli_ring *pring;
4644 uint32_t i;
4645 struct lpfc_iocbq *piocb, *next_iocb;
4646
4647 spin_lock_irq(&phba->hbalock);
4648 /* Indicate the I/O queues are flushed */
4649 phba->hba_flag |= HBA_IOQ_FLUSH;
4650 spin_unlock_irq(&phba->hbalock);
4651
4652 /* Look on all the FCP Rings for the iotag */
4653 if (phba->sli_rev >= LPFC_SLI_REV4) {
4654 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4655 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4656
4657 spin_lock_irq(&pring->ring_lock);
4658 /* Retrieve everything on txq */
4659 list_splice_init(&pring->txq, &txq);
4660 list_for_each_entry_safe(piocb, next_iocb,
4661 &pring->txcmplq, list)
4662 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4663 /* Retrieve everything on the txcmplq */
4664 list_splice_init(&pring->txcmplq, &txcmplq);
4665 pring->txq_cnt = 0;
4666 pring->txcmplq_cnt = 0;
4667 spin_unlock_irq(&pring->ring_lock);
4668
4669 /* Flush the txq */
4670 lpfc_sli_cancel_iocbs(phba, &txq,
4671 IOSTAT_LOCAL_REJECT,
4672 IOERR_SLI_DOWN);
4673 /* Flush the txcmplq */
4674 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4675 IOSTAT_LOCAL_REJECT,
4676 IOERR_SLI_DOWN);
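			/* With the PCI channel offline no further abort
			 * completions will arrive, so release any
			 * outstanding aborted XRIs here.
			 */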
4677 if (unlikely(pci_channel_offline(phba->pcidev)))
4678 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4679 }
4680 } else {
4681 pring = &psli->sli3_ring[LPFC_FCP_RING];
4682
4683 spin_lock_irq(&phba->hbalock);
4684 /* Retrieve everything on txq */
4685 list_splice_init(&pring->txq, &txq);
4686 list_for_each_entry_safe(piocb, next_iocb,
4687 &pring->txcmplq, list)
4688 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4689 /* Retrieve everything on the txcmplq */
4690 list_splice_init(&pring->txcmplq, &txcmplq);
4691 pring->txq_cnt = 0;
4692 pring->txcmplq_cnt = 0;
4693 spin_unlock_irq(&phba->hbalock);
4694
4695 /* Flush the txq */
4696 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4697 IOERR_SLI_DOWN);
4698		/* Flush the txcmplq */
4699 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4700 IOERR_SLI_DOWN);
4701 }
4702}
4703
4704/**
4705 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4706 * @phba: Pointer to HBA context object.
4707 * @mask: Bit mask to be checked.
4708 *
4709 * This function reads the host status register and compares it
4710 * with the provided bit mask to check if the HBA completed
4711 * the restart. This function will wait in a loop for the
4712 * HBA to complete the restart. If the HBA does not restart within
4713 * 15 iterations, the function will reset the HBA again. The
4714 * function returns 1 when the HBA fails to restart, otherwise it
4715 * returns zero.
4716 **/
4717static int
4718lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4719{
4720 uint32_t status;
4721 int i = 0;
4722 int retval = 0;
4723
4724 /* Read the HBA Host Status Register */
4725 if (lpfc_readl(phba->HSregaddr, &status))
4726 return 1;
4727
4728 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4729
4730 /*
4731	 * Check the status register every 10ms for 5 retries, then every
4732	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4733	 * keep checking every 2.5 sec for 5 more.
4734	 * Break out of the loop if errors occurred during init.
4735 */
4736 while (((status & mask) != mask) &&
4737 !(status & HS_FFERM) &&
4738 i++ < 20) {
4739
4740 if (i <= 5)
4741 msleep(10);
4742 else if (i <= 10)
4743 msleep(500);
4744 else
4745 msleep(2500);
4746
4747 if (i == 15) {
4748 /* Do post */
4749 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4750 lpfc_sli_brdrestart(phba);
4751 }
4752 /* Read the HBA Host Status Register */
4753 if (lpfc_readl(phba->HSregaddr, &status)) {
4754 retval = 1;
4755 break;
4756 }
4757 }
4758
4759 /* Check to see if any errors occurred during init */
4760 if ((status & HS_FFERM) || (i >= 20)) {
4761 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4762 "2751 Adapter failed to restart, "
4763 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4764 status,
4765 readl(phba->MBslimaddr + 0xa8),
4766 readl(phba->MBslimaddr + 0xac));
4767 phba->link_state = LPFC_HBA_ERROR;
4768 retval = 1;
4769 }
4770
4771 return retval;
4772}
4773
4774/**
4775 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4776 * @phba: Pointer to HBA context object.
4777 * @mask: Bit mask to be checked.
4778 *
4779 * This function checks the SLI4 port POST status to determine if the HBA
4780 * is ready. This function will wait in a loop for the HBA to become ready.
4781 * If the HBA is not ready, the function will reset the HBA PCI
4782 * function again. The function returns 1 when the HBA fails to become
4783 * ready, otherwise it returns zero.
4784 **/
4785static int
4786lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4787{
4788 uint32_t status;
4789 int retval = 0;
4790
4791	/* Check the SLI4 port POST status */
4792 status = lpfc_sli4_post_status_check(phba);
4793
4794 if (status) {
4795 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4796 lpfc_sli_brdrestart(phba);
4797 status = lpfc_sli4_post_status_check(phba);
4798 }
4799
4800 /* Check to see if any errors occurred during init */
4801 if (status) {
4802 phba->link_state = LPFC_HBA_ERROR;
4803 retval = 1;
4804 } else
4805 phba->sli4_hba.intr_enable = 0;
4806
4807 phba->hba_flag &= ~HBA_SETUP;
4808 return retval;
4809}
4810
4811/**
4812 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4813 * @phba: Pointer to HBA context object.
4814 * @mask: Bit mask to be checked.
4815 *
4816 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4817 * from the API jump table function pointer in the lpfc_hba struct.
4818 **/
4819int
4820lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4821{
4822 return phba->lpfc_sli_brdready(phba, mask);
4823}
4824
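/* The reset barrier is treated as acknowledged once this pattern, written
 * to SLIM, reads back as its one's complement.
 */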
4825#define BARRIER_TEST_PATTERN (0xdeadbeef)
4826
4827/**
4828 * lpfc_reset_barrier - Make HBA ready for HBA reset
4829 * @phba: Pointer to HBA context object.
4830 *
4831 * This function is called before resetting an HBA. This function is called
4832 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4833 **/
4834void lpfc_reset_barrier(struct lpfc_hba *phba)
4835{
4836 uint32_t __iomem *resp_buf;
4837 uint32_t __iomem *mbox_buf;
4838 volatile struct MAILBOX_word0 mbox;
4839 uint32_t hc_copy, ha_copy, resp_data;
4840 int i;
4841 uint8_t hdrtype;
4842
4843 lockdep_assert_held(&phba->hbalock);
4844
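	/* The DMA quiesce barrier is only needed on multi-function (header
	 * type 0x80) Helios and Thor adapters; skip it everywhere else.
	 */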
4845 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4846 if (hdrtype != 0x80 ||
4847 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4848 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4849 return;
4850
4851 /*
4852 * Tell the other part of the chip to suspend temporarily all
4853 * its DMA activity.
4854 */
4855 resp_buf = phba->MBslimaddr;
4856
4857 /* Disable the error attention */
4858 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4859 return;
4860 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4861 readl(phba->HCregaddr); /* flush */
4862 phba->link_flag |= LS_IGNORE_ERATT;
4863
4864 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4865 return;
4866 if (ha_copy & HA_ERATT) {
4867 /* Clear Chip error bit */
4868 writel(HA_ERATT, phba->HAregaddr);
4869 phba->pport->stopped = 1;
4870 }
4871
4872 mbox.word0 = 0;
4873 mbox.mbxCommand = MBX_KILL_BOARD;
4874 mbox.mbxOwner = OWN_CHIP;
4875
4876 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4877 mbox_buf = phba->MBslimaddr;
4878 writel(mbox.word0, mbox_buf);
4879
4880 for (i = 0; i < 50; i++) {
4881 if (lpfc_readl((resp_buf + 1), &resp_data))
4882 return;
4883 if (resp_data != ~(BARRIER_TEST_PATTERN))
4884 mdelay(1);
4885 else
4886 break;
4887 }
4888 resp_data = 0;
4889 if (lpfc_readl((resp_buf + 1), &resp_data))
4890 return;
4891 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4892 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4893 phba->pport->stopped)
4894 goto restore_hc;
4895 else
4896 goto clear_errat;
4897 }
4898
4899 mbox.mbxOwner = OWN_HOST;
4900 resp_data = 0;
4901 for (i = 0; i < 500; i++) {
4902 if (lpfc_readl(resp_buf, &resp_data))
4903 return;
4904 if (resp_data != mbox.word0)
4905 mdelay(1);
4906 else
4907 break;
4908 }
4909
4910clear_errat:
4911
4912 while (++i < 500) {
4913 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4914 return;
4915 if (!(ha_copy & HA_ERATT))
4916 mdelay(1);
4917 else
4918 break;
4919 }
4920
4921 if (readl(phba->HAregaddr) & HA_ERATT) {
4922 writel(HA_ERATT, phba->HAregaddr);
4923 phba->pport->stopped = 1;
4924 }
4925
4926restore_hc:
4927 phba->link_flag &= ~LS_IGNORE_ERATT;
4928 writel(hc_copy, phba->HCregaddr);
4929 readl(phba->HCregaddr); /* flush */
4930}
4931
4932/**
4933 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4934 * @phba: Pointer to HBA context object.
4935 *
4936 * This function issues a kill_board mailbox command and waits for
4937 * the error attention interrupt. This function is called for stopping
4938 * the firmware processing. The caller is not required to hold any
4939 * locks. This function calls lpfc_hba_down_post function to free
4940 * any pending commands after the kill. The function will return 1 when it
4941 * fails to kill the board, otherwise it will return 0.
4942 **/
4943int
4944lpfc_sli_brdkill(struct lpfc_hba *phba)
4945{
4946 struct lpfc_sli *psli;
4947 LPFC_MBOXQ_t *pmb;
4948 uint32_t status;
4949 uint32_t ha_copy;
4950 int retval;
4951 int i = 0;
4952
4953 psli = &phba->sli;
4954
4955 /* Kill HBA */
4956 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4957 "0329 Kill HBA Data: x%x x%x\n",
4958 phba->pport->port_state, psli->sli_flag);
4959
4960 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4961 if (!pmb)
4962 return 1;
4963
4964 /* Disable the error attention */
4965 spin_lock_irq(&phba->hbalock);
4966 if (lpfc_readl(phba->HCregaddr, &status)) {
4967 spin_unlock_irq(&phba->hbalock);
4968 mempool_free(pmb, phba->mbox_mem_pool);
4969 return 1;
4970 }
4971 status &= ~HC_ERINT_ENA;
4972 writel(status, phba->HCregaddr);
4973 readl(phba->HCregaddr); /* flush */
4974 phba->link_flag |= LS_IGNORE_ERATT;
4975 spin_unlock_irq(&phba->hbalock);
4976
4977 lpfc_kill_board(phba, pmb);
4978 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4979 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4980
4981 if (retval != MBX_SUCCESS) {
4982 if (retval != MBX_BUSY)
4983 mempool_free(pmb, phba->mbox_mem_pool);
4984 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4985 "2752 KILL_BOARD command failed retval %d\n",
4986 retval);
4987 spin_lock_irq(&phba->hbalock);
4988 phba->link_flag &= ~LS_IGNORE_ERATT;
4989 spin_unlock_irq(&phba->hbalock);
4990 return 1;
4991 }
4992
4993 spin_lock_irq(&phba->hbalock);
4994 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4995 spin_unlock_irq(&phba->hbalock);
4996
4997 mempool_free(pmb, phba->mbox_mem_pool);
4998
4999 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
5000 * attention every 100ms for 3 seconds. If we don't get ERATT after
5001 * 3 seconds we still set HBA_ERROR state because the status of the
5002 * board is now undefined.
5003 */
5004 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5005 return 1;
5006 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
5007 mdelay(100);
5008 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5009 return 1;
5010 }
5011
5012 del_timer_sync(&psli->mbox_tmo);
5013 if (ha_copy & HA_ERATT) {
5014 writel(HA_ERATT, phba->HAregaddr);
5015 phba->pport->stopped = 1;
5016 }
5017 spin_lock_irq(&phba->hbalock);
5018 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5019 psli->mbox_active = NULL;
5020 phba->link_flag &= ~LS_IGNORE_ERATT;
5021 spin_unlock_irq(&phba->hbalock);
5022
5023 lpfc_hba_down_post(phba);
5024 phba->link_state = LPFC_HBA_ERROR;
5025
5026 return ha_copy & HA_ERATT ? 0 : 1;
5027}
5028
5029/**
5030 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
5031 * @phba: Pointer to HBA context object.
5032 *
5033 * This function resets the HBA by writing HC_INITFF to the control
5034 * register. After the HBA resets, this function resets all the iocb ring
5035 * indices. This function disables PCI layer parity checking during
5036 * the reset.
5037 * This function returns 0 on success and -EIO if the PCI config read fails.
5038 * The caller is not required to hold any locks.
5039 **/
5040int
5041lpfc_sli_brdreset(struct lpfc_hba *phba)
5042{
5043 struct lpfc_sli *psli;
5044 struct lpfc_sli_ring *pring;
5045 uint16_t cfg_value;
5046 int i;
5047
5048 psli = &phba->sli;
5049
5050 /* Reset HBA */
5051 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5052 "0325 Reset HBA Data: x%x x%x\n",
5053 (phba->pport) ? phba->pport->port_state : 0,
5054 psli->sli_flag);
5055
5056 /* perform board reset */
5057 phba->fc_eventTag = 0;
5058 phba->link_events = 0;
5059 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5060 if (phba->pport) {
5061 phba->pport->fc_myDID = 0;
5062 phba->pport->fc_prevDID = 0;
5063 }
5064
5065 /* Turn off parity checking and serr during the physical reset */
5066 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5067 return -EIO;
5068
5069 pci_write_config_word(phba->pcidev, PCI_COMMAND,
5070 (cfg_value &
5071 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5072
5073 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5074
5075 /* Now toggle INITFF bit in the Host Control Register */
5076 writel(HC_INITFF, phba->HCregaddr);
5077 mdelay(1);
5078 readl(phba->HCregaddr); /* flush */
5079 writel(0, phba->HCregaddr);
5080 readl(phba->HCregaddr); /* flush */
5081
5082 /* Restore PCI cmd register */
5083 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5084
5085 /* Initialize relevant SLI info */
5086 for (i = 0; i < psli->num_rings; i++) {
5087 pring = &psli->sli3_ring[i];
5088 pring->flag = 0;
5089 pring->sli.sli3.rspidx = 0;
5090 pring->sli.sli3.next_cmdidx = 0;
5091 pring->sli.sli3.local_getidx = 0;
5092 pring->sli.sli3.cmdidx = 0;
5093 pring->missbufcnt = 0;
5094 }
5095
5096 phba->link_state = LPFC_WARM_START;
5097 return 0;
5098}
5099
5100/**
5101 * lpfc_sli4_brdreset - Reset a sli-4 HBA
5102 * @phba: Pointer to HBA context object.
5103 *
5104 * This function resets a SLI4 HBA. This function disables PCI layer parity
5105 * checking while it resets the device. The caller is not required to hold
5106 * any locks.
5107 *
5108 * This function returns 0 on success else returns negative error code.
5109 **/
5110int
5111lpfc_sli4_brdreset(struct lpfc_hba *phba)
5112{
5113 struct lpfc_sli *psli = &phba->sli;
5114 uint16_t cfg_value;
5115 int rc = 0;
5116
5117 /* Reset HBA */
5118 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5119 "0295 Reset HBA Data: x%x x%x x%x\n",
5120 phba->pport->port_state, psli->sli_flag,
5121 phba->hba_flag);
5122
5123 /* perform board reset */
5124 phba->fc_eventTag = 0;
5125 phba->link_events = 0;
5126 phba->pport->fc_myDID = 0;
5127 phba->pport->fc_prevDID = 0;
5128 phba->hba_flag &= ~HBA_SETUP;
5129
5130 spin_lock_irq(&phba->hbalock);
5131 psli->sli_flag &= ~(LPFC_PROCESS_LA);
5132 phba->fcf.fcf_flag = 0;
5133 spin_unlock_irq(&phba->hbalock);
5134
5135 /* Now physically reset the device */
5136 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5137 "0389 Performing PCI function reset!\n");
5138
5139 /* Turn off parity checking and serr during the physical reset */
5140 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5141 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5142 "3205 PCI read Config failed\n");
5143 return -EIO;
5144 }
5145
5146 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5147 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5148
5149 /* Perform FCoE PCI function reset before freeing queue memory */
5150 rc = lpfc_pci_function_reset(phba);
5151
5152 /* Restore PCI cmd register */
5153 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5154
5155 return rc;
5156}
5157
5158/**
5159 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5160 * @phba: Pointer to HBA context object.
5161 *
5162 * This function is called in the SLI initialization code path to
5163 * restart the HBA. The caller is not required to hold any lock.
5164 * This function writes MBX_RESTART mailbox command to the SLIM and
5165 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5166 * function to free any pending commands. The function enables
5167 * POST only during the first initialization. The function returns zero.
 5168 * The function does not wait for the MBX_RESTART mailbox command to
 5169 * complete before returning.
5170 **/
5171static int
5172lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5173{
5174 volatile struct MAILBOX_word0 mb;
5175 struct lpfc_sli *psli;
5176 void __iomem *to_slim;
5177 uint32_t hba_aer_enabled;
5178
5179 spin_lock_irq(&phba->hbalock);
5180
5181 /* Take PCIe device Advanced Error Reporting (AER) state */
5182 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5183
5184 psli = &phba->sli;
5185
5186 /* Restart HBA */
5187 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5188 "0337 Restart HBA Data: x%x x%x\n",
5189 (phba->pport) ? phba->pport->port_state : 0,
5190 psli->sli_flag);
5191
5192 mb.word0 = 0;
5193 mb.mbxCommand = MBX_RESTART;
5194 mb.mbxHc = 1;
5195
5196 lpfc_reset_barrier(phba);
5197
5198 to_slim = phba->MBslimaddr;
5199 writel(mb.word0, to_slim);
5200 readl(to_slim); /* flush */
5201
5202 /* Only skip post after fc_ffinit is completed */
5203 if (phba->pport && phba->pport->port_state)
5204 mb.word0 = 1; /* This is really setting up word1 */
5205 else
5206 mb.word0 = 0; /* This is really setting up word1 */
5207 to_slim = phba->MBslimaddr + sizeof (uint32_t);
5208 writel(mb.word0, to_slim);
5209 readl(to_slim); /* flush */
5210
5211 lpfc_sli_brdreset(phba);
5212 if (phba->pport)
5213 phba->pport->stopped = 0;
5214 phba->link_state = LPFC_INIT_START;
5215 phba->hba_flag = 0;
5216 spin_unlock_irq(&phba->hbalock);
5217
5218 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5219 psli->stats_start = ktime_get_seconds();
5220
5221 /* Give the INITFF and Post time to settle. */
5222 mdelay(100);
5223
5224 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5225 if (hba_aer_enabled)
5226 pci_disable_pcie_error_reporting(phba->pcidev);
5227
5228 lpfc_hba_down_post(phba);
5229
5230 return 0;
5231}
5232
5233/**
5234 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5235 * @phba: Pointer to HBA context object.
5236 *
5237 * This function is called in the SLI initialization code path to restart
5238 * a SLI4 HBA. The caller is not required to hold any lock.
5239 * At the end of the function, it calls lpfc_hba_down_post function to
5240 * free any pending commands.
5241 **/
5242static int
5243lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5244{
5245 struct lpfc_sli *psli = &phba->sli;
5246 uint32_t hba_aer_enabled;
5247 int rc;
5248
5249 /* Restart HBA */
5250 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5251 "0296 Restart HBA Data: x%x x%x\n",
5252 phba->pport->port_state, psli->sli_flag);
5253
5254 /* Take PCIe device Advanced Error Reporting (AER) state */
5255 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5256
5257 rc = lpfc_sli4_brdreset(phba);
5258 if (rc) {
5259 phba->link_state = LPFC_HBA_ERROR;
5260 goto hba_down_queue;
5261 }
5262
5263 spin_lock_irq(&phba->hbalock);
5264 phba->pport->stopped = 0;
5265 phba->link_state = LPFC_INIT_START;
5266 phba->hba_flag = 0;
5267 phba->sli4_hba.fawwpn_flag = 0;
5268 spin_unlock_irq(&phba->hbalock);
5269
5270 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5271 psli->stats_start = ktime_get_seconds();
5272
5273 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5274 if (hba_aer_enabled)
5275 pci_disable_pcie_error_reporting(phba->pcidev);
5276
5277hba_down_queue:
5278 lpfc_hba_down_post(phba);
5279 lpfc_sli4_queue_destroy(phba);
5280
5281 return rc;
5282}
5283
5284/**
5285 * lpfc_sli_brdrestart - Wrapper func for restarting hba
5286 * @phba: Pointer to HBA context object.
5287 *
 5288 * This routine wraps the actual SLI-3 or SLI-4 HBA restart routine through
 5289 * the API jump table function pointer in the lpfc_hba struct.
5290**/
5291int
5292lpfc_sli_brdrestart(struct lpfc_hba *phba)
5293{
5294 return phba->lpfc_sli_brdrestart(phba);
5295}
5296
5297/**
5298 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
5299 * @phba: Pointer to HBA context object.
5300 *
 5301 * This function is called after an HBA restart to wait for successful
 5302 * restart of the HBA. Successful restart of the HBA is indicated by the
 5303 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 150
 5304 * iterations, the function will restart the HBA again. The function returns
 5305 * zero if the HBA successfully restarted, else a negative error code.
5306 **/
5307int
5308lpfc_sli_chipset_init(struct lpfc_hba *phba)
5309{
5310 uint32_t status, i = 0;
5311
5312 /* Read the HBA Host Status Register */
5313 if (lpfc_readl(phba->HSregaddr, &status))
5314 return -EIO;
5315
5316 /* Check status register to see what current state is */
5317 i = 0;
5318 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5319
 5320		/* Check every 10ms for 10 retries, then every 100ms for 90
 5321		 * retries, then every 1 sec for 50 retries, for a total of
 5322		 * ~60 seconds before resetting the board again, then check
 5323		 * every 1 sec for 50 more retries. Waiting up to 60 seconds
 5324		 * for board ready is required for Falcon FIPS zeroization to
 5325		 * complete; any board reset in between restarts zeroization
 5326		 * and further delays board ready.
 5327		 */
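		/*
		 * Worked out from the poll intervals below (no new
		 * constraint): 10 * 10 ms + 90 * 100 ms + 50 * 1 s ~= 59 s
		 * elapse before the restart at i == 150, then up to 50 more
		 * 1 s polls run before the timeout at i >= 200.
		 */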
5328 if (i++ >= 200) {
5329 /* Adapter failed to init, timeout, status reg
5330 <status> */
5331 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5332 "0436 Adapter failed to init, "
5333 "timeout, status reg x%x, "
5334 "FW Data: A8 x%x AC x%x\n", status,
5335 readl(phba->MBslimaddr + 0xa8),
5336 readl(phba->MBslimaddr + 0xac));
5337 phba->link_state = LPFC_HBA_ERROR;
5338 return -ETIMEDOUT;
5339 }
5340
5341 /* Check to see if any errors occurred during init */
5342 if (status & HS_FFERM) {
5343 /* ERROR: During chipset initialization */
5344 /* Adapter failed to init, chipset, status reg
5345 <status> */
5346 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5347 "0437 Adapter failed to init, "
5348 "chipset, status reg x%x, "
5349 "FW Data: A8 x%x AC x%x\n", status,
5350 readl(phba->MBslimaddr + 0xa8),
5351 readl(phba->MBslimaddr + 0xac));
5352 phba->link_state = LPFC_HBA_ERROR;
5353 return -EIO;
5354 }
5355
5356 if (i <= 10)
5357 msleep(10);
5358 else if (i <= 100)
5359 msleep(100);
5360 else
5361 msleep(1000);
5362
5363 if (i == 150) {
5364 /* Do post */
5365 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5366 lpfc_sli_brdrestart(phba);
5367 }
5368 /* Read the HBA Host Status Register */
5369 if (lpfc_readl(phba->HSregaddr, &status))
5370 return -EIO;
5371 }
5372
5373 /* Check to see if any errors occurred during init */
5374 if (status & HS_FFERM) {
5375 /* ERROR: During chipset initialization */
5376 /* Adapter failed to init, chipset, status reg <status> */
5377 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5378 "0438 Adapter failed to init, chipset, "
5379 "status reg x%x, "
5380 "FW Data: A8 x%x AC x%x\n", status,
5381 readl(phba->MBslimaddr + 0xa8),
5382 readl(phba->MBslimaddr + 0xac));
5383 phba->link_state = LPFC_HBA_ERROR;
5384 return -EIO;
5385 }
5386
5387 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5388
5389 /* Clear all interrupt enable conditions */
5390 writel(0, phba->HCregaddr);
5391 readl(phba->HCregaddr); /* flush */
5392
5393 /* setup host attn register */
5394 writel(0xffffffff, phba->HAregaddr);
5395 readl(phba->HAregaddr); /* flush */
5396 return 0;
5397}
5398
5399/**
5400 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5401 *
5402 * This function calculates and returns the number of HBQs required to be
5403 * configured.
5404 **/
5405int
5406lpfc_sli_hbq_count(void)
5407{
5408 return ARRAY_SIZE(lpfc_hbq_defs);
5409}
5410
5411/**
5412 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5413 *
5414 * This function adds the number of hbq entries in every HBQ to get
5415 * the total number of hbq entries required for the HBA and returns
5416 * the total count.
5417 **/
5418static int
5419lpfc_sli_hbq_entry_count(void)
5420{
5421 int hbq_count = lpfc_sli_hbq_count();
5422 int count = 0;
5423 int i;
5424
5425 for (i = 0; i < hbq_count; ++i)
5426 count += lpfc_hbq_defs[i]->entry_count;
5427 return count;
5428}
5429
5430/**
5431 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5432 *
5433 * This function calculates amount of memory required for all hbq entries
5434 * to be configured and returns the total memory required.
5435 **/
5436int
5437lpfc_sli_hbq_size(void)
5438{
5439 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5440}
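/*
 * Worked example (the figures are illustrative, not taken from the
 * driver headers): with two HBQs of 256 entries each and a 16-byte
 * struct lpfc_hbq_entry, lpfc_sli_hbq_size() would return
 * 2 * 256 * 16 = 8192 bytes of HBQ memory.
 */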
5441
5442/**
5443 * lpfc_sli_hbq_setup - configure and initialize HBQs
5444 * @phba: Pointer to HBA context object.
5445 *
5446 * This function is called during the SLI initialization to configure
5447 * all the HBQs and post buffers to the HBQ. The caller is not
5448 * required to hold any locks. This function will return zero if successful
5449 * else it will return negative error code.
5450 **/
5451static int
5452lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5453{
5454 int hbq_count = lpfc_sli_hbq_count();
5455 LPFC_MBOXQ_t *pmb;
5456 MAILBOX_t *pmbox;
5457 uint32_t hbqno;
5458 uint32_t hbq_entry_index;
5459
5460 /* Get a Mailbox buffer to setup mailbox
5461 * commands for HBA initialization
5462 */
5463 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5464
5465 if (!pmb)
5466 return -ENOMEM;
5467
5468 pmbox = &pmb->u.mb;
5469
5470 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5471 phba->link_state = LPFC_INIT_MBX_CMDS;
5472 phba->hbq_in_use = 1;
5473
5474 hbq_entry_index = 0;
5475 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5476 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5477 phba->hbqs[hbqno].hbqPutIdx = 0;
5478 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5479 phba->hbqs[hbqno].entry_count =
5480 lpfc_hbq_defs[hbqno]->entry_count;
5481 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5482 hbq_entry_index, pmb);
5483 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5484
5485 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5486 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5487 mbxStatus <status>, ring <num> */
5488
5489 lpfc_printf_log(phba, KERN_ERR,
5490 LOG_SLI | LOG_VPORT,
5491 "1805 Adapter failed to init. "
5492 "Data: x%x x%x x%x\n",
5493 pmbox->mbxCommand,
5494 pmbox->mbxStatus, hbqno);
5495
5496 phba->link_state = LPFC_HBA_ERROR;
5497 mempool_free(pmb, phba->mbox_mem_pool);
5498 return -ENXIO;
5499 }
5500 }
5501 phba->hbq_count = hbq_count;
5502
5503 mempool_free(pmb, phba->mbox_mem_pool);
5504
5505 /* Initially populate or replenish the HBQs */
5506 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5507 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5508 return 0;
5509}
5510
5511/**
5512 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5513 * @phba: Pointer to HBA context object.
5514 *
 5515 * This function is called during SLI4 initialization to configure the
 5516 * receive buffers: only the ELS HBQ is used on SLI4, and buffers are
 5517 * posted to it. The caller is not required to hold any locks. This
 5518 * function always returns zero.
5519 **/
5520static int
5521lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5522{
5523 phba->hbq_in_use = 1;
 5524	/*
 5525	 * Special case: when MDS diagnostics are enabled and supported,
 5526	 * the receive buffer count is halved to manage the incoming
 5527	 * traffic.
 5528	 */
5529 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5530 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5531 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5532 else
5533 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5534 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5535 phba->hbq_count = 1;
 5536	/* Initially populate or replenish the HBQ */
 5537	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5538 return 0;
5539}
5540
5541/**
5542 * lpfc_sli_config_port - Issue config port mailbox command
5543 * @phba: Pointer to HBA context object.
5544 * @sli_mode: sli mode - 2/3
5545 *
5546 * This function is called by the sli initialization code path
5547 * to issue config_port mailbox command. This function restarts the
5548 * HBA firmware and issues a config_port mailbox command to configure
5549 * the SLI interface in the sli mode specified by sli_mode
5550 * variable. The caller is not required to hold any locks.
5551 * The function returns 0 if successful, else returns negative error
5552 * code.
5553 **/
5554int
5555lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5556{
5557 LPFC_MBOXQ_t *pmb;
5558 uint32_t resetcount = 0, rc = 0, done = 0;
5559
5560 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5561 if (!pmb) {
5562 phba->link_state = LPFC_HBA_ERROR;
5563 return -ENOMEM;
5564 }
5565
5566 phba->sli_rev = sli_mode;
5567 while (resetcount < 2 && !done) {
5568 spin_lock_irq(&phba->hbalock);
5569 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5570 spin_unlock_irq(&phba->hbalock);
5571 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5572 lpfc_sli_brdrestart(phba);
5573 rc = lpfc_sli_chipset_init(phba);
5574 if (rc)
5575 break;
5576
5577 spin_lock_irq(&phba->hbalock);
5578 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5579 spin_unlock_irq(&phba->hbalock);
5580 resetcount++;
5581
5582 /* Call pre CONFIG_PORT mailbox command initialization. A
5583 * value of 0 means the call was successful. Any other
5584 * nonzero value is a failure, but if ERESTART is returned,
5585 * the driver may reset the HBA and try again.
5586 */
5587 rc = lpfc_config_port_prep(phba);
5588 if (rc == -ERESTART) {
5589 phba->link_state = LPFC_LINK_UNKNOWN;
5590 continue;
5591 } else if (rc)
5592 break;
5593
5594 phba->link_state = LPFC_INIT_MBX_CMDS;
5595 lpfc_config_port(phba, pmb);
5596 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5597 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5598 LPFC_SLI3_HBQ_ENABLED |
5599 LPFC_SLI3_CRP_ENABLED |
5600 LPFC_SLI3_DSS_ENABLED);
5601 if (rc != MBX_SUCCESS) {
5602 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5603 "0442 Adapter failed to init, mbxCmd x%x "
5604 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5605 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5606 spin_lock_irq(&phba->hbalock);
5607 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5608 spin_unlock_irq(&phba->hbalock);
5609 rc = -ENXIO;
5610 } else {
5611 /* Allow asynchronous mailbox command to go through */
5612 spin_lock_irq(&phba->hbalock);
5613 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5614 spin_unlock_irq(&phba->hbalock);
5615 done = 1;
5616
5617 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5618 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5619 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5620 "3110 Port did not grant ASABT\n");
5621 }
5622 }
5623 if (!done) {
5624 rc = -EINVAL;
5625 goto do_prep_failed;
5626 }
5627 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5628 if (!pmb->u.mb.un.varCfgPort.cMA) {
5629 rc = -ENXIO;
5630 goto do_prep_failed;
5631 }
5632 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5633 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5634 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5635 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5636 phba->max_vpi : phba->max_vports;
5637
5638 } else
5639 phba->max_vpi = 0;
5640 if (pmb->u.mb.un.varCfgPort.gerbm)
5641 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5642 if (pmb->u.mb.un.varCfgPort.gcrp)
5643 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5644
5645 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5646 phba->port_gp = phba->mbox->us.s3_pgp.port;
5647
5648 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5649 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5650 phba->cfg_enable_bg = 0;
5651 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5652 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5653 "0443 Adapter did not grant "
5654 "BlockGuard\n");
5655 }
5656 }
5657 } else {
5658 phba->hbq_get = NULL;
5659 phba->port_gp = phba->mbox->us.s2.port;
5660 phba->max_vpi = 0;
5661 }
5662do_prep_failed:
5663 mempool_free(pmb, phba->mbox_mem_pool);
5664 return rc;
5665}
5666
5667
5668/**
5669 * lpfc_sli_hba_setup - SLI initialization function
5670 * @phba: Pointer to HBA context object.
5671 *
5672 * This function is the main SLI initialization function. This function
5673 * is called by the HBA initialization code, HBA reset code and HBA
5674 * error attention handler code. Caller is not required to hold any
5675 * locks. This function issues config_port mailbox command to configure
5676 * the SLI, setup iocb rings and HBQ rings. In the end the function
5677 * calls the config_port_post function to issue init_link mailbox
5678 * command and to start the discovery. The function will return zero
5679 * if successful, else it will return negative error code.
5680 **/
5681int
5682lpfc_sli_hba_setup(struct lpfc_hba *phba)
5683{
5684 uint32_t rc;
5685 int i;
5686 int longs;
5687
5688 /* Enable ISR already does config_port because of config_msi mbx */
5689 if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5690 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5691 if (rc)
5692 return -EIO;
5693 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5694 }
5695 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5696
5697 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5698 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5699 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5700 if (!rc) {
5701 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5702 "2709 This device supports "
5703 "Advanced Error Reporting (AER)\n");
5704 spin_lock_irq(&phba->hbalock);
5705 phba->hba_flag |= HBA_AER_ENABLED;
5706 spin_unlock_irq(&phba->hbalock);
5707 } else {
5708 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5709 "2708 This device does not support "
5710 "Advanced Error Reporting (AER): %d\n",
5711 rc);
5712 phba->cfg_aer_support = 0;
5713 }
5714 }
5715
5716 if (phba->sli_rev == 3) {
5717 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5718 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5719 } else {
5720 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5721 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5722 phba->sli3_options = 0;
5723 }
5724
5725 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5726 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5727 phba->sli_rev, phba->max_vpi);
5728 rc = lpfc_sli_ring_map(phba);
5729
5730 if (rc)
5731 goto lpfc_sli_hba_setup_error;
5732
5733 /* Initialize VPIs. */
5734 if (phba->sli_rev == LPFC_SLI_REV3) {
5735 /*
5736 * The VPI bitmask and physical ID array are allocated
5737 * and initialized once only - at driver load. A port
5738 * reset doesn't need to reinitialize this memory.
5739 */
5740 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5741 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5742 phba->vpi_bmask = kcalloc(longs,
5743 sizeof(unsigned long),
5744 GFP_KERNEL);
5745 if (!phba->vpi_bmask) {
5746 rc = -ENOMEM;
5747 goto lpfc_sli_hba_setup_error;
5748 }
5749
5750 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5751 sizeof(uint16_t),
5752 GFP_KERNEL);
5753 if (!phba->vpi_ids) {
5754 kfree(phba->vpi_bmask);
5755 rc = -ENOMEM;
5756 goto lpfc_sli_hba_setup_error;
5757 }
5758 for (i = 0; i < phba->max_vpi; i++)
5759 phba->vpi_ids[i] = i;
5760 }
5761 }
5762
5763 /* Init HBQs */
5764 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5765 rc = lpfc_sli_hbq_setup(phba);
5766 if (rc)
5767 goto lpfc_sli_hba_setup_error;
5768 }
5769 spin_lock_irq(&phba->hbalock);
5770 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5771 spin_unlock_irq(&phba->hbalock);
5772
5773 rc = lpfc_config_port_post(phba);
5774 if (rc)
5775 goto lpfc_sli_hba_setup_error;
5776
5777 return rc;
5778
5779lpfc_sli_hba_setup_error:
5780 phba->link_state = LPFC_HBA_ERROR;
5781 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5782 "0445 Firmware initialization failed\n");
5783 return rc;
5784}
5785
5786/**
5787 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5788 * @phba: Pointer to HBA context object.
5789 *
 5790 * This function issues a dump mailbox command to read config region
 5791 * 23, parses the records in the region, and populates the driver
 5792 * data structures.
5793 **/
5794static int
5795lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5796{
5797 LPFC_MBOXQ_t *mboxq;
5798 struct lpfc_dmabuf *mp;
5799 struct lpfc_mqe *mqe;
5800 uint32_t data_length;
5801 int rc;
5802
5803 /* Program the default value of vlan_id and fc_map */
5804 phba->valid_vlan = 0;
5805 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5806 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5807 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5808
5809 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5810 if (!mboxq)
5811 return -ENOMEM;
5812
5813 mqe = &mboxq->u.mqe;
5814 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5815 rc = -ENOMEM;
5816 goto out_free_mboxq;
5817 }
5818
5819 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5820 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5821
5822 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5823 "(%d):2571 Mailbox cmd x%x Status x%x "
5824 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5825 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5826 "CQ: x%x x%x x%x x%x\n",
5827 mboxq->vport ? mboxq->vport->vpi : 0,
5828 bf_get(lpfc_mqe_command, mqe),
5829 bf_get(lpfc_mqe_status, mqe),
5830 mqe->un.mb_words[0], mqe->un.mb_words[1],
5831 mqe->un.mb_words[2], mqe->un.mb_words[3],
5832 mqe->un.mb_words[4], mqe->un.mb_words[5],
5833 mqe->un.mb_words[6], mqe->un.mb_words[7],
5834 mqe->un.mb_words[8], mqe->un.mb_words[9],
5835 mqe->un.mb_words[10], mqe->un.mb_words[11],
5836 mqe->un.mb_words[12], mqe->un.mb_words[13],
5837 mqe->un.mb_words[14], mqe->un.mb_words[15],
5838 mqe->un.mb_words[16], mqe->un.mb_words[50],
5839 mboxq->mcqe.word0,
5840 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5841 mboxq->mcqe.trailer);
5842
5843 if (rc) {
5844 rc = -EIO;
5845 goto out_free_mboxq;
5846 }
5847 data_length = mqe->un.mb_words[5];
5848 if (data_length > DMP_RGN23_SIZE) {
5849 rc = -EIO;
5850 goto out_free_mboxq;
5851 }
5852
5853 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5854 rc = 0;
5855
5856out_free_mboxq:
5857 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
5858 return rc;
5859}
5860
5861/**
5862 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5863 * @phba: pointer to lpfc hba data structure.
5864 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5865 * @vpd: pointer to the memory to hold resulting port vpd data.
5866 * @vpd_size: On input, the number of bytes allocated to @vpd.
5867 * On output, the number of data bytes in @vpd.
5868 *
5869 * This routine executes a READ_REV SLI4 mailbox command. In
5870 * addition, this routine gets the port vpd data.
5871 *
5872 * Return codes
5873 * 0 - successful
 5874 * -ENOMEM - could not allocate memory.
5875 **/
5876static int
5877lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5878 uint8_t *vpd, uint32_t *vpd_size)
5879{
5880 int rc = 0;
5881 uint32_t dma_size;
5882 struct lpfc_dmabuf *dmabuf;
5883 struct lpfc_mqe *mqe;
5884
5885 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5886 if (!dmabuf)
5887 return -ENOMEM;
5888
5889 /*
5890 * Get a DMA buffer for the vpd data resulting from the READ_REV
5891 * mailbox command.
5892 */
5893 dma_size = *vpd_size;
5894 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5895 &dmabuf->phys, GFP_KERNEL);
5896 if (!dmabuf->virt) {
5897 kfree(dmabuf);
5898 return -ENOMEM;
5899 }
5900
5901 /*
5902 * The SLI4 implementation of READ_REV conflicts at word1,
5903 * bits 31:16 and SLI4 adds vpd functionality not present
5904 * in SLI3. This code corrects the conflicts.
5905 */
5906 lpfc_read_rev(phba, mboxq);
5907 mqe = &mboxq->u.mqe;
5908 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5909 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5910 mqe->un.read_rev.word1 &= 0x0000FFFF;
5911 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5912 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5913
5914 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5915 if (rc) {
5916 dma_free_coherent(&phba->pcidev->dev, dma_size,
5917 dmabuf->virt, dmabuf->phys);
5918 kfree(dmabuf);
5919 return -EIO;
5920 }
5921
5922 /*
5923 * The available vpd length cannot be bigger than the
5924 * DMA buffer passed to the port. Catch the less than
5925 * case and update the caller's size.
5926 */
5927 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5928 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5929
5930 memcpy(vpd, dmabuf->virt, *vpd_size);
5931
5932 dma_free_coherent(&phba->pcidev->dev, dma_size,
5933 dmabuf->virt, dmabuf->phys);
5934 kfree(dmabuf);
5935 return 0;
5936}
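/*
 * Usage sketch (illustrative; the buffer size is an arbitrary example,
 * not a driver constant): callers provide a buffer and let the routine
 * shrink *vpd_size to the number of bytes the port actually returned.
 *
 *	uint8_t vpd[1024];
 *	uint32_t vpd_size = sizeof(vpd);
 *
 *	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
 *	if (!rc)
 *		... parse vpd_size bytes of vpd ...
 */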
5937
5938/**
5939 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5940 * @phba: pointer to lpfc hba data structure.
5941 *
 5942 * This routine retrieves the controller attributes of the SLI4 device
 5943 * this PCI function is attached to.
5944 *
5945 * Return codes
5946 * 0 - successful
5947 * otherwise - failed to retrieve controller attributes
5948 **/
5949static int
5950lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5951{
5952 LPFC_MBOXQ_t *mboxq;
5953 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5954 struct lpfc_controller_attribute *cntl_attr;
5955 void *virtaddr = NULL;
5956 uint32_t alloclen, reqlen;
5957 uint32_t shdr_status, shdr_add_status;
5958 union lpfc_sli4_cfg_shdr *shdr;
5959 int rc;
5960
5961 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5962 if (!mboxq)
5963 return -ENOMEM;
5964
5965 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5966 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5967 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5968 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5969 LPFC_SLI4_MBX_NEMBED);
5970
5971 if (alloclen < reqlen) {
5972 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5973 "3084 Allocated DMA memory size (%d) is "
5974 "less than the requested DMA memory size "
5975 "(%d)\n", alloclen, reqlen);
5976 rc = -ENOMEM;
5977 goto out_free_mboxq;
5978 }
5979 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5980 virtaddr = mboxq->sge_array->addr[0];
5981 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5982 shdr = &mbx_cntl_attr->cfg_shdr;
5983 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5984 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5985 if (shdr_status || shdr_add_status || rc) {
5986 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5987 "3085 Mailbox x%x (x%x/x%x) failed, "
5988 "rc:x%x, status:x%x, add_status:x%x\n",
5989 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5990 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5991 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5992 rc, shdr_status, shdr_add_status);
5993 rc = -ENXIO;
5994 goto out_free_mboxq;
5995 }
5996
5997 cntl_attr = &mbx_cntl_attr->cntl_attr;
5998 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5999 phba->sli4_hba.lnk_info.lnk_tp =
6000 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
6001 phba->sli4_hba.lnk_info.lnk_no =
6002 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
6003 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
6004 phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
6005
6006 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
6007 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
6008 sizeof(phba->BIOSVersion));
6009
6010 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6011 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
6012 "flash_id: x%02x, asic_rev: x%02x\n",
6013 phba->sli4_hba.lnk_info.lnk_tp,
6014 phba->sli4_hba.lnk_info.lnk_no,
6015 phba->BIOSVersion, phba->sli4_hba.flash_id,
6016 phba->sli4_hba.asic_rev);
6017out_free_mboxq:
6018 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6019 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6020 else
6021 mempool_free(mboxq, phba->mbox_mem_pool);
6022 return rc;
6023}
6024
6025/**
6026 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6027 * @phba: pointer to lpfc hba data structure.
6028 *
 6029 * This routine retrieves the SLI4 device physical port name that this
 6030 * PCI function is attached to.
6031 *
6032 * Return codes
6033 * 0 - successful
6034 * otherwise - failed to retrieve physical port name
6035 **/
6036static int
6037lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6038{
6039 LPFC_MBOXQ_t *mboxq;
6040 struct lpfc_mbx_get_port_name *get_port_name;
6041 uint32_t shdr_status, shdr_add_status;
6042 union lpfc_sli4_cfg_shdr *shdr;
6043 char cport_name = 0;
6044 int rc;
6045
6046 /* We assume nothing at this point */
6047 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6048 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6049
6050 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6051 if (!mboxq)
6052 return -ENOMEM;
6053 /* obtain link type and link number via READ_CONFIG */
6054 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6055 lpfc_sli4_read_config(phba);
6056 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6057 goto retrieve_ppname;
6058
6059 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
6060 rc = lpfc_sli4_get_ctl_attr(phba);
6061 if (rc)
6062 goto out_free_mboxq;
6063
6064retrieve_ppname:
6065 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6066 LPFC_MBOX_OPCODE_GET_PORT_NAME,
6067 sizeof(struct lpfc_mbx_get_port_name) -
6068 sizeof(struct lpfc_sli4_cfg_mhdr),
6069 LPFC_SLI4_MBX_EMBED);
6070 get_port_name = &mboxq->u.mqe.un.get_port_name;
6071 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6072 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6073 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6074 phba->sli4_hba.lnk_info.lnk_tp);
6075 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6076 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6077 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6078 if (shdr_status || shdr_add_status || rc) {
6079 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6080 "3087 Mailbox x%x (x%x/x%x) failed: "
6081 "rc:x%x, status:x%x, add_status:x%x\n",
6082 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6083 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6084 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6085 rc, shdr_status, shdr_add_status);
6086 rc = -ENXIO;
6087 goto out_free_mboxq;
6088 }
6089 switch (phba->sli4_hba.lnk_info.lnk_no) {
6090 case LPFC_LINK_NUMBER_0:
6091 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6092 &get_port_name->u.response);
6093 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6094 break;
6095 case LPFC_LINK_NUMBER_1:
6096 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6097 &get_port_name->u.response);
6098 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6099 break;
6100 case LPFC_LINK_NUMBER_2:
6101 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6102 &get_port_name->u.response);
6103 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6104 break;
6105 case LPFC_LINK_NUMBER_3:
6106 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6107 &get_port_name->u.response);
6108 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6109 break;
6110 default:
6111 break;
6112 }
6113
6114 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6115 phba->Port[0] = cport_name;
6116 phba->Port[1] = '\0';
6117 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6118 "3091 SLI get port name: %s\n", phba->Port);
6119 }
6120
6121out_free_mboxq:
6122 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6123 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6124 else
6125 mempool_free(mboxq, phba->mbox_mem_pool);
6126 return rc;
6127}
6128
6129/**
6130 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6131 * @phba: pointer to lpfc hba data structure.
6132 *
 6133 * This routine is called to explicitly arm the SLI4 device's completion and
 6134 * event queues.
6135 **/
6136static void
6137lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6138{
6139 int qidx;
6140 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6141 struct lpfc_sli4_hdw_queue *qp;
6142 struct lpfc_queue *eq;
6143
6144 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6145 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6146 if (sli4_hba->nvmels_cq)
6147 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6148 LPFC_QUEUE_REARM);
6149
6150 if (sli4_hba->hdwq) {
6151 /* Loop thru all Hardware Queues */
6152 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6153 qp = &sli4_hba->hdwq[qidx];
6154 /* ARM the corresponding CQ */
6155 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6156 LPFC_QUEUE_REARM);
6157 }
6158
6159 /* Loop thru all IRQ vectors */
6160 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6161 eq = sli4_hba->hba_eq_hdl[qidx].eq;
6162 /* ARM the corresponding EQ */
6163 sli4_hba->sli4_write_eq_db(phba, eq,
6164 0, LPFC_QUEUE_REARM);
6165 }
6166 }
6167
6168 if (phba->nvmet_support) {
6169 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6170 sli4_hba->sli4_write_cq_db(phba,
6171 sli4_hba->nvmet_cqset[qidx], 0,
6172 LPFC_QUEUE_REARM);
6173 }
6174 }
6175}
6176
6177/**
6178 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6179 * @phba: Pointer to HBA context object.
6180 * @type: The resource extent type.
6181 * @extnt_count: buffer to hold port available extent count.
6182 * @extnt_size: buffer to hold element count per extent.
6183 *
 6184 * This function calls the port and retrieves the number of available
6185 * extents and their size for a particular extent type.
6186 *
6187 * Returns: 0 if successful. Nonzero otherwise.
6188 **/
6189int
6190lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6191 uint16_t *extnt_count, uint16_t *extnt_size)
6192{
6193 int rc = 0;
6194 uint32_t length;
6195 uint32_t mbox_tmo;
6196 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6197 LPFC_MBOXQ_t *mbox;
6198
6199 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6200 if (!mbox)
6201 return -ENOMEM;
6202
6203 /* Find out how many extents are available for this resource type */
6204 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6205 sizeof(struct lpfc_sli4_cfg_mhdr));
6206 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6207 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6208 length, LPFC_SLI4_MBX_EMBED);
6209
6210 /* Send an extents count of 0 - the GET doesn't use it. */
6211 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6212 LPFC_SLI4_MBX_EMBED);
6213 if (unlikely(rc)) {
6214 rc = -EIO;
6215 goto err_exit;
6216 }
6217
6218 if (!phba->sli4_hba.intr_enable)
6219 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6220 else {
6221 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6222 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6223 }
6224 if (unlikely(rc)) {
6225 rc = -EIO;
6226 goto err_exit;
6227 }
6228
6229 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6230 if (bf_get(lpfc_mbox_hdr_status,
6231 &rsrc_info->header.cfg_shdr.response)) {
6232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6233 "2930 Failed to get resource extents "
6234 "Status 0x%x Add'l Status 0x%x\n",
6235 bf_get(lpfc_mbox_hdr_status,
6236 &rsrc_info->header.cfg_shdr.response),
6237 bf_get(lpfc_mbox_hdr_add_status,
6238 &rsrc_info->header.cfg_shdr.response));
6239 rc = -EIO;
6240 goto err_exit;
6241 }
6242
6243 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6244 &rsrc_info->u.rsp);
6245 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6246 &rsrc_info->u.rsp);
6247
6248 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6249 "3162 Retrieved extents type-%d from port: count:%d, "
6250 "size:%d\n", type, *extnt_count, *extnt_size);
6251
6252err_exit:
6253 mempool_free(mbox, phba->mbox_mem_pool);
6254 return rc;
6255}
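/*
 * Usage sketch (illustrative; "total_xri" is a hypothetical local, not a
 * driver field): query the XRI extent geometry and derive the total
 * number of XRIs the port can provide.
 *
 *	uint16_t ext_cnt, ext_size;
 *	uint32_t total_xri;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size))
 *		total_xri = ext_cnt * ext_size;
 */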
6256
6257/**
6258 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6259 * @phba: Pointer to HBA context object.
6260 * @type: The extent type to check.
6261 *
6262 * This function reads the current available extents from the port and checks
6263 * if the extent count or extent size has changed since the last access.
 6264 * Callers use this routine post port reset to understand if there is an
 6265 * extent reprovisioning requirement.
6266 *
6267 * Returns:
6268 * -Error: error indicates problem.
6269 * 1: Extent count or size has changed.
6270 * 0: No changes.
6271 **/
6272static int
6273lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6274{
6275 uint16_t curr_ext_cnt, rsrc_ext_cnt;
6276 uint16_t size_diff, rsrc_ext_size;
6277 int rc = 0;
6278 struct lpfc_rsrc_blks *rsrc_entry;
6279 struct list_head *rsrc_blk_list = NULL;
6280
6281 size_diff = 0;
6282 curr_ext_cnt = 0;
6283 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6284 &rsrc_ext_cnt,
6285 &rsrc_ext_size);
6286 if (unlikely(rc))
6287 return -EIO;
6288
6289 switch (type) {
6290 case LPFC_RSC_TYPE_FCOE_RPI:
6291 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6292 break;
6293 case LPFC_RSC_TYPE_FCOE_VPI:
6294 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6295 break;
6296 case LPFC_RSC_TYPE_FCOE_XRI:
6297 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6298 break;
6299 case LPFC_RSC_TYPE_FCOE_VFI:
6300 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6301 break;
6302 default:
6303 break;
6304 }
6305
6306 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6307 curr_ext_cnt++;
6308 if (rsrc_entry->rsrc_size != rsrc_ext_size)
6309 size_diff++;
6310 }
6311
6312 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6313 rc = 1;
6314
6315 return rc;
6316}
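/*
 * Usage sketch (illustrative, error handling elided): after a port
 * reset, callers reprovision only when the port reports a changed
 * extent layout.
 *
 *	rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI);
 *	if (rc == 1)
 *		... dealloc and re-alloc the RPI extents ...
 *	else if (rc < 0)
 *		... error recovery ...
 */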
6317
6318/**
 6319 * lpfc_sli4_cfg_post_extnts - Post an extents allocation request
6320 * @phba: Pointer to HBA context object.
6321 * @extnt_cnt: number of available extents.
6322 * @type: the extent type (rpi, xri, vfi, vpi).
6323 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6324 * @mbox: pointer to the caller's allocated mailbox structure.
6325 *
 6326 * This function executes the extents allocation request and sizes
 6327 * the mailbox, embedded or non-embedded, to hold the allocated
 6328 * extent ids. It is the caller's responsibility to evaluate
 6329 * the response.
6330 *
6331 * Returns:
6332 * -Error: Error value describes the condition found.
6333 * 0: if successful
6334 **/
6335static int
6336lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6337 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6338{
6339 int rc = 0;
6340 uint32_t req_len;
6341 uint32_t emb_len;
6342 uint32_t alloc_len, mbox_tmo;
6343
6344 /* Calculate the total requested length of the dma memory */
6345 req_len = extnt_cnt * sizeof(uint16_t);
6346
6347 /*
6348 * Calculate the size of an embedded mailbox. The uint32_t
 6349	 * accounts for the extent-specific word.
6350 */
6351 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6352 sizeof(uint32_t);
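	/*
	 * Illustrative sizing (the struct sizes here are assumptions, not
	 * taken from the headers): with a 256-byte MAILBOX_t and a 16-byte
	 * mbox_header, emb_len = 256 - 16 - 4 = 236 bytes, so up to 118
	 * 16-bit extent ids would fit in an embedded mailbox before the
	 * code below falls back to a non-embedded one.
	 */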
6353
6354 /*
6355 * Presume the allocation and response will fit into an embedded
6356 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6357 */
6358 *emb = LPFC_SLI4_MBX_EMBED;
6359 if (req_len > emb_len) {
6360 req_len = extnt_cnt * sizeof(uint16_t) +
6361 sizeof(union lpfc_sli4_cfg_shdr) +
6362 sizeof(uint32_t);
6363 *emb = LPFC_SLI4_MBX_NEMBED;
6364 }
6365
6366 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6367 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6368 req_len, *emb);
6369 if (alloc_len < req_len) {
6370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6371 "2982 Allocated DMA memory size (x%x) is "
6372 "less than the requested DMA memory "
6373 "size (x%x)\n", alloc_len, req_len);
6374 return -ENOMEM;
6375 }
6376 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6377 if (unlikely(rc))
6378 return -EIO;
6379
6380 if (!phba->sli4_hba.intr_enable)
6381 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6382 else {
6383 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6384 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6385 }
6386
6387 if (unlikely(rc))
6388 rc = -EIO;
6389 return rc;
6390}
6391
6392/**
6393 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6394 * @phba: Pointer to HBA context object.
6395 * @type: The resource extent type to allocate.
6396 *
6397 * This function allocates the number of elements for the specified
6398 * resource type.
6399 **/
6400static int
6401lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6402{
6403 bool emb = false;
6404 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6405 uint16_t rsrc_id, rsrc_start, j, k;
6406 uint16_t *ids;
6407 int i, rc;
6408 unsigned long longs;
6409 unsigned long *bmask;
6410 struct lpfc_rsrc_blks *rsrc_blks;
6411 LPFC_MBOXQ_t *mbox;
6412 uint32_t length;
6413 struct lpfc_id_range *id_array = NULL;
6414 void *virtaddr = NULL;
6415 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6416 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6417 struct list_head *ext_blk_list;
6418
6419 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6420 &rsrc_cnt,
6421 &rsrc_size);
6422 if (unlikely(rc))
6423 return -EIO;
6424
6425 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6427 "3009 No available Resource Extents "
6428 "for resource type 0x%x: Count: 0x%x, "
6429 "Size 0x%x\n", type, rsrc_cnt,
6430 rsrc_size);
6431 return -ENOMEM;
6432 }
6433
6434 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6435 "2903 Post resource extents type-0x%x: "
6436 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6437
6438 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6439 if (!mbox)
6440 return -ENOMEM;
6441
6442 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6443 if (unlikely(rc)) {
6444 rc = -EIO;
6445 goto err_exit;
6446 }
6447
6448 /*
6449 * Figure out where the response is located. Then get local pointers
 6450	 * to the response data. The port is not guaranteed to honor the full
 6451	 * requested extent count, so update the local variable with the
 6452	 * allocated count from the port.
6453 */
6454 if (emb == LPFC_SLI4_MBX_EMBED) {
6455 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6456 id_array = &rsrc_ext->u.rsp.id[0];
6457 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6458 } else {
6459 virtaddr = mbox->sge_array->addr[0];
6460 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6461 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6462 id_array = &n_rsrc->id;
6463 }
6464
6465 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6466 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6467
6468 /*
6469 * Based on the resource size and count, correct the base and max
6470 * resource values.
6471 */
6472 length = sizeof(struct lpfc_rsrc_blks);
6473 switch (type) {
6474 case LPFC_RSC_TYPE_FCOE_RPI:
6475 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6476 sizeof(unsigned long),
6477 GFP_KERNEL);
6478 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6479 rc = -ENOMEM;
6480 goto err_exit;
6481 }
6482 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6483 sizeof(uint16_t),
6484 GFP_KERNEL);
6485 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6486 kfree(phba->sli4_hba.rpi_bmask);
6487 rc = -ENOMEM;
6488 goto err_exit;
6489 }
6490
6491 /*
6492 * The next_rpi was initialized with the maximum available
6493 * count but the port may allocate a smaller number. Catch
6494 * that case and update the next_rpi.
6495 */
6496 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6497
6498 /* Initialize local ptrs for common extent processing later. */
6499 bmask = phba->sli4_hba.rpi_bmask;
6500 ids = phba->sli4_hba.rpi_ids;
6501 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6502 break;
6503 case LPFC_RSC_TYPE_FCOE_VPI:
6504 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6505 GFP_KERNEL);
6506 if (unlikely(!phba->vpi_bmask)) {
6507 rc = -ENOMEM;
6508 goto err_exit;
6509 }
6510 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6511 GFP_KERNEL);
6512 if (unlikely(!phba->vpi_ids)) {
6513 kfree(phba->vpi_bmask);
6514 rc = -ENOMEM;
6515 goto err_exit;
6516 }
6517
6518 /* Initialize local ptrs for common extent processing later. */
6519 bmask = phba->vpi_bmask;
6520 ids = phba->vpi_ids;
6521 ext_blk_list = &phba->lpfc_vpi_blk_list;
6522 break;
6523 case LPFC_RSC_TYPE_FCOE_XRI:
6524 phba->sli4_hba.xri_bmask = kcalloc(longs,
6525 sizeof(unsigned long),
6526 GFP_KERNEL);
6527 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6528 rc = -ENOMEM;
6529 goto err_exit;
6530 }
6531 phba->sli4_hba.max_cfg_param.xri_used = 0;
6532 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6533 sizeof(uint16_t),
6534 GFP_KERNEL);
6535 if (unlikely(!phba->sli4_hba.xri_ids)) {
6536 kfree(phba->sli4_hba.xri_bmask);
6537 rc = -ENOMEM;
6538 goto err_exit;
6539 }
6540
6541 /* Initialize local ptrs for common extent processing later. */
6542 bmask = phba->sli4_hba.xri_bmask;
6543 ids = phba->sli4_hba.xri_ids;
6544 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6545 break;
6546 case LPFC_RSC_TYPE_FCOE_VFI:
6547 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6548 sizeof(unsigned long),
6549 GFP_KERNEL);
6550 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6551 rc = -ENOMEM;
6552 goto err_exit;
6553 }
6554 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6555 sizeof(uint16_t),
6556 GFP_KERNEL);
6557 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6558 kfree(phba->sli4_hba.vfi_bmask);
6559 rc = -ENOMEM;
6560 goto err_exit;
6561 }
6562
6563 /* Initialize local ptrs for common extent processing later. */
6564 bmask = phba->sli4_hba.vfi_bmask;
6565 ids = phba->sli4_hba.vfi_ids;
6566 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6567 break;
6568 default:
6569 /* Unsupported Opcode. Fail call. */
6570 id_array = NULL;
6571 bmask = NULL;
6572 ids = NULL;
6573 ext_blk_list = NULL;
6574 goto err_exit;
6575 }
6576
6577 /*
6578 * Complete initializing the extent configuration with the
6579 * allocated ids assigned to this function. The bitmask serves
6580 * as an index into the array and manages the available ids. The
6581 * array just stores the ids communicated to the port via the wqes.
6582 */
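	/*
	 * For example (illustrative values): a response word carrying
	 * rsrc_start = 100 with rsrc_size = 8 expands to
	 * ids[j..j+7] = 100..107, and bit j of bmask then tracks whether
	 * ids[j] is in use.
	 */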
6583 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6584 if ((i % 2) == 0)
6585 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6586 &id_array[k]);
6587 else
6588 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6589 &id_array[k]);
6590
6591 rsrc_blks = kzalloc(length, GFP_KERNEL);
6592 if (unlikely(!rsrc_blks)) {
6593 rc = -ENOMEM;
6594 kfree(bmask);
6595 kfree(ids);
6596 goto err_exit;
6597 }
6598 rsrc_blks->rsrc_start = rsrc_id;
6599 rsrc_blks->rsrc_size = rsrc_size;
6600 list_add_tail(&rsrc_blks->list, ext_blk_list);
6601 rsrc_start = rsrc_id;
6602 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6603 phba->sli4_hba.io_xri_start = rsrc_start +
6604 lpfc_sli4_get_iocb_cnt(phba);
6605 }
6606
6607 while (rsrc_id < (rsrc_start + rsrc_size)) {
6608 ids[j] = rsrc_id;
6609 rsrc_id++;
6610 j++;
6611 }
6612 /* Entire word processed. Get next word.*/
6613 if ((i % 2) == 1)
6614 k++;
6615 }
6616 err_exit:
6617 lpfc_sli4_mbox_cmd_free(phba, mbox);
6618 return rc;
6619}
6620
6621
6622
6623/**
6624 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6625 * @phba: Pointer to HBA context object.
6626 * @type: the extent's type.
6627 *
6628 * This function deallocates all extents of a particular resource type.
6629 * SLI4 does not allow for deallocating a particular extent range. It
6630 * is the caller's responsibility to release all kernel memory resources.
6631 **/
6632static int
6633lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6634{
6635 int rc;
6636 uint32_t length, mbox_tmo = 0;
6637 LPFC_MBOXQ_t *mbox;
6638 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6639 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6640
6641 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6642 if (!mbox)
6643 return -ENOMEM;
6644
6645 /*
 6646	 * This function sends an embedded mailbox because it only sends
 6647	 * the resource type. All extents of this type are released by the
6648 * port.
6649 */
6650 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6651 sizeof(struct lpfc_sli4_cfg_mhdr));
6652 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6653 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6654 length, LPFC_SLI4_MBX_EMBED);
6655
6656 /* Send an extents count of 0 - the dealloc doesn't use it. */
6657 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6658 LPFC_SLI4_MBX_EMBED);
6659 if (unlikely(rc)) {
6660 rc = -EIO;
6661 goto out_free_mbox;
6662 }
6663 if (!phba->sli4_hba.intr_enable)
6664 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6665 else {
6666 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6667 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6668 }
6669 if (unlikely(rc)) {
6670 rc = -EIO;
6671 goto out_free_mbox;
6672 }
6673
6674 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6675 if (bf_get(lpfc_mbox_hdr_status,
6676 &dealloc_rsrc->header.cfg_shdr.response)) {
6677 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6678 "2919 Failed to release resource extents "
6679 "for type %d - Status 0x%x Add'l Status 0x%x. "
6680 "Resource memory not released.\n",
6681 type,
6682 bf_get(lpfc_mbox_hdr_status,
6683 &dealloc_rsrc->header.cfg_shdr.response),
6684 bf_get(lpfc_mbox_hdr_add_status,
6685 &dealloc_rsrc->header.cfg_shdr.response));
6686 rc = -EIO;
6687 goto out_free_mbox;
6688 }
6689
6690 /* Release kernel memory resources for the specific type. */
6691 switch (type) {
6692 case LPFC_RSC_TYPE_FCOE_VPI:
6693 kfree(phba->vpi_bmask);
6694 kfree(phba->vpi_ids);
6695 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6696 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6697 &phba->lpfc_vpi_blk_list, list) {
6698 list_del_init(&rsrc_blk->list);
6699 kfree(rsrc_blk);
6700 }
6701 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6702 break;
6703 case LPFC_RSC_TYPE_FCOE_XRI:
6704 kfree(phba->sli4_hba.xri_bmask);
6705 kfree(phba->sli4_hba.xri_ids);
6706 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6707 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6708 list_del_init(&rsrc_blk->list);
6709 kfree(rsrc_blk);
6710 }
6711 break;
6712 case LPFC_RSC_TYPE_FCOE_VFI:
6713 kfree(phba->sli4_hba.vfi_bmask);
6714 kfree(phba->sli4_hba.vfi_ids);
6715 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6716 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6717 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6718 list_del_init(&rsrc_blk->list);
6719 kfree(rsrc_blk);
6720 }
6721 break;
6722 case LPFC_RSC_TYPE_FCOE_RPI:
6723 /* RPI bitmask and physical id array are cleaned up earlier. */
6724 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6725 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6726 list_del_init(&rsrc_blk->list);
6727 kfree(rsrc_blk);
6728 }
6729 break;
6730 default:
6731 break;
6732 }
6733
6734 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6735
6736 out_free_mbox:
6737 mempool_free(mbox, phba->mbox_mem_pool);
6738 return rc;
6739}
6740
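/**
 * lpfc_set_features - Prepare a SET_FEATURES mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: pointer to the caller's allocated mailbox structure.
 * @feature: feature to encode (LPFC_SET_UE_RECOVERY, LPFC_SET_MDS_DIAGS,
 *           LPFC_SET_CGN_SIGNAL, LPFC_SET_DUAL_DUMP, LPFC_SET_ENABLE_MI
 *           or LPFC_SET_ENABLE_CMF).
 *
 * This routine only builds the SET_FEATURES mailbox command in @mbox; it
 * is the caller's responsibility to issue the command and evaluate its
 * completion status.
 **/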
6741static void
6742lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6743 uint32_t feature)
6744{
6745 uint32_t len;
6746 u32 sig_freq = 0;
6747
6748 len = sizeof(struct lpfc_mbx_set_feature) -
6749 sizeof(struct lpfc_sli4_cfg_mhdr);
6750 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6751 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6752 LPFC_SLI4_MBX_EMBED);
6753
6754 switch (feature) {
6755 case LPFC_SET_UE_RECOVERY:
6756 bf_set(lpfc_mbx_set_feature_UER,
6757 &mbox->u.mqe.un.set_feature, 1);
6758 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6759 mbox->u.mqe.un.set_feature.param_len = 8;
6760 break;
6761 case LPFC_SET_MDS_DIAGS:
6762 bf_set(lpfc_mbx_set_feature_mds,
6763 &mbox->u.mqe.un.set_feature, 1);
6764 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6765 &mbox->u.mqe.un.set_feature, 1);
6766 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6767 mbox->u.mqe.un.set_feature.param_len = 8;
6768 break;
6769 case LPFC_SET_CGN_SIGNAL:
6770 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6771 sig_freq = 0;
6772 else
6773 sig_freq = phba->cgn_sig_freq;
6774
6775 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6776 bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6777 &mbox->u.mqe.un.set_feature, sig_freq);
6778 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6779 &mbox->u.mqe.un.set_feature, sig_freq);
6780 }
6781
6782 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6783 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6784 &mbox->u.mqe.un.set_feature, sig_freq);
6785
6786 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6787 phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6788 sig_freq = 0;
6789 else
6790 sig_freq = lpfc_acqe_cgn_frequency;
6791
6792 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6793 &mbox->u.mqe.un.set_feature, sig_freq);
6794
6795 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6796 mbox->u.mqe.un.set_feature.param_len = 12;
6797 break;
6798 case LPFC_SET_DUAL_DUMP:
6799 bf_set(lpfc_mbx_set_feature_dd,
6800 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6801 bf_set(lpfc_mbx_set_feature_ddquery,
6802 &mbox->u.mqe.un.set_feature, 0);
6803 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6804 mbox->u.mqe.un.set_feature.param_len = 4;
6805 break;
6806 case LPFC_SET_ENABLE_MI:
6807 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6808 mbox->u.mqe.un.set_feature.param_len = 4;
6809 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6810 phba->pport->cfg_lun_queue_depth);
6811 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6812 phba->sli4_hba.pc_sli4_params.mi_ver);
6813 break;
6814 case LPFC_SET_ENABLE_CMF:
6815 bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
6816 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6817 mbox->u.mqe.un.set_feature.param_len = 4;
6818 bf_set(lpfc_mbx_set_feature_cmf,
6819 &mbox->u.mqe.un.set_feature, 1);
6820 break;
6821 }
6822 return;
6823}
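/*
 * Usage sketch (illustrative only, error handling elided): a caller
 * allocates a mailbox, encodes the feature, issues the command by
 * polling, and frees the mailbox.
 *
 *	LPFC_MBOXQ_t *mbox;
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mbox) {
 *		lpfc_set_features(phba, mbox, LPFC_SET_DUAL_DUMP);
 *		if (lpfc_sli_issue_mbox(phba, mbox, MBX_POLL) != MBX_SUCCESS)
 *			... log the failure ...
 *		mempool_free(mbox, phba->mbox_mem_pool);
 *	}
 */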
6824
6825/**
6826 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6827 * @phba: Pointer to HBA context object.
6828 *
 6829 * Disable FW logging into host memory on the adapter. This must
 6830 * be done before reading the logs from host memory.
6831 **/
6832void
6833lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6834{
6835 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6836
6837 spin_lock_irq(&phba->hbalock);
6838 ras_fwlog->state = INACTIVE;
6839 spin_unlock_irq(&phba->hbalock);
6840
6841 /* Disable FW logging to host memory */
6842 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6843 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6844
6845 /* Wait 10ms for firmware to stop using DMA buffer */
6846 usleep_range(10 * 1000, 20 * 1000);
6847}
6848
6849/**
6850 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6851 * @phba: Pointer to HBA context object.
6852 *
6853 * This function is called to free memory allocated for RAS FW logging
6854 * support in the driver.
6855 **/
6856void
6857lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6858{
6859 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6860 struct lpfc_dmabuf *dmabuf, *next;
6861
6862 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6863 list_for_each_entry_safe(dmabuf, next,
6864 &ras_fwlog->fwlog_buff_list,
6865 list) {
6866 list_del(&dmabuf->list);
6867 dma_free_coherent(&phba->pcidev->dev,
6868 LPFC_RAS_MAX_ENTRY_SIZE,
6869 dmabuf->virt, dmabuf->phys);
6870 kfree(dmabuf);
6871 }
6872 }
6873
6874 if (ras_fwlog->lwpd.virt) {
6875 dma_free_coherent(&phba->pcidev->dev,
6876 sizeof(uint32_t) * 2,
6877 ras_fwlog->lwpd.virt,
6878 ras_fwlog->lwpd.phys);
6879 ras_fwlog->lwpd.virt = NULL;
6880 }
6881
6882 spin_lock_irq(&phba->hbalock);
6883 ras_fwlog->state = INACTIVE;
6884 spin_unlock_irq(&phba->hbalock);
6885}
6886
6887/**
 6888 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
6889 * @phba: Pointer to HBA context object.
6890 * @fwlog_buff_count: Count of buffers to be created.
6891 *
 6892 * This routine allocates DMA memory for the Log Write Position Data
 6893 * [LWPD] and for the buffers posted to the adapter for FW log updates.
 6894 * The buffer count is calculated from the module param ras_fwlog_buffsize.
 6895 * The size of each buffer posted to FW is 64K.
6896 **/
6897
6898static int
6899lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6900 uint32_t fwlog_buff_count)
6901{
6902 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6903 struct lpfc_dmabuf *dmabuf;
6904 int rc = 0, i = 0;
6905
6906 /* Initialize List */
6907 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6908
6909 /* Allocate memory for the LWPD */
6910 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6911 sizeof(uint32_t) * 2,
6912 &ras_fwlog->lwpd.phys,
6913 GFP_KERNEL);
6914 if (!ras_fwlog->lwpd.virt) {
6915 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6916 "6185 LWPD Memory Alloc Failed\n");
6917
6918 return -ENOMEM;
6919 }
6920
6921 ras_fwlog->fw_buffcount = fwlog_buff_count;
6922 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6923 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6924 GFP_KERNEL);
6925 if (!dmabuf) {
6926 rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6186 Memory Alloc failed FW logging\n");
6929 goto free_mem;
6930 }
6931
6932 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6933 LPFC_RAS_MAX_ENTRY_SIZE,
6934 &dmabuf->phys, GFP_KERNEL);
6935 if (!dmabuf->virt) {
6936 kfree(dmabuf);
6937 rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6187 DMA Alloc Failed FW logging\n");
6940 goto free_mem;
6941 }
6942 dmabuf->buffer_tag = i;
6943 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6944 }
6945
6946free_mem:
6947 if (rc)
6948 lpfc_sli4_ras_dma_free(phba);
6949
6950 return rc;
6951}
6952
6953/**
6954 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6955 * @phba: pointer to lpfc hba data structure.
6956 * @pmb: pointer to the driver internal queue element for mailbox command.
6957 *
6958 * Completion handler for driver's RAS MBX command to the device.
6959 **/
6960static void
6961lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6962{
6963 MAILBOX_t *mb;
6964 union lpfc_sli4_cfg_shdr *shdr;
6965 uint32_t shdr_status, shdr_add_status;
6966 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6967
6968 mb = &pmb->u.mb;
6969
6970 shdr = (union lpfc_sli4_cfg_shdr *)
6971 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6972 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6973 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6974
6975 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6976 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6977 "6188 FW LOG mailbox "
6978 "completed with status x%x add_status x%x,"
6979 " mbx status x%x\n",
6980 shdr_status, shdr_add_status, mb->mbxStatus);
6981
6982 ras_fwlog->ras_hwsupport = false;
6983 goto disable_ras;
6984 }
6985
6986 spin_lock_irq(&phba->hbalock);
6987 ras_fwlog->state = ACTIVE;
6988 spin_unlock_irq(&phba->hbalock);
6989 mempool_free(pmb, phba->mbox_mem_pool);
6990
6991 return;
6992
6993disable_ras:
6994 /* Free RAS DMA memory */
6995 lpfc_sli4_ras_dma_free(phba);
6996 mempool_free(pmb, phba->mbox_mem_pool);
6997}
6998
6999/**
7000 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
7001 * @phba: pointer to lpfc hba data structure.
7002 * @fwlog_level: Logging verbosity level.
7003 * @fwlog_enable: Enable/Disable logging.
7004 *
7005 * Initialize memory and post mailbox command to enable FW logging in host
7006 * memory.
7007 **/
7008int
7009lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
7010 uint32_t fwlog_level,
7011 uint32_t fwlog_enable)
7012{
7013 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
7014 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
7015 struct lpfc_dmabuf *dmabuf;
7016 LPFC_MBOXQ_t *mbox;
7017 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
7018 int rc = 0;
7019
7020 spin_lock_irq(&phba->hbalock);
7021 ras_fwlog->state = INACTIVE;
7022 spin_unlock_irq(&phba->hbalock);
7023
7024 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
7025 phba->cfg_ras_fwlog_buffsize);
7026 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
7027
	/*
	 * If re-enabling FW logging support, use the earlier allocated
	 * DMA buffers while posting the MBX command.
	 */
7032 if (!ras_fwlog->lwpd.virt) {
7033 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
7034 if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6189 FW Log Memory Allocation Failed\n");
7037 return rc;
7038 }
7039 }
7040
7041 /* Setup Mailbox command */
7042 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7043 if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6190 RAS MBX Alloc Failed\n");
7046 rc = -ENOMEM;
7047 goto mem_free;
7048 }
7049
7050 ras_fwlog->fw_loglevel = fwlog_level;
7051 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
7052 sizeof(struct lpfc_sli4_cfg_mhdr));
7053
7054 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
7055 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
7056 len, LPFC_SLI4_MBX_EMBED);
7057
7058 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
7059 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
7060 fwlog_enable);
7061 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
7062 ras_fwlog->fw_loglevel);
7063 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
7064 ras_fwlog->fw_buffcount);
7065 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
7066 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
7067
7068 /* Update DMA buffer address */
7069 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
7070 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
7071
7072 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
7073 putPaddrLow(dmabuf->phys);
7074
7075 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7076 putPaddrHigh(dmabuf->phys);
7077 }
7078
7079 /* Update LPWD address */
7080 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7081 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7082
7083 spin_lock_irq(&phba->hbalock);
7084 ras_fwlog->state = REG_INPROGRESS;
7085 spin_unlock_irq(&phba->hbalock);
7086 mbox->vport = phba->pport;
7087 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7088
7089 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7090
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6191 FW-Log Mailbox failed. "
				"status %d mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mbox->u.mqe));
		mempool_free(mbox, phba->mbox_mem_pool);
		rc = -EIO;
		goto mem_free;
	}
	rc = 0;
mem_free:
7102 if (rc)
7103 lpfc_sli4_ras_dma_free(phba);
7104
7105 return rc;
7106}
7107
7108/**
7109 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7110 * @phba: Pointer to HBA context object.
7111 *
7112 * Check if RAS is supported on the adapter and initialize it.
7113 **/
7114void
7115lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7116{
	/* Check if RAS FW logging needs to be enabled */
7118 if (lpfc_check_fwlog_support(phba))
7119 return;
7120
7121 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7122 LPFC_RAS_ENABLE_LOGGING);
7123}
7124
7125/**
7126 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7127 * @phba: Pointer to HBA context object.
7128 *
7129 * This function allocates all SLI4 resource identifiers.
7130 **/
7131int
7132lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7133{
7134 int i, rc, error = 0;
7135 uint16_t count, base;
7136 unsigned long longs;
7137
7138 if (!phba->sli4_hba.rpi_hdrs_in_use)
7139 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7140 if (phba->sli4_hba.extents_in_use) {
7141 /*
7142 * The port supports resource extents. The XRI, VPI, VFI, RPI
7143 * resource extent count must be read and allocated before
7144 * provisioning the resource id arrays.
7145 */
7146 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7147 LPFC_IDX_RSRC_RDY) {
7148 /*
7149 * Extent-based resources are set - the driver could
7150 * be in a port reset. Figure out if any corrective
7151 * actions need to be taken.
7152 */
7153 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7154 LPFC_RSC_TYPE_FCOE_VFI);
7155 if (rc != 0)
7156 error++;
7157 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7158 LPFC_RSC_TYPE_FCOE_VPI);
7159 if (rc != 0)
7160 error++;
7161 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7162 LPFC_RSC_TYPE_FCOE_XRI);
7163 if (rc != 0)
7164 error++;
7165 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7166 LPFC_RSC_TYPE_FCOE_RPI);
7167 if (rc != 0)
7168 error++;
7169
7170 /*
7171 * It's possible that the number of resources
7172 * provided to this port instance changed between
7173 * resets. Detect this condition and reallocate
7174 * resources. Otherwise, there is no action.
7175 */
7176 if (error) {
7177 lpfc_printf_log(phba, KERN_INFO,
7178 LOG_MBOX | LOG_INIT,
7179 "2931 Detected extent resource "
7180 "change. Reallocating all "
7181 "extents.\n");
7182 rc = lpfc_sli4_dealloc_extent(phba,
7183 LPFC_RSC_TYPE_FCOE_VFI);
7184 rc = lpfc_sli4_dealloc_extent(phba,
7185 LPFC_RSC_TYPE_FCOE_VPI);
7186 rc = lpfc_sli4_dealloc_extent(phba,
7187 LPFC_RSC_TYPE_FCOE_XRI);
7188 rc = lpfc_sli4_dealloc_extent(phba,
7189 LPFC_RSC_TYPE_FCOE_RPI);
7190 } else
7191 return 0;
7192 }
7193
7194 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7195 if (unlikely(rc))
7196 goto err_exit;
7197
7198 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7199 if (unlikely(rc))
7200 goto err_exit;
7201
7202 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7203 if (unlikely(rc))
7204 goto err_exit;
7205
7206 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7207 if (unlikely(rc))
7208 goto err_exit;
7209 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7210 LPFC_IDX_RSRC_RDY);
7211 return rc;
7212 } else {
7213 /*
7214 * The port does not support resource extents. The XRI, VPI,
7215 * VFI, RPI resource ids were determined from READ_CONFIG.
7216 * Just allocate the bitmasks and provision the resource id
7217 * arrays. If a port reset is active, the resources don't
7218 * need any action - just exit.
7219 */
7220 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7221 LPFC_IDX_RSRC_RDY) {
7222 lpfc_sli4_dealloc_resource_identifiers(phba);
7223 lpfc_sli4_remove_rpis(phba);
7224 }
7225 /* RPIs. */
7226 count = phba->sli4_hba.max_cfg_param.max_rpi;
7227 if (count <= 0) {
7228 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7229 "3279 Invalid provisioning of "
7230 "rpi:%d\n", count);
7231 rc = -EINVAL;
7232 goto err_exit;
7233 }
7234 base = phba->sli4_hba.max_cfg_param.rpi_base;
7235 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7236 phba->sli4_hba.rpi_bmask = kcalloc(longs,
7237 sizeof(unsigned long),
7238 GFP_KERNEL);
7239 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7240 rc = -ENOMEM;
7241 goto err_exit;
7242 }
7243 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7244 GFP_KERNEL);
7245 if (unlikely(!phba->sli4_hba.rpi_ids)) {
7246 rc = -ENOMEM;
7247 goto free_rpi_bmask;
7248 }
7249
7250 for (i = 0; i < count; i++)
7251 phba->sli4_hba.rpi_ids[i] = base + i;
7252
7253 /* VPIs. */
7254 count = phba->sli4_hba.max_cfg_param.max_vpi;
7255 if (count <= 0) {
7256 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7257 "3280 Invalid provisioning of "
7258 "vpi:%d\n", count);
7259 rc = -EINVAL;
7260 goto free_rpi_ids;
7261 }
7262 base = phba->sli4_hba.max_cfg_param.vpi_base;
7263 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7264 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7265 GFP_KERNEL);
7266 if (unlikely(!phba->vpi_bmask)) {
7267 rc = -ENOMEM;
7268 goto free_rpi_ids;
7269 }
7270 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7271 GFP_KERNEL);
7272 if (unlikely(!phba->vpi_ids)) {
7273 rc = -ENOMEM;
7274 goto free_vpi_bmask;
7275 }
7276
7277 for (i = 0; i < count; i++)
7278 phba->vpi_ids[i] = base + i;
7279
7280 /* XRIs. */
7281 count = phba->sli4_hba.max_cfg_param.max_xri;
7282 if (count <= 0) {
7283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7284 "3281 Invalid provisioning of "
7285 "xri:%d\n", count);
7286 rc = -EINVAL;
7287 goto free_vpi_ids;
7288 }
7289 base = phba->sli4_hba.max_cfg_param.xri_base;
7290 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7291 phba->sli4_hba.xri_bmask = kcalloc(longs,
7292 sizeof(unsigned long),
7293 GFP_KERNEL);
7294 if (unlikely(!phba->sli4_hba.xri_bmask)) {
7295 rc = -ENOMEM;
7296 goto free_vpi_ids;
7297 }
7298 phba->sli4_hba.max_cfg_param.xri_used = 0;
7299 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7300 GFP_KERNEL);
7301 if (unlikely(!phba->sli4_hba.xri_ids)) {
7302 rc = -ENOMEM;
7303 goto free_xri_bmask;
7304 }
7305
7306 for (i = 0; i < count; i++)
7307 phba->sli4_hba.xri_ids[i] = base + i;
7308
7309 /* VFIs. */
7310 count = phba->sli4_hba.max_cfg_param.max_vfi;
7311 if (count <= 0) {
7312 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7313 "3282 Invalid provisioning of "
7314 "vfi:%d\n", count);
7315 rc = -EINVAL;
7316 goto free_xri_ids;
7317 }
7318 base = phba->sli4_hba.max_cfg_param.vfi_base;
7319 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7320 phba->sli4_hba.vfi_bmask = kcalloc(longs,
7321 sizeof(unsigned long),
7322 GFP_KERNEL);
7323 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7324 rc = -ENOMEM;
7325 goto free_xri_ids;
7326 }
7327 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7328 GFP_KERNEL);
7329 if (unlikely(!phba->sli4_hba.vfi_ids)) {
7330 rc = -ENOMEM;
7331 goto free_vfi_bmask;
7332 }
7333
7334 for (i = 0; i < count; i++)
7335 phba->sli4_hba.vfi_ids[i] = base + i;
7336
7337 /*
7338 * Mark all resources ready. An HBA reset doesn't need
7339 * to reset the initialization.
7340 */
7341 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7342 LPFC_IDX_RSRC_RDY);
7343 return 0;
7344 }
7345
7346 free_vfi_bmask:
7347 kfree(phba->sli4_hba.vfi_bmask);
7348 phba->sli4_hba.vfi_bmask = NULL;
7349 free_xri_ids:
7350 kfree(phba->sli4_hba.xri_ids);
7351 phba->sli4_hba.xri_ids = NULL;
7352 free_xri_bmask:
7353 kfree(phba->sli4_hba.xri_bmask);
7354 phba->sli4_hba.xri_bmask = NULL;
7355 free_vpi_ids:
7356 kfree(phba->vpi_ids);
7357 phba->vpi_ids = NULL;
7358 free_vpi_bmask:
7359 kfree(phba->vpi_bmask);
7360 phba->vpi_bmask = NULL;
7361 free_rpi_ids:
7362 kfree(phba->sli4_hba.rpi_ids);
7363 phba->sli4_hba.rpi_ids = NULL;
7364 free_rpi_bmask:
7365 kfree(phba->sli4_hba.rpi_bmask);
7366 phba->sli4_hba.rpi_bmask = NULL;
7367 err_exit:
7368 return rc;
7369}
7370
7371/**
7372 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7373 * @phba: Pointer to HBA context object.
7374 *
 * This function releases all SLI4 resource identifiers. Resource extents
 * are deallocated when in use; otherwise the resource id arrays and
 * bitmasks are freed.
7377 **/
7378int
7379lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7380{
7381 if (phba->sli4_hba.extents_in_use) {
7382 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7383 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7384 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7385 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7386 } else {
7387 kfree(phba->vpi_bmask);
7388 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7389 kfree(phba->vpi_ids);
7390 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7391 kfree(phba->sli4_hba.xri_bmask);
7392 kfree(phba->sli4_hba.xri_ids);
7393 kfree(phba->sli4_hba.vfi_bmask);
7394 kfree(phba->sli4_hba.vfi_ids);
7395 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7396 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7397 }
7398
7399 return 0;
7400}
7401
7402/**
7403 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7404 * @phba: Pointer to HBA context object.
7405 * @type: The resource extent type.
7406 * @extnt_cnt: buffer to hold port extent count response
7407 * @extnt_size: buffer to hold port extent size response.
7408 *
7409 * This function calls the port to read the host allocated extents
7410 * for a particular type.
7411 **/
7412int
7413lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7414 uint16_t *extnt_cnt, uint16_t *extnt_size)
7415{
7416 bool emb;
7417 int rc = 0;
7418 uint16_t curr_blks = 0;
7419 uint32_t req_len, emb_len;
7420 uint32_t alloc_len, mbox_tmo;
7421 struct list_head *blk_list_head;
7422 struct lpfc_rsrc_blks *rsrc_blk;
7423 LPFC_MBOXQ_t *mbox;
7424 void *virtaddr = NULL;
7425 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7426 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7427 union lpfc_sli4_cfg_shdr *shdr;
7428
7429 switch (type) {
7430 case LPFC_RSC_TYPE_FCOE_VPI:
7431 blk_list_head = &phba->lpfc_vpi_blk_list;
7432 break;
7433 case LPFC_RSC_TYPE_FCOE_XRI:
7434 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7435 break;
7436 case LPFC_RSC_TYPE_FCOE_VFI:
7437 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7438 break;
7439 case LPFC_RSC_TYPE_FCOE_RPI:
7440 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7441 break;
7442 default:
7443 return -EIO;
7444 }
7445
	/* Count the number of extents currently allocated for this type. */
7447 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7448 if (curr_blks == 0) {
7449 /*
7450 * The GET_ALLOCATED mailbox does not return the size,
7451 * just the count. The size should be just the size
7452 * stored in the current allocated block and all sizes
7453 * for an extent type are the same so set the return
7454 * value now.
7455 */
7456 *extnt_size = rsrc_blk->rsrc_size;
7457 }
7458 curr_blks++;
7459 }
7460
7461 /*
7462 * Calculate the size of an embedded mailbox. The uint32_t
7463 * accounts for extents-specific word.
7464 */
7465 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7466 sizeof(uint32_t);
7467
7468 /*
7469 * Presume the allocation and response will fit into an embedded
7470 * mailbox. If not true, reconfigure to a non-embedded mailbox.
7471 */
7472 emb = LPFC_SLI4_MBX_EMBED;
	req_len = curr_blks * sizeof(uint16_t);
7474 if (req_len > emb_len) {
7475 req_len = curr_blks * sizeof(uint16_t) +
7476 sizeof(union lpfc_sli4_cfg_shdr) +
7477 sizeof(uint32_t);
7478 emb = LPFC_SLI4_MBX_NEMBED;
7479 }
7480
7481 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7482 if (!mbox)
7483 return -ENOMEM;
7484 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7485
7486 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7487 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7488 req_len, emb);
7489 if (alloc_len < req_len) {
7490 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7491 "2983 Allocated DMA memory size (x%x) is "
7492 "less than the requested DMA memory "
7493 "size (x%x)\n", alloc_len, req_len);
7494 rc = -ENOMEM;
7495 goto err_exit;
7496 }
7497 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7498 if (unlikely(rc)) {
7499 rc = -EIO;
7500 goto err_exit;
7501 }
7502
7503 if (!phba->sli4_hba.intr_enable)
7504 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7505 else {
7506 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7507 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7508 }
7509
7510 if (unlikely(rc)) {
7511 rc = -EIO;
7512 goto err_exit;
7513 }
7514
7515 /*
7516 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee a response to
	 * every extent count request, so update the local variable with the
	 * allocated count from the port.
7520 */
7521 if (emb == LPFC_SLI4_MBX_EMBED) {
7522 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7523 shdr = &rsrc_ext->header.cfg_shdr;
7524 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7525 } else {
7526 virtaddr = mbox->sge_array->addr[0];
7527 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7528 shdr = &n_rsrc->cfg_shdr;
7529 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7530 }
7531
7532 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7533 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7534 "2984 Failed to read allocated resources "
7535 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7536 type,
7537 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7538 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7539 rc = -EIO;
7540 goto err_exit;
7541 }
7542 err_exit:
7543 lpfc_sli4_mbox_cmd_free(phba, mbox);
7544 return rc;
7545}
7546
/**
 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked list of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For a single
 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
 * post mailbox command for posting.
 *
 * Returns: the number of XRIs actually posted on success, -EIO on failure.
 **/
7563static int
7564lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7565 struct list_head *sgl_list, int cnt)
7566{
7567 struct lpfc_sglq *sglq_entry = NULL;
7568 struct lpfc_sglq *sglq_entry_next = NULL;
7569 struct lpfc_sglq *sglq_entry_first = NULL;
7570 int status, total_cnt;
7571 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7572 int last_xritag = NO_XRI;
7573 LIST_HEAD(prep_sgl_list);
7574 LIST_HEAD(blck_sgl_list);
7575 LIST_HEAD(allc_sgl_list);
7576 LIST_HEAD(post_sgl_list);
7577 LIST_HEAD(free_sgl_list);
7578
7579 spin_lock_irq(&phba->hbalock);
7580 spin_lock(&phba->sli4_hba.sgl_list_lock);
7581 list_splice_init(sgl_list, &allc_sgl_list);
7582 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7583 spin_unlock_irq(&phba->hbalock);
7584
7585 total_cnt = cnt;
7586 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7587 &allc_sgl_list, list) {
7588 list_del_init(&sglq_entry->list);
7589 block_cnt++;
7590 if ((last_xritag != NO_XRI) &&
7591 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7592 /* a hole in xri block, form a sgl posting block */
7593 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7594 post_cnt = block_cnt - 1;
7595 /* prepare list for next posting block */
7596 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7597 block_cnt = 1;
7598 } else {
7599 /* prepare list for next posting block */
7600 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7601 /* enough sgls for non-embed sgl mbox command */
7602 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7603 list_splice_init(&prep_sgl_list,
7604 &blck_sgl_list);
7605 post_cnt = block_cnt;
7606 block_cnt = 0;
7607 }
7608 }
7609 num_posted++;
7610
7611 /* keep track of last sgl's xritag */
7612 last_xritag = sglq_entry->sli4_xritag;
7613
7614 /* end of repost sgl list condition for buffers */
7615 if (num_posted == total_cnt) {
7616 if (post_cnt == 0) {
7617 list_splice_init(&prep_sgl_list,
7618 &blck_sgl_list);
7619 post_cnt = block_cnt;
7620 } else if (block_cnt == 1) {
7621 status = lpfc_sli4_post_sgl(phba,
7622 sglq_entry->phys, 0,
7623 sglq_entry->sli4_xritag);
7624 if (!status) {
7625 /* successful, put sgl to posted list */
7626 list_add_tail(&sglq_entry->list,
7627 &post_sgl_list);
7628 } else {
7629 /* Failure, put sgl to free list */
7630 lpfc_printf_log(phba, KERN_WARNING,
7631 LOG_SLI,
7632 "3159 Failed to post "
7633 "sgl, xritag:x%x\n",
7634 sglq_entry->sli4_xritag);
7635 list_add_tail(&sglq_entry->list,
7636 &free_sgl_list);
7637 total_cnt--;
7638 }
7639 }
7640 }
7641
7642 /* continue until a nembed page worth of sgls */
7643 if (post_cnt == 0)
7644 continue;
7645
7646 /* post the buffer list sgls as a block */
7647 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7648 post_cnt);
7649
7650 if (!status) {
7651 /* success, put sgl list to posted sgl list */
7652 list_splice_init(&blck_sgl_list, &post_sgl_list);
7653 } else {
7654 /* Failure, put sgl list to free sgl list */
7655 sglq_entry_first = list_first_entry(&blck_sgl_list,
7656 struct lpfc_sglq,
7657 list);
7658 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7659 "3160 Failed to post sgl-list, "
7660 "xritag:x%x-x%x\n",
7661 sglq_entry_first->sli4_xritag,
7662 (sglq_entry_first->sli4_xritag +
7663 post_cnt - 1));
7664 list_splice_init(&blck_sgl_list, &free_sgl_list);
7665 total_cnt -= post_cnt;
7666 }
7667
		/* don't reset xritag due to hole in xri block */
7669 if (block_cnt == 0)
7670 last_xritag = NO_XRI;
7671
7672 /* reset sgl post count for next round of posting */
7673 post_cnt = 0;
7674 }
7675
7676 /* free the sgls failed to post */
7677 lpfc_free_sgl_list(phba, &free_sgl_list);
7678
7679 /* push sgls posted to the available list */
7680 if (!list_empty(&post_sgl_list)) {
7681 spin_lock_irq(&phba->hbalock);
7682 spin_lock(&phba->sli4_hba.sgl_list_lock);
7683 list_splice_init(&post_sgl_list, sgl_list);
7684 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7685 spin_unlock_irq(&phba->hbalock);
7686 } else {
7687 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7688 "3161 Failure to post sgl to port.\n");
7689 return -EIO;
7690 }
7691
7692 /* return the number of XRIs actually posted */
7693 return total_cnt;
7694}
7695
7696/**
7697 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7698 * @phba: pointer to lpfc hba data structure.
7699 *
 * This routine walks the list of nvme buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
7702 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7703 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7704 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7705 *
7706 * Returns: 0 = success, non-zero failure.
7707 **/
7708static int
7709lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7710{
7711 LIST_HEAD(post_nblist);
7712 int num_posted, rc = 0;
7713
	/* get all NVME buffers that need to be reposted onto a local list */
7715 lpfc_io_buf_flush(phba, &post_nblist);
7716
7717 /* post the list of nvme buffer sgls to port if available */
7718 if (!list_empty(&post_nblist)) {
7719 num_posted = lpfc_sli4_post_io_sgl_list(
7720 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7721 /* failed to post any nvme buffer, return error */
7722 if (num_posted == 0)
7723 rc = -EIO;
7724 }
7725 return rc;
7726}
7727
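/**
 * lpfc_set_host_data - Build mailbox to report host OS driver version
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the driver internal queue element for mailbox command.
 *
 * Prepare a SET_HOST_DATA mailbox command that reports the host OS and
 * driver version string to the firmware. The caller is responsible for
 * issuing the mailbox command.
 **/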
7728static void
7729lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7730{
7731 uint32_t len;
7732
7733 len = sizeof(struct lpfc_mbx_set_host_data) -
7734 sizeof(struct lpfc_sli4_cfg_mhdr);
7735 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7736 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7737 LPFC_SLI4_MBX_EMBED);
7738
7739 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7740 mbox->u.mqe.un.set_host_data.param_len =
7741 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7742 snprintf(mbox->u.mqe.un.set_host_data.un.data,
7743 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7744 "Linux %s v"LPFC_DRIVER_VERSION,
7745 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7746}
7747
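/**
 * lpfc_post_rq_buffer - Allocate and post buffers to a receive queue pair
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the header receive queue.
 * @drq: Pointer to the data receive queue.
 * @count: Number of buffers to allocate and post.
 * @idx: Index of the receive queue pair.
 *
 * Allocate up to @count receive buffers and post them to the paired header
 * and data receive queues. Posting stops early if the RQ is nearly full or
 * buffer allocation fails; buffers that cannot be posted are freed back to
 * the pool.
 *
 * Returns: 1 always.
 **/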
7748int
7749lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7750 struct lpfc_queue *drq, int count, int idx)
7751{
7752 int rc, i;
7753 struct lpfc_rqe hrqe;
7754 struct lpfc_rqe drqe;
7755 struct lpfc_rqb *rqbp;
7756 unsigned long flags;
7757 struct rqb_dmabuf *rqb_buffer;
7758 LIST_HEAD(rqb_buf_list);
7759
7760 rqbp = hrq->rqbp;
7761 for (i = 0; i < count; i++) {
7762 spin_lock_irqsave(&phba->hbalock, flags);
		/* If the RQ is already full, don't bother */
7764 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7765 spin_unlock_irqrestore(&phba->hbalock, flags);
7766 break;
7767 }
7768 spin_unlock_irqrestore(&phba->hbalock, flags);
7769
7770 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7771 if (!rqb_buffer)
7772 break;
7773 rqb_buffer->hrq = hrq;
7774 rqb_buffer->drq = drq;
7775 rqb_buffer->idx = idx;
7776 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7777 }
7778
7779 spin_lock_irqsave(&phba->hbalock, flags);
7780 while (!list_empty(&rqb_buf_list)) {
7781 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7782 hbuf.list);
7783
7784 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7785 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7786 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7787 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7788 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7789 if (rc < 0) {
7790 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7791 "6421 Cannot post to HRQ %d: %x %x %x "
7792 "DRQ %x %x\n",
7793 hrq->queue_id,
7794 hrq->host_index,
7795 hrq->hba_index,
7796 hrq->entry_count,
7797 drq->host_index,
7798 drq->hba_index);
7799 rqbp->rqb_free_buffer(phba, rqb_buffer);
7800 } else {
7801 list_add_tail(&rqb_buffer->hbuf.list,
7802 &rqbp->rqb_buffer_list);
7803 rqbp->buffer_count++;
7804 }
7805 }
7806 spin_unlock_irqrestore(&phba->hbalock, flags);
7807 return 1;
7808}
7809
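/**
 * lpfc_mbx_cmpl_cgn_set_ftrs - Completion handler for congestion SET_FEATURES
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to the driver internal queue element for mailbox command.
 *
 * On mailbox failure, congestion management falls back to FPIN-only
 * registration. In either case an ELS RDF is issued to register for
 * fabric FPIN events once the mailbox has completed.
 **/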
7810static void
7811lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7812{
7813 struct lpfc_vport *vport = pmb->vport;
7814 union lpfc_sli4_cfg_shdr *shdr;
7815 u32 shdr_status, shdr_add_status;
7816 u32 sig, acqe;
7817
	/* Two outcomes. (1) Set features was successful and EDC negotiation
	 * is done. (2) Mailbox failed, so fall back to FPIN support only.
	 */
7821 shdr = (union lpfc_sli4_cfg_shdr *)
7822 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7823 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7824 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7825 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7826 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7827 "2516 CGN SET_FEATURE mbox failed with "
7828 "status x%x add_status x%x, mbx status x%x "
7829 "Reset Congestion to FPINs only\n",
7830 shdr_status, shdr_add_status,
7831 pmb->u.mb.mbxStatus);
7832 /* If there is a mbox error, move on to RDF */
7833 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7834 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7835 goto out;
7836 }
7837
7838 /* Zero out Congestion Signal ACQE counter */
7839 phba->cgn_acqe_cnt = 0;
7840
7841 acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7842 &pmb->u.mqe.un.set_feature);
7843 sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7844 &pmb->u.mqe.un.set_feature);
7845 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"4620 SET_FEATURES Success: Freq: %ds %dms "
			"Reg: x%x x%x\n", acqe, sig,
7848 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7849out:
7850 mempool_free(pmb, phba->mbox_mem_pool);
7851
7852 /* Register for FPIN events from the fabric now that the
7853 * EDC common_set_features has completed.
7854 */
7855 lpfc_issue_els_rdf(vport, 0);
7856}
7857
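/**
 * lpfc_config_cgn_signal - Register for congestion signals via SET_FEATURES
 * @phba: Pointer to HBA context object.
 *
 * Issue the LPFC_SET_CGN_SIGNAL SET_FEATURES mailbox command with
 * lpfc_mbx_cmpl_cgn_set_ftrs as the completion handler. If the mailbox
 * cannot be allocated or issued, fall back to FPIN-only registration and
 * issue the ELS RDF directly.
 *
 * Returns: 0 if the mailbox was issued, -EIO otherwise.
 **/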
7858int
7859lpfc_config_cgn_signal(struct lpfc_hba *phba)
7860{
7861 LPFC_MBOXQ_t *mboxq;
7862 u32 rc;
7863
7864 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7865 if (!mboxq)
7866 goto out_rdf;
7867
7868 lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7869 mboxq->vport = phba->pport;
7870 mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7871
7872 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7873 "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7874 "Reg: x%x x%x\n",
7875 phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7876 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7877
7878 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7879 if (rc == MBX_NOT_FINISHED)
7880 goto out;
7881 return 0;
7882
7883out:
7884 mempool_free(mboxq, phba->mbox_mem_pool);
7885out_rdf:
7886 /* If there is a mbox error, move on to RDF */
7887 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7888 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7889 lpfc_issue_els_rdf(phba->pport, 0);
7890 return -EIO;
7891}
7892
7893/**
7894 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7895 * @phba: pointer to lpfc hba data structure.
7896 *
7897 * This routine initializes the per-cq idle_stat to dynamically dictate
7898 * polling decisions.
7899 *
7900 * Return codes:
7901 * None
7902 **/
7903static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7904{
7905 int i;
7906 struct lpfc_sli4_hdw_queue *hdwq;
7907 struct lpfc_queue *cq;
7908 struct lpfc_idle_stat *idle_stat;
7909 u64 wall;
7910
7911 for_each_present_cpu(i) {
7912 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7913 cq = hdwq->io_cq;
7914
7915 /* Skip if we've already handled this cq's primary CPU */
7916 if (cq->chann != i)
7917 continue;
7918
7919 idle_stat = &phba->sli4_hba.idle_stat[i];
7920
7921 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7922 idle_stat->prev_wall = wall;
7923
7924 if (phba->nvmet_support ||
7925 phba->cmf_active_mode != LPFC_CFG_OFF)
7926 cq->poll_mode = LPFC_QUEUE_WORK;
7927 else
7928 cq->poll_mode = LPFC_IRQ_POLL;
7929 }
7930
7931 if (!phba->nvmet_support)
7932 schedule_delayed_work(&phba->idle_stat_delay_work,
7933 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7934}
7935
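/**
 * lpfc_sli4_dip - Log if a firmware dump image is present
 * @phba: Pointer to HBA context object.
 *
 * For IF_TYPE 2 and 6 ports, read the SLI port status register and log
 * a message if the dump image present (DIP) bit is set.
 **/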
7936static void lpfc_sli4_dip(struct lpfc_hba *phba)
7937{
7938 uint32_t if_type;
7939
7940 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7941 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7942 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7943 struct lpfc_register reg_data;
7944
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data.word0))
			return;

		if (bf_get(lpfc_sliport_status_dip, &reg_data))
7950 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7951 "2904 Firmware Dump Image Present"
7952 " on Adapter");
7953 }
7954}
7955
/**
 * lpfc_cmf_setup - Initialize CMF and MI support
 * @phba: Pointer to HBA context object.
 *
 * This is called from HBA setup during driver load or when the HBA
 * comes online. This does all the initialization to support CMF and MI.
 **/
7963static int
7964lpfc_cmf_setup(struct lpfc_hba *phba)
7965{
7966 LPFC_MBOXQ_t *mboxq;
7967 struct lpfc_dmabuf *mp;
7968 struct lpfc_pc_sli4_params *sli4_params;
7969 int rc, cmf, mi_ver;
7970
7971 rc = lpfc_sli4_refresh_params(phba);
7972 if (unlikely(rc))
7973 return rc;
7974
7975 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7976 if (!mboxq)
7977 return -ENOMEM;
7978
7979 sli4_params = &phba->sli4_hba.pc_sli4_params;
7980
7981 /* Always try to enable MI feature if we can */
7982 if (sli4_params->mi_ver) {
7983 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
7984 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7985 mi_ver = bf_get(lpfc_mbx_set_feature_mi,
7986 &mboxq->u.mqe.un.set_feature);
7987
7988 if (rc == MBX_SUCCESS) {
7989 if (mi_ver) {
7990 lpfc_printf_log(phba,
7991 KERN_WARNING, LOG_CGN_MGMT,
7992 "6215 MI is enabled\n");
7993 sli4_params->mi_ver = mi_ver;
7994 } else {
7995 lpfc_printf_log(phba,
7996 KERN_WARNING, LOG_CGN_MGMT,
7997 "6338 MI is disabled\n");
7998 sli4_params->mi_ver = 0;
7999 }
8000 } else {
8001 /* mi_ver is already set from GET_SLI4_PARAMETERS */
8002 lpfc_printf_log(phba, KERN_INFO,
8003 LOG_CGN_MGMT | LOG_INIT,
8004 "6245 Enable MI Mailbox x%x (x%x/x%x) "
8005 "failed, rc:x%x mi:x%x\n",
8006 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8007 lpfc_sli_config_mbox_subsys_get
8008 (phba, mboxq),
8009 lpfc_sli_config_mbox_opcode_get
8010 (phba, mboxq),
8011 rc, sli4_params->mi_ver);
8012 }
8013 } else {
8014 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8015 "6217 MI is disabled\n");
8016 }
8017
8018 /* Ensure FDMI is enabled for MI if enable_mi is set */
8019 if (sli4_params->mi_ver)
8020 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
8021
8022 /* Always try to enable CMF feature if we can */
8023 if (sli4_params->cmf) {
8024 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
8025 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8026 cmf = bf_get(lpfc_mbx_set_feature_cmf,
8027 &mboxq->u.mqe.un.set_feature);
8028 if (rc == MBX_SUCCESS && cmf) {
8029 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8030 "6218 CMF is enabled: mode %d\n",
8031 phba->cmf_active_mode);
8032 } else {
8033 lpfc_printf_log(phba, KERN_WARNING,
8034 LOG_CGN_MGMT | LOG_INIT,
8035 "6219 Enable CMF Mailbox x%x (x%x/x%x) "
8036 "failed, rc:x%x dd:x%x\n",
8037 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8038 lpfc_sli_config_mbox_subsys_get
8039 (phba, mboxq),
8040 lpfc_sli_config_mbox_opcode_get
8041 (phba, mboxq),
8042 rc, cmf);
8043 sli4_params->cmf = 0;
8044 phba->cmf_active_mode = LPFC_CFG_OFF;
8045 goto no_cmf;
8046 }
8047
8048 /* Allocate Congestion Information Buffer */
8049 if (!phba->cgn_i) {
8050 mp = kmalloc(sizeof(*mp), GFP_KERNEL);
8051 if (mp)
8052 mp->virt = dma_alloc_coherent
8053 (&phba->pcidev->dev,
8054 sizeof(struct lpfc_cgn_info),
8055 &mp->phys, GFP_KERNEL);
8056 if (!mp || !mp->virt) {
8057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8058 "2640 Failed to alloc memory "
8059 "for Congestion Info\n");
8060 kfree(mp);
8061 sli4_params->cmf = 0;
8062 phba->cmf_active_mode = LPFC_CFG_OFF;
8063 goto no_cmf;
8064 }
8065 phba->cgn_i = mp;
8066
8067 /* initialize congestion buffer info */
8068 lpfc_init_congestion_buf(phba);
8069 lpfc_init_congestion_stat(phba);
8070
8071 /* Zero out Congestion Signal counters */
8072 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
8073 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
8074 }
8075
8076 rc = lpfc_sli4_cgn_params_read(phba);
8077 if (rc < 0) {
8078 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8079 "6242 Error reading Cgn Params (%d)\n",
8080 rc);
8081 /* Ensure CGN Mode is off */
8082 sli4_params->cmf = 0;
8083 } else if (!rc) {
8084 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8085 "6243 CGN Event empty object.\n");
8086 /* Ensure CGN Mode is off */
8087 sli4_params->cmf = 0;
8088 }
8089 } else {
8090no_cmf:
8091 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8092 "6220 CMF is disabled\n");
8093 }
8094
	/* Only register the congestion buffer with firmware if BOTH
	 * CMF and MI are enabled.
	 */
8098 if (sli4_params->cmf && sli4_params->mi_ver) {
8099 rc = lpfc_reg_congestion_buf(phba);
8100 if (rc) {
8101 dma_free_coherent(&phba->pcidev->dev,
8102 sizeof(struct lpfc_cgn_info),
8103 phba->cgn_i->virt, phba->cgn_i->phys);
8104 kfree(phba->cgn_i);
8105 phba->cgn_i = NULL;
8106 /* Ensure CGN Mode is off */
8107 phba->cmf_active_mode = LPFC_CFG_OFF;
8108 return 0;
8109 }
8110 }
8111 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8112 "6470 Setup MI version %d CMF %d mode %d\n",
8113 sli4_params->mi_ver, sli4_params->cmf,
8114 phba->cmf_active_mode);
8115
8116 mempool_free(mboxq, phba->mbox_mem_pool);
8117
8118 /* Initialize atomic counters */
8119 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8120 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8121 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8122 atomic_set(&phba->cgn_sync_warn_cnt, 0);
8123 atomic_set(&phba->cgn_driver_evt_cnt, 0);
8124 atomic_set(&phba->cgn_latency_evt_cnt, 0);
8125 atomic64_set(&phba->cgn_latency_evt, 0);
8126
8127 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8128
8129 /* Allocate RX Monitor Buffer */
8130 if (!phba->rxtable) {
8131 phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
8132 sizeof(struct rxtable_entry),
8133 GFP_KERNEL);
8134 if (!phba->rxtable) {
8135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8136 "2644 Failed to alloc memory "
8137 "for RX Monitor Buffer\n");
8138 return -ENOMEM;
8139 }
8140 }
8141 atomic_set(&phba->rxtable_idx_head, 0);
8142 atomic_set(&phba->rxtable_idx_tail, 0);
8143 return 0;
8144}
8145
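/**
 * lpfc_set_host_tm - Send the current host date and time to the adapter
 * @phba: Pointer to HBA context object.
 *
 * Issue a SET_HOST_DATA mailbox command carrying the LPFC_SET_HOST_DATE_TIME
 * parameter, built from the current wall-clock time. The year is encoded
 * as an offset from 2000.
 *
 * Returns: -ENOMEM if a mailbox cannot be allocated, otherwise the
 * mailbox completion status.
 **/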
8146static int
8147lpfc_set_host_tm(struct lpfc_hba *phba)
8148{
8149 LPFC_MBOXQ_t *mboxq;
8150 uint32_t len, rc;
8151 struct timespec64 cur_time;
8152 struct tm broken;
8153 uint32_t month, day, year;
8154 uint32_t hour, minute, second;
8155 struct lpfc_mbx_set_host_date_time *tm;
8156
8157 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8158 if (!mboxq)
8159 return -ENOMEM;
8160
8161 len = sizeof(struct lpfc_mbx_set_host_data) -
8162 sizeof(struct lpfc_sli4_cfg_mhdr);
8163 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8164 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8165 LPFC_SLI4_MBX_EMBED);
8166
8167 mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8168 mboxq->u.mqe.un.set_host_data.param_len =
8169 sizeof(struct lpfc_mbx_set_host_date_time);
8170 tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8171 ktime_get_real_ts64(&cur_time);
8172 time64_to_tm(cur_time.tv_sec, 0, &broken);
8173 month = broken.tm_mon + 1;
8174 day = broken.tm_mday;
8175 year = broken.tm_year - 100;
8176 hour = broken.tm_hour;
8177 minute = broken.tm_min;
8178 second = broken.tm_sec;
8179 bf_set(lpfc_mbx_set_host_month, tm, month);
8180 bf_set(lpfc_mbx_set_host_day, tm, day);
8181 bf_set(lpfc_mbx_set_host_year, tm, year);
8182 bf_set(lpfc_mbx_set_host_hour, tm, hour);
8183 bf_set(lpfc_mbx_set_host_min, tm, minute);
8184 bf_set(lpfc_mbx_set_host_sec, tm, second);
8185
8186 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8187 mempool_free(mboxq, phba->mbox_mem_pool);
8188 return rc;
8189}
8190
8191/**
8192 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8193 * @phba: Pointer to HBA context object.
8194 *
8195 * This function is the main SLI4 device initialization PCI function. This
8196 * function is called by the HBA initialization code, HBA reset code and
8197 * HBA error attention handler code. Caller is not required to hold any
8198 * locks.
8199 **/
8200int
8201lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8202{
8203 int rc, i, cnt, len, dd;
8204 LPFC_MBOXQ_t *mboxq;
8205 struct lpfc_mqe *mqe;
8206 uint8_t *vpd;
8207 uint32_t vpd_size;
8208 uint32_t ftr_rsp = 0;
8209 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8210 struct lpfc_vport *vport = phba->pport;
8211 struct lpfc_dmabuf *mp;
8212 struct lpfc_rqb *rqbp;
8213 u32 flg;
8214
8215 /* Perform a PCI function reset to start from clean */
8216 rc = lpfc_pci_function_reset(phba);
8217 if (unlikely(rc))
8218 return -ENODEV;
8219
	/* Check the HBA Host Status Register for readiness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;

	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
	flg = phba->sli.sli_flag;
	spin_unlock_irq(&phba->hbalock);
	/* Allow a little time after setting SLI_ACTIVE for any polled
	 * MBX commands to complete via BSG.
	 */
	for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
		msleep(20);
		spin_lock_irq(&phba->hbalock);
		flg = phba->sli.sli_flag;
		spin_unlock_irq(&phba->hbalock);
	}
8239
8240 lpfc_sli4_dip(phba);
8241
8242 /*
8243 * Allocate a single mailbox container for initializing the
8244 * port.
8245 */
8246 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8247 if (!mboxq)
8248 return -ENOMEM;
8249
8250 /* Issue READ_REV to collect vpd and FW information. */
8251 vpd_size = SLI4_PAGE_SIZE;
8252 vpd = kzalloc(vpd_size, GFP_KERNEL);
8253 if (!vpd) {
8254 rc = -ENOMEM;
8255 goto out_free_mbox;
8256 }
8257
8258 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8259 if (unlikely(rc)) {
8260 kfree(vpd);
8261 goto out_free_mbox;
8262 }
8263
8264 mqe = &mboxq->u.mqe;
8265 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8266 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8267 phba->hba_flag |= HBA_FCOE_MODE;
8268 phba->fcp_embed_io = 0; /* SLI4 FC support only */
8269 } else {
8270 phba->hba_flag &= ~HBA_FCOE_MODE;
8271 }
8272
8273 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8274 LPFC_DCBX_CEE_MODE)
8275 phba->hba_flag |= HBA_FIP_SUPPORT;
8276 else
8277 phba->hba_flag &= ~HBA_FIP_SUPPORT;
8278
8279 phba->hba_flag &= ~HBA_IOQ_FLUSH;
8280
8281 if (phba->sli_rev != LPFC_SLI_REV4) {
8282 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8283 "0376 READ_REV Error. SLI Level %d "
8284 "FCoE enabled %d\n",
8285 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8286 rc = -EIO;
8287 kfree(vpd);
8288 goto out_free_mbox;
8289 }
8290
8291 rc = lpfc_set_host_tm(phba);
8292 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"6468 Set host date / time: Status x%x\n", rc);
8294
	/*
	 * Continue initialization with default values even if the driver
	 * failed to read FCoE param config regions; only read parameters
	 * if the board is FCoE.
	 */
8300 if (phba->hba_flag & HBA_FCOE_MODE &&
8301 lpfc_sli4_read_fcoe_params(phba))
8302 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8303 "2570 Failed to read FCoE parameters\n");
8304
	/*
	 * Retrieve the sli4 device physical port name; failure to do so
	 * is considered non-fatal.
	 */
8309 rc = lpfc_sli4_retrieve_pport_name(phba);
8310 if (!rc)
8311 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8312 "3080 Successful retrieving SLI4 device "
8313 "physical port name: %s.\n", phba->Port);
8314
8315 rc = lpfc_sli4_get_ctl_attr(phba);
8316 if (!rc)
8317 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8318 "8351 Successful retrieving SLI4 device "
8319 "CTL ATTR\n");
8320
8321 /*
8322 * Evaluate the read rev and vpd data. Populate the driver
8323 * state with the results. If this routine fails, the failure
8324 * is not fatal as the driver will use generic values.
8325 */
8326 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8327 if (unlikely(!rc)) {
8328 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8329 "0377 Error %d parsing vpd. "
8330 "Using defaults.\n", rc);
8331 rc = 0;
8332 }
8333 kfree(vpd);
8334
8335 /* Save information as VPD data */
8336 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8337 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8338
8339 /*
	 * This is because the first G7 ASIC doesn't support the standard
	 * 0x5a NVME cmd descriptor type/subtype.
8342 */
8343 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8344 LPFC_SLI_INTF_IF_TYPE_6) &&
8345 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8346 (phba->vpd.rev.smRev == 0) &&
8347 (phba->cfg_nvme_embed_cmd == 1))
8348 phba->cfg_nvme_embed_cmd = 0;
8349
8350 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8351 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8352 &mqe->un.read_rev);
8353 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8354 &mqe->un.read_rev);
8355 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8356 &mqe->un.read_rev);
8357 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8358 &mqe->un.read_rev);
8359 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8360 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8361 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8362 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8363 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8364 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8365 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8366 "(%d):0380 READ_REV Status x%x "
8367 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8368 mboxq->vport ? mboxq->vport->vpi : 0,
8369 bf_get(lpfc_mqe_status, mqe),
8370 phba->vpd.rev.opFwName,
8371 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8372 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8373
8374 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8375 LPFC_SLI_INTF_IF_TYPE_0) {
8376 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8377 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8378 if (rc == MBX_SUCCESS) {
8379 phba->hba_flag |= HBA_RECOVERABLE_UE;
8380 /* Set 1Sec interval to detect UE */
8381 phba->eratt_poll_interval = 1;
8382 phba->sli4_hba.ue_to_sr = bf_get(
8383 lpfc_mbx_set_feature_UESR,
8384 &mboxq->u.mqe.un.set_feature);
8385 phba->sli4_hba.ue_to_rp = bf_get(
8386 lpfc_mbx_set_feature_UERP,
8387 &mboxq->u.mqe.un.set_feature);
8388 }
8389 }
8390
8391 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8392 /* Enable MDS Diagnostics only if the SLI Port supports it */
8393 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8394 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8395 if (rc != MBX_SUCCESS)
8396 phba->mds_diags_support = 0;
8397 }
8398
8399 /*
8400 * Discover the port's supported feature set and match it against the
	 * host's requests.
8402 */
8403 lpfc_request_features(phba, mboxq);
8404 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8405 if (unlikely(rc)) {
8406 rc = -EIO;
8407 goto out_free_mbox;
8408 }
8409
8410 /* Disable VMID if app header is not supported */
8411 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8412 &mqe->un.req_ftrs))) {
8413 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8414 phba->cfg_vmid_app_header = 0;
8415 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8416 "1242 vmid feature not supported\n");
8417 }
8418
8419 /*
8420 * The port must support FCP initiator mode as this is the
8421 * only mode running in the host.
8422 */
8423 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8424 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8425 "0378 No support for fcpi mode.\n");
8426 ftr_rsp++;
8427 }
8428
8429 /* Performance Hints are ONLY for FCoE */
8430 if (phba->hba_flag & HBA_FCOE_MODE) {
8431 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8432 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8433 else
8434 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8435 }
8436
8437 /*
8438 * If the port cannot support the host's requested features
8439 * then turn off the global config parameters to disable the
8440 * feature in the driver. This is not a fatal error.
8441 */
8442 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8443 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8444 phba->cfg_enable_bg = 0;
8445 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8446 ftr_rsp++;
8447 }
8448 }
8449
8450 if (phba->max_vpi && phba->cfg_enable_npiv &&
8451 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8452 ftr_rsp++;
8453
8454 if (ftr_rsp) {
8455 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8456 "0379 Feature Mismatch Data: x%08x %08x "
8457 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8458 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8459 phba->cfg_enable_npiv, phba->max_vpi);
8460 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8461 phba->cfg_enable_bg = 0;
8462 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8463 phba->cfg_enable_npiv = 0;
8464 }
8465
8466 /* These SLI3 features are assumed in SLI4 */
8467 spin_lock_irq(&phba->hbalock);
8468 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8469 spin_unlock_irq(&phba->hbalock);
8470
8471 /* Always try to enable dual dump feature if we can */
8472 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8473 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8474 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8475 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8476 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8477 "6448 Dual Dump is enabled\n");
8478 else
8479 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8480 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8481 "rc:x%x dd:x%x\n",
8482 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8483 lpfc_sli_config_mbox_subsys_get(
8484 phba, mboxq),
8485 lpfc_sli_config_mbox_opcode_get(
8486 phba, mboxq),
8487 rc, dd);
8488 /*
8489 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
	 * calls depend on these resources to complete port setup.
8491 */
8492 rc = lpfc_sli4_alloc_resource_identifiers(phba);
8493 if (rc) {
8494 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8495 "2920 Failed to alloc Resource IDs "
8496 "rc = x%x\n", rc);
8497 goto out_free_mbox;
8498 }
8499
8500 lpfc_set_host_data(phba, mboxq);
8501
8502 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8503 if (rc) {
8504 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"2134 Failed to set host os driver version %x\n",
8506 rc);
8507 }
8508
8509 /* Read the port's service parameters. */
8510 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8511 if (rc) {
8512 phba->link_state = LPFC_HBA_ERROR;
8513 rc = -ENOMEM;
8514 goto out_free_mbox;
8515 }
8516
8517 mboxq->vport = vport;
8518 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8519 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
8520 if (rc == MBX_SUCCESS) {
8521 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8522 rc = 0;
8523 }
8524
8525 /*
8526 * This memory was allocated by the lpfc_read_sparam routine but is
8527 * no longer needed. It is released and ctx_buf NULLed to prevent
8528 * unintended pointer access as the mbox is reused.
8529 */
8530 lpfc_mbuf_free(phba, mp->virt, mp->phys);
8531 kfree(mp);
8532 mboxq->ctx_buf = NULL;
8533 if (unlikely(rc)) {
8534 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8535 "0382 READ_SPARAM command failed "
8536 "status %d, mbxStatus x%x\n",
8537 rc, bf_get(lpfc_mqe_status, mqe));
8538 phba->link_state = LPFC_HBA_ERROR;
8539 rc = -EIO;
8540 goto out_free_mbox;
8541 }
8542
8543 lpfc_update_vport_wwn(vport);
8544
8545 /* Update the fc_host data structures with new wwn. */
8546 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8547 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8548
8549 /* Create all the SLI4 queues */
8550 rc = lpfc_sli4_queue_create(phba);
8551 if (rc) {
8552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8553 "3089 Failed to allocate queues\n");
8554 rc = -ENODEV;
8555 goto out_free_mbox;
8556 }
8557 /* Set up all the queues to the device */
8558 rc = lpfc_sli4_queue_setup(phba);
8559 if (unlikely(rc)) {
8560 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0381 Error %d during queue setup.\n", rc);
8562 goto out_stop_timers;
8563 }
8564 /* Initialize the driver internal SLI layer lists. */
8565 lpfc_sli4_setup(phba);
8566 lpfc_sli4_queue_init(phba);
8567
8568 /* update host els xri-sgl sizes and mappings */
8569 rc = lpfc_sli4_els_sgl_update(phba);
8570 if (unlikely(rc)) {
8571 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8572 "1400 Failed to update xri-sgl size and "
8573 "mapping: %d\n", rc);
8574 goto out_destroy_queue;
8575 }
8576
8577 /* register the els sgl pool to the port */
8578 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8579 phba->sli4_hba.els_xri_cnt);
8580 if (unlikely(rc < 0)) {
8581 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8582 "0582 Error %d during els sgl post "
8583 "operation\n", rc);
8584 rc = -ENODEV;
8585 goto out_destroy_queue;
8586 }
8587 phba->sli4_hba.els_xri_cnt = rc;
8588
8589 if (phba->nvmet_support) {
8590 /* update host nvmet xri-sgl sizes and mappings */
8591 rc = lpfc_sli4_nvmet_sgl_update(phba);
8592 if (unlikely(rc)) {
8593 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8594 "6308 Failed to update nvmet-sgl size "
8595 "and mapping: %d\n", rc);
8596 goto out_destroy_queue;
8597 }
8598
8599 /* register the nvmet sgl pool to the port */
8600 rc = lpfc_sli4_repost_sgl_list(
8601 phba,
8602 &phba->sli4_hba.lpfc_nvmet_sgl_list,
8603 phba->sli4_hba.nvmet_xri_cnt);
8604 if (unlikely(rc < 0)) {
8605 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8606 "3117 Error %d during nvmet "
8607 "sgl post\n", rc);
8608 rc = -ENODEV;
8609 goto out_destroy_queue;
8610 }
8611 phba->sli4_hba.nvmet_xri_cnt = rc;
8612
8613 /* We allocate an iocbq for every receive context SGL.
8614 * The additional allocation is for abort and ls handling.
8615 */
8616 cnt = phba->sli4_hba.nvmet_xri_cnt +
8617 phba->sli4_hba.max_cfg_param.max_xri;
8618 } else {
8619 /* update host common xri-sgl sizes and mappings */
8620 rc = lpfc_sli4_io_sgl_update(phba);
8621 if (unlikely(rc)) {
8622 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8623 "6082 Failed to update nvme-sgl size "
8624 "and mapping: %d\n", rc);
8625 goto out_destroy_queue;
8626 }
8627
8628 /* register the allocated common sgl pool to the port */
8629 rc = lpfc_sli4_repost_io_sgl_list(phba);
8630 if (unlikely(rc)) {
8631 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8632 "6116 Error %d during nvme sgl post "
8633 "operation\n", rc);
8634 /* Some NVME buffers were moved to abort nvme list */
8635 /* A pci function reset will repost them */
8636 rc = -ENODEV;
8637 goto out_destroy_queue;
8638 }
8639 /* Each lpfc_io_buf job structure has an iocbq element.
8640 * This cnt provides for abort, els, ct and ls requests.
8641 */
8642 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8643 }
8644
8645 if (!phba->sli.iocbq_lookup) {
8646 /* Initialize and populate the iocb list per host */
8647 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8648 "2821 initialize iocb list with %d entries\n",
8649 cnt);
8650 rc = lpfc_init_iocb_list(phba, cnt);
8651 if (rc) {
8652 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8653 "1413 Failed to init iocb list.\n");
8654 goto out_destroy_queue;
8655 }
8656 }
8657
8658 if (phba->nvmet_support)
8659 lpfc_nvmet_create_targetport(phba);
8660
8661 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8662 /* Post initial buffers to all RQs created */
8663 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8664 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8665 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8666 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8667 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8668 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8669 rqbp->buffer_count = 0;
8670
8671 lpfc_post_rq_buffer(
8672 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8673 phba->sli4_hba.nvmet_mrq_data[i],
8674 phba->cfg_nvmet_mrq_post, i);
8675 }
8676 }
8677
8678 /* Post the rpi header region to the device. */
8679 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8680 if (unlikely(rc)) {
8681 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8682 "0393 Error %d during rpi post operation\n",
8683 rc);
8684 rc = -ENODEV;
8685 goto out_free_iocblist;
8686 }
8687 lpfc_sli4_node_prep(phba);
8688
8689 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8690 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8691 /*
8692 * The FC Port needs to register FCFI (index 0)
8693 */
8694 lpfc_reg_fcfi(phba, mboxq);
8695 mboxq->vport = phba->pport;
8696 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8697 if (rc != MBX_SUCCESS)
8698 goto out_unset_queue;
8699 rc = 0;
8700 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8701 &mboxq->u.mqe.un.reg_fcfi);
8702 } else {
8703			/* We are in NVME Target mode with MRQ > 1 */
8704
8705 /* First register the FCFI */
8706 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8707 mboxq->vport = phba->pport;
8708 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8709 if (rc != MBX_SUCCESS)
8710 goto out_unset_queue;
8711 rc = 0;
8712 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8713 &mboxq->u.mqe.un.reg_fcfi_mrq);
8714
8715 /* Next register the MRQs */
8716 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8717 mboxq->vport = phba->pport;
8718 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8719 if (rc != MBX_SUCCESS)
8720 goto out_unset_queue;
8721 rc = 0;
8722 }
8723 /* Check if the port is configured to be disabled */
8724 lpfc_sli_read_link_ste(phba);
8725 }
8726
8727 /* Don't post more new bufs if repost already recovered
8728 * the nvme sgls.
8729 */
8730 if (phba->nvmet_support == 0) {
8731 if (phba->sli4_hba.io_xri_cnt == 0) {
8732 len = lpfc_new_io_buf(
8733 phba, phba->sli4_hba.io_xri_max);
8734 if (len == 0) {
8735 rc = -ENOMEM;
8736 goto out_unset_queue;
8737 }
8738
8739 if (phba->cfg_xri_rebalancing)
8740 lpfc_create_multixri_pools(phba);
8741 }
8742 } else {
8743 phba->cfg_xri_rebalancing = 0;
8744 }
8745
8746 /* Allow asynchronous mailbox command to go through */
8747 spin_lock_irq(&phba->hbalock);
8748 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8749 spin_unlock_irq(&phba->hbalock);
8750
8751 /* Post receive buffers to the device */
8752 lpfc_sli4_rb_setup(phba);
8753
8754 /* Reset HBA FCF states after HBA reset */
8755 phba->fcf.fcf_flag = 0;
8756 phba->fcf.current_rec.flag = 0;
8757
8758 /* Start the ELS watchdog timer */
8759 mod_timer(&vport->els_tmofunc,
8760 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8761
8762 /* Start heart beat timer */
8763 mod_timer(&phba->hb_tmofunc,
8764 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8765 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8766 phba->last_completion_time = jiffies;
8767
8768 /* start eq_delay heartbeat */
8769 if (phba->cfg_auto_imax)
8770 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8771 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8772
8773 /* start per phba idle_stat_delay heartbeat */
8774 lpfc_init_idle_stat_hb(phba);
8775
8776 /* Start error attention (ERATT) polling timer */
8777 mod_timer(&phba->eratt_poll,
8778 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8779
8780 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8781 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8782 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8783 if (!rc) {
8784 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8785 "2829 This device supports "
8786 "Advanced Error Reporting (AER)\n");
8787 spin_lock_irq(&phba->hbalock);
8788 phba->hba_flag |= HBA_AER_ENABLED;
8789 spin_unlock_irq(&phba->hbalock);
8790 } else {
8791 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8792 "2830 This device does not support "
8793 "Advanced Error Reporting (AER)\n");
8794 phba->cfg_aer_support = 0;
8795 }
8796 rc = 0;
8797 }
8798
8799 /*
8800 * The port is ready, set the host's link state to LINK_DOWN
8801 * in preparation for link interrupts.
8802 */
8803 spin_lock_irq(&phba->hbalock);
8804 phba->link_state = LPFC_LINK_DOWN;
8805
8806 /* Check if physical ports are trunked */
8807 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8808 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8809 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8810 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8811 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8812 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8813 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8814 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8815 spin_unlock_irq(&phba->hbalock);
8816
8817 /* Arm the CQs and then EQs on device */
8818 lpfc_sli4_arm_cqeq_intr(phba);
8819
8820 /* Indicate device interrupt mode */
8821 phba->sli4_hba.intr_enable = 1;
8822
8823 /* Setup CMF after HBA is initialized */
8824 lpfc_cmf_setup(phba);
8825
8826 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8827 (phba->hba_flag & LINK_DISABLED)) {
8828 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8829 "3103 Adapter Link is disabled.\n");
8830 lpfc_down_link(phba, mboxq);
8831 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8832 if (rc != MBX_SUCCESS) {
8833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8834 "3104 Adapter failed to issue "
8835 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8836 goto out_io_buff_free;
8837 }
8838 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8839 /* don't perform init_link on SLI4 FC port loopback test */
8840 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8841 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8842 if (rc)
8843 goto out_io_buff_free;
8844 }
8845 }
8846 mempool_free(mboxq, phba->mbox_mem_pool);
8847
8848 /* Enable RAS FW log support */
8849 lpfc_sli4_ras_setup(phba);
8850
8851 phba->hba_flag |= HBA_SETUP;
8852 return rc;
8853
8854out_io_buff_free:
8855 /* Free allocated IO Buffers */
8856 lpfc_io_free(phba);
8857out_unset_queue:
8858	/* Unset all the queues set up in this routine on error */
8859 lpfc_sli4_queue_unset(phba);
8860out_free_iocblist:
8861 lpfc_free_iocb_list(phba);
8862out_destroy_queue:
8863 lpfc_sli4_queue_destroy(phba);
8864out_stop_timers:
8865 lpfc_stop_hba_timers(phba);
8866out_free_mbox:
8867 mempool_free(mboxq, phba->mbox_mem_pool);
8868 return rc;
8869}
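
/*
 * Note on the error unwind above (sketch of the intent): the labels form
 * a strict LIFO ladder and control falls through from each label to the
 * next, so a given goto undoes exactly the setup completed before the
 * failure. For example, a jump to out_free_iocblist also runs
 * lpfc_sli4_queue_destroy(), lpfc_stop_hba_timers() and frees the
 * mailbox via out_free_mbox.
 */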
8870
8871/**
8872 * lpfc_mbox_timeout - Timeout call back function for mbox timer
8873 * @t: Context to fetch pointer to hba structure from.
8874 *
8875 * This is the callback function for the mailbox timer. The mailbox
8876 * timer is armed when a new mailbox command is issued and deleted
8877 * when the mailbox completes. The kernel timer code calls this
8878 * function when a mailbox does not complete within the expected
8879 * time. This function wakes up the worker thread to process the
8880 * mailbox timeout and returns. All the processing is done by the
8881 * worker thread function lpfc_mbox_timeout_handler.
8882 **/
8883void
8884lpfc_mbox_timeout(struct timer_list *t)
8885{
8886 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8887 unsigned long iflag;
8888 uint32_t tmo_posted;
8889
8890 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8891 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8892 if (!tmo_posted)
8893 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8894 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8895
8896 if (!tmo_posted)
8897 lpfc_worker_wake_up(phba);
8898 return;
8899}
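
/*
 * Usage sketch: the timer is bound to this callback elsewhere in the
 * driver with the standard kernel timer API, roughly
 *
 *	timer_setup(&phba->sli.mbox_tmo, lpfc_mbox_timeout, 0);
 *
 * and is armed per command, as in lpfc_sli_issue_mbox_s3() below:
 *
 *	mod_timer(&psli->mbox_tmo, jiffies + timeout);
 */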
8900
8901/**
8902 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
8903 * are pending
8904 * @phba: Pointer to HBA context object.
8905 *
8906 * This function checks if any mailbox completions are present on the mailbox
8907 * completion queue.
8908 **/
8909static bool
8910lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8911{
8913 uint32_t idx;
8914 struct lpfc_queue *mcq;
8915 struct lpfc_mcqe *mcqe;
8916 bool pending_completions = false;
8917 uint8_t qe_valid;
8918
8919 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8920 return false;
8921
8922 /* Check for completions on mailbox completion queue */
8923
8924 mcq = phba->sli4_hba.mbx_cq;
8925 idx = mcq->hba_index;
8926 qe_valid = mcq->qe_valid;
8927 while (bf_get_le32(lpfc_cqe_valid,
8928 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8929 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8930 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8931 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8932 pending_completions = true;
8933 break;
8934 }
8935 idx = (idx + 1) % mcq->entry_count;
8936 if (mcq->hba_index == idx)
8937 break;
8938
8939 /* if the index wrapped around, toggle the valid bit */
8940 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8941 qe_valid = (qe_valid) ? 0 : 1;
8942 }
8943 return pending_completions;
8945}
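
/*
 * Sketch of the valid-bit scan above: on ports with CQ autovalid
 * (pc_sli4_params.cqav), consumed entries are not zeroed; instead the
 * expected valid sense flips each time the scan wraps the ring:
 *
 *	idx = (idx + 1) % mcq->entry_count;
 *	if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
 *		qe_valid = (qe_valid) ? 0 : 1;
 */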
8946
8947/**
8948 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8949 * that were missed.
8950 * @phba: Pointer to HBA context object.
8951 *
8952 * For sli4, it is possible to miss an interrupt. As such, mbox completions
8953 * may be missed, causing erroneous mailbox timeouts. This function
8954 * checks to see if mbox completions are on the mailbox completion queue
8955 * and will process all the completions associated with the eq for the
8956 * mailbox completion queue.
8957 **/
8958static bool
8959lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8960{
8961 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8962 uint32_t eqidx;
8963 struct lpfc_queue *fpeq = NULL;
8964 struct lpfc_queue *eq;
8965 bool mbox_pending;
8966
8967 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8968 return false;
8969
8970 /* Find the EQ associated with the mbox CQ */
8971 if (sli4_hba->hdwq) {
8972 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8973 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8974 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8975 fpeq = eq;
8976 break;
8977 }
8978 }
8979 }
8980 if (!fpeq)
8981 return false;
8982
8983 /* Turn off interrupts from this EQ */
8984
8985 sli4_hba->sli4_eq_clr_intr(fpeq);
8986
8987 /* Check to see if a mbox completion is pending */
8988
8989 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8990
8991 /*
8992 * If a mbox completion is pending, process all the events on EQ
8993 * associated with the mbox completion queue (this could include
8994 * mailbox commands, async events, els commands, receive queue data
8995 * and fcp commands)
8996 */
8997
8998 if (mbox_pending)
8999 /* process and rearm the EQ */
9000 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
9001 else
9002 /* Always clear and re-arm the EQ */
9003 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
9004
9005 return mbox_pending;
9007}
9008
9009/**
9010 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
9011 * @phba: Pointer to HBA context object.
9012 *
9013 * This function is called from worker thread when a mailbox command times out.
9014 * The caller is not required to hold any locks. This function will reset the
9015 * HBA and recover all the pending commands.
9016 **/
9017void
9018lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9019{
9020 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9021 MAILBOX_t *mb = NULL;
9022
9023 struct lpfc_sli *psli = &phba->sli;
9024
9025 /* If the mailbox completed, process the completion */
9026 lpfc_sli4_process_missed_mbox_completions(phba);
9027
9028 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
9029 return;
9030
9031 if (pmbox != NULL)
9032 mb = &pmbox->u.mb;
9033 /* Check the pmbox pointer first. There is a race condition
9034 * between the mbox timeout handler getting executed in the
9035 * worklist and the mailbox actually completing. When this
9036 * race condition occurs, the mbox_active will be NULL.
9037 */
9038 spin_lock_irq(&phba->hbalock);
9039 if (pmbox == NULL) {
9040 lpfc_printf_log(phba, KERN_WARNING,
9041 LOG_MBOX | LOG_SLI,
9042 "0353 Active Mailbox cleared - mailbox timeout "
9043 "exiting\n");
9044 spin_unlock_irq(&phba->hbalock);
9045 return;
9046 }
9047
9048 /* Mbox cmd <mbxCommand> timeout */
9049 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9050 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9051 mb->mbxCommand,
9052 phba->pport->port_state,
9053 phba->sli.sli_flag,
9054 phba->sli.mbox_active);
9055 spin_unlock_irq(&phba->hbalock);
9056
9057 /* Setting state unknown so lpfc_sli_abort_iocb_ring
9058 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
9059 * it to fail all outstanding SCSI IO.
9060 */
9061 spin_lock_irq(&phba->pport->work_port_lock);
9062 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9063 spin_unlock_irq(&phba->pport->work_port_lock);
9064 spin_lock_irq(&phba->hbalock);
9065 phba->link_state = LPFC_LINK_UNKNOWN;
9066 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9067 spin_unlock_irq(&phba->hbalock);
9068
9069 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9070 "0345 Resetting board due to mailbox timeout\n");
9071
9072 /* Reset the HBA device */
9073 lpfc_reset_hba(phba);
9074}
9075
9076/**
9077 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9078 * @phba: Pointer to HBA context object.
9079 * @pmbox: Pointer to mailbox object.
9080 * @flag: Flag indicating how the mailbox need to be processed.
9081 *
9082 * This function is called by discovery code and HBA management code
9083 * to submit a mailbox command to firmware with SLI-3 interface spec. This
9084 * function gets the hbalock to protect the data structures.
9085 * The mailbox command can be submitted in polling mode, in which case
9086 * this function will wait in a polling loop for the completion of the
9087 * mailbox.
9088 * If the mailbox is submitted in no_wait mode (not polling), the
9089 * function will submit the command and return immediately without waiting
9090 * for the mailbox completion. The no_wait mode is supported only when the
9091 * HBA is in SLI2/SLI3 mode - interrupts are enabled.
9092 * The SLI interface allows only one mailbox pending at a time. If the
9093 * mailbox is issued in polling mode and there is already a mailbox
9094 * pending, then the function will return an error. If the mailbox is issued
9095 * in NO_WAIT mode and there is a mailbox pending already, the function
9096 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
9097 * The sli layer owns the mailbox object until the completion of the mailbox
9098 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
9099 * return codes the caller owns the mailbox command after the return of
9100 * the function.
9101 **/
9102static int
9103lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9104 uint32_t flag)
9105{
9106 MAILBOX_t *mbx;
9107 struct lpfc_sli *psli = &phba->sli;
9108 uint32_t status, evtctr;
9109 uint32_t ha_copy, hc_copy;
9110 int i;
9111 unsigned long timeout;
9112 unsigned long drvr_flag = 0;
9113 uint32_t word0, ldata;
9114 void __iomem *to_slim;
9115 int processing_queue = 0;
9116
9117 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9118 if (!pmbox) {
9119 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9120 /* processing mbox queue from intr_handler */
9121 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9122 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9123 return MBX_SUCCESS;
9124 }
9125 processing_queue = 1;
9126 pmbox = lpfc_mbox_get(phba);
9127 if (!pmbox) {
9128 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9129 return MBX_SUCCESS;
9130 }
9131 }
9132
9133 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9134 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9135		if (!pmbox->vport) {
9136 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9137 lpfc_printf_log(phba, KERN_ERR,
9138 LOG_MBOX | LOG_VPORT,
9139 "1806 Mbox x%x failed. No vport\n",
9140 pmbox->u.mb.mbxCommand);
9141 dump_stack();
9142 goto out_not_finished;
9143 }
9144 }
9145
9146 /* If the PCI channel is in offline state, do not post mbox. */
9147 if (unlikely(pci_channel_offline(phba->pcidev))) {
9148 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9149 goto out_not_finished;
9150 }
9151
9152	/* If the HBA has a deferred error attention, do not post the mbox. */
9153 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9154 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9155 goto out_not_finished;
9156 }
9157
9158 psli = &phba->sli;
9159
9160 mbx = &pmbox->u.mb;
9161 status = MBX_SUCCESS;
9162
9163 if (phba->link_state == LPFC_HBA_ERROR) {
9164 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9165
9166 /* Mbox command <mbxCommand> cannot issue */
9167 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9168 "(%d):0311 Mailbox command x%x cannot "
9169 "issue Data: x%x x%x\n",
9170 pmbox->vport ? pmbox->vport->vpi : 0,
9171 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9172 goto out_not_finished;
9173 }
9174
9175 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9176 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9177 !(hc_copy & HC_MBINT_ENA)) {
9178 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9180 "(%d):2528 Mailbox command x%x cannot "
9181 "issue Data: x%x x%x\n",
9182 pmbox->vport ? pmbox->vport->vpi : 0,
9183 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9184 goto out_not_finished;
9185 }
9186 }
9187
9188 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9189 /* Polling for a mbox command when another one is already active
9190 * is not allowed in SLI. Also, the driver must have established
9191 * SLI2 mode to queue and process multiple mbox commands.
9192 */
9193
9194 if (flag & MBX_POLL) {
9195 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9196
9197 /* Mbox command <mbxCommand> cannot issue */
9198 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9199 "(%d):2529 Mailbox command x%x "
9200 "cannot issue Data: x%x x%x\n",
9201 pmbox->vport ? pmbox->vport->vpi : 0,
9202 pmbox->u.mb.mbxCommand,
9203 psli->sli_flag, flag);
9204 goto out_not_finished;
9205 }
9206
9207 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9208 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9209 /* Mbox command <mbxCommand> cannot issue */
9210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9211 "(%d):2530 Mailbox command x%x "
9212 "cannot issue Data: x%x x%x\n",
9213 pmbox->vport ? pmbox->vport->vpi : 0,
9214 pmbox->u.mb.mbxCommand,
9215 psli->sli_flag, flag);
9216 goto out_not_finished;
9217 }
9218
9219 /* Another mailbox command is still being processed, queue this
9220 * command to be processed later.
9221 */
9222 lpfc_mbox_put(phba, pmbox);
9223
9224 /* Mbox cmd issue - BUSY */
9225 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9226 "(%d):0308 Mbox cmd issue - BUSY Data: "
9227 "x%x x%x x%x x%x\n",
9228 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9229 mbx->mbxCommand,
9230 phba->pport ? phba->pport->port_state : 0xff,
9231 psli->sli_flag, flag);
9232
9233 psli->slistat.mbox_busy++;
9234 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9235
9236 if (pmbox->vport) {
9237 lpfc_debugfs_disc_trc(pmbox->vport,
9238 LPFC_DISC_TRC_MBOX_VPORT,
9239 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
9240 (uint32_t)mbx->mbxCommand,
9241 mbx->un.varWords[0], mbx->un.varWords[1]);
9242		} else {
9244 lpfc_debugfs_disc_trc(phba->pport,
9245 LPFC_DISC_TRC_MBOX,
9246 "MBOX Bsy: cmd:x%x mb:x%x x%x",
9247 (uint32_t)mbx->mbxCommand,
9248 mbx->un.varWords[0], mbx->un.varWords[1]);
9249 }
9250
9251 return MBX_BUSY;
9252 }
9253
9254 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9255
9256 /* If we are not polling, we MUST be in SLI2 mode */
9257 if (flag != MBX_POLL) {
9258 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9259 (mbx->mbxCommand != MBX_KILL_BOARD)) {
9260 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9261 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9262 /* Mbox command <mbxCommand> cannot issue */
9263 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9264 "(%d):2531 Mailbox command x%x "
9265 "cannot issue Data: x%x x%x\n",
9266 pmbox->vport ? pmbox->vport->vpi : 0,
9267 pmbox->u.mb.mbxCommand,
9268 psli->sli_flag, flag);
9269 goto out_not_finished;
9270 }
9271 /* timeout active mbox command */
9272 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9273 1000);
9274 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9275 }
9276
9277 /* Mailbox cmd <cmd> issue */
9278 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9279 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9280 "x%x\n",
9281 pmbox->vport ? pmbox->vport->vpi : 0,
9282 mbx->mbxCommand,
9283 phba->pport ? phba->pport->port_state : 0xff,
9284 psli->sli_flag, flag);
9285
9286 if (mbx->mbxCommand != MBX_HEARTBEAT) {
9287 if (pmbox->vport) {
9288 lpfc_debugfs_disc_trc(pmbox->vport,
9289 LPFC_DISC_TRC_MBOX_VPORT,
9290 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9291 (uint32_t)mbx->mbxCommand,
9292 mbx->un.varWords[0], mbx->un.varWords[1]);
9293		} else {
9295 lpfc_debugfs_disc_trc(phba->pport,
9296 LPFC_DISC_TRC_MBOX,
9297 "MBOX Send: cmd:x%x mb:x%x x%x",
9298 (uint32_t)mbx->mbxCommand,
9299 mbx->un.varWords[0], mbx->un.varWords[1]);
9300 }
9301 }
9302
9303 psli->slistat.mbox_cmd++;
9304 evtctr = psli->slistat.mbox_event;
9305
9306 /* next set own bit for the adapter and copy over command word */
9307 mbx->mbxOwner = OWN_CHIP;
9308
9309 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9310 /* Populate mbox extension offset word. */
9311 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9312 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9313 = (uint8_t *)phba->mbox_ext
9314 - (uint8_t *)phba->mbox;
9315 }
9316
9317 /* Copy the mailbox extension data */
9318 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
9319 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
9320 (uint8_t *)phba->mbox_ext,
9321 pmbox->in_ext_byte_len);
9322 }
9323 /* Copy command data to host SLIM area */
9324 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9325 } else {
9326 /* Populate mbox extension offset word. */
9327 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9328 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9329 = MAILBOX_HBA_EXT_OFFSET;
9330
9331 /* Copy the mailbox extension data */
9332 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
9333 lpfc_memcpy_to_slim(phba->MBslimaddr +
9334 MAILBOX_HBA_EXT_OFFSET,
9335 pmbox->ctx_buf, pmbox->in_ext_byte_len);
9336
9337 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9338 /* copy command data into host mbox for cmpl */
9339 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9340 MAILBOX_CMD_SIZE);
9341
9342		/* First copy mbox command data to HBA SLIM, skip past first
9343		 * word */
9344		to_slim = phba->MBslimaddr + sizeof(uint32_t);
9345		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9346			    MAILBOX_CMD_SIZE - sizeof(uint32_t));
9347
9348 /* Next copy over first word, with mbxOwner set */
9349 ldata = *((uint32_t *)mbx);
9350 to_slim = phba->MBslimaddr;
9351 writel(ldata, to_slim);
9352 readl(to_slim); /* flush */
9353
9354 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9355 /* switch over to host mailbox */
9356 psli->sli_flag |= LPFC_SLI_ACTIVE;
9357 }
9358
9359 wmb();
9360
9361 switch (flag) {
9362 case MBX_NOWAIT:
9363 /* Set up reference to mailbox command */
9364 psli->mbox_active = pmbox;
9365 /* Interrupt board to do it */
9366 writel(CA_MBATT, phba->CAregaddr);
9367 readl(phba->CAregaddr); /* flush */
9368 /* Don't wait for it to finish, just return */
9369 break;
9370
9371 case MBX_POLL:
9372 /* Set up null reference to mailbox command */
9373 psli->mbox_active = NULL;
9374 /* Interrupt board to do it */
9375 writel(CA_MBATT, phba->CAregaddr);
9376 readl(phba->CAregaddr); /* flush */
9377
9378 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9379 /* First read mbox status word */
9380 word0 = *((uint32_t *)phba->mbox);
9381 word0 = le32_to_cpu(word0);
9382 } else {
9383 /* First read mbox status word */
9384 if (lpfc_readl(phba->MBslimaddr, &word0)) {
9385 spin_unlock_irqrestore(&phba->hbalock,
9386 drvr_flag);
9387 goto out_not_finished;
9388 }
9389 }
9390
9391 /* Read the HBA Host Attention Register */
9392 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9393 spin_unlock_irqrestore(&phba->hbalock,
9394 drvr_flag);
9395 goto out_not_finished;
9396 }
9397 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9398 1000) + jiffies;
9399 i = 0;
9400 /* Wait for command to complete */
9401 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9402 (!(ha_copy & HA_MBATT) &&
9403 (phba->link_state > LPFC_WARM_START))) {
9404 if (time_after(jiffies, timeout)) {
9405 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9406 spin_unlock_irqrestore(&phba->hbalock,
9407 drvr_flag);
9408 goto out_not_finished;
9409 }
9410
9411			/* Check if we took a mbox interrupt while we were
9412			 * polling */
9413 if (((word0 & OWN_CHIP) != OWN_CHIP)
9414 && (evtctr != psli->slistat.mbox_event))
9415 break;
9416
9417 if (i++ > 10) {
9418 spin_unlock_irqrestore(&phba->hbalock,
9419 drvr_flag);
9420 msleep(1);
9421 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9422 }
9423
9424 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9425 /* First copy command data */
9426 word0 = *((uint32_t *)phba->mbox);
9427 word0 = le32_to_cpu(word0);
9428 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9429 MAILBOX_t *slimmb;
9430 uint32_t slimword0;
9431 /* Check real SLIM for any errors */
9432 slimword0 = readl(phba->MBslimaddr);
9433					slimmb = (MAILBOX_t *)&slimword0;
9434 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9435 && slimmb->mbxStatus) {
9436 psli->sli_flag &=
9437 ~LPFC_SLI_ACTIVE;
9438 word0 = slimword0;
9439 }
9440 }
9441 } else {
9442 /* First copy command data */
9443 word0 = readl(phba->MBslimaddr);
9444 }
9445 /* Read the HBA Host Attention Register */
9446 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9447 spin_unlock_irqrestore(&phba->hbalock,
9448 drvr_flag);
9449 goto out_not_finished;
9450 }
9451 }
9452
9453 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9454 /* copy results back to user */
9455 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9456 MAILBOX_CMD_SIZE);
9457 /* Copy the mailbox extension data */
9458 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9459 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9460 pmbox->ctx_buf,
9461 pmbox->out_ext_byte_len);
9462 }
9463 } else {
9464 /* First copy command data */
9465 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9466 MAILBOX_CMD_SIZE);
9467 /* Copy the mailbox extension data */
9468 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9469 lpfc_memcpy_from_slim(
9470 pmbox->ctx_buf,
9471 phba->MBslimaddr +
9472 MAILBOX_HBA_EXT_OFFSET,
9473 pmbox->out_ext_byte_len);
9474 }
9475 }
9476
9477 writel(HA_MBATT, phba->HAregaddr);
9478 readl(phba->HAregaddr); /* flush */
9479
9480 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9481 status = mbx->mbxStatus;
9482 }
9483
9484 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9485 return status;
9486
9487out_not_finished:
9488 if (processing_queue) {
9489 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9490 lpfc_mbox_cmpl_put(phba, pmbox);
9491 }
9492 return MBX_NOT_FINISHED;
9493}
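
/*
 * Caller-side sketch of the ownership rules documented above, modeled on
 * the polled DOWN_LINK sequence used during HBA setup. In polled mode the
 * command has completed (or failed) by the time this returns, so the
 * caller frees the mailbox either way:
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	lpfc_down_link(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	mempool_free(mboxq, phba->mbox_mem_pool);
 *
 * On the MBX_NOWAIT path the SLI layer keeps ownership for MBX_BUSY and
 * MBX_SUCCESS until the completion handler has run.
 */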
9494
9495/**
9496 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9497 * @phba: Pointer to HBA context object.
9498 *
9499 * The function blocks the posting of SLI4 asynchronous mailbox commands from
9500 * the driver internal pending mailbox queue. It will then try to wait out any
9501 * outstanding mailbox command before returning.
9502 *
9503 * Returns:
9504 * 0 - the outstanding mailbox command completed.
9505 * 1 - the wait for the outstanding mailbox command timed out.
9506 **/
9507static int
9508lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9509{
9510 struct lpfc_sli *psli = &phba->sli;
9511 LPFC_MBOXQ_t *mboxq;
9512 int rc = 0;
9513 unsigned long timeout = 0;
9514 u32 sli_flag;
9515 u8 cmd, subsys, opcode;
9516
9517 /* Mark the asynchronous mailbox command posting as blocked */
9518 spin_lock_irq(&phba->hbalock);
9519 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9520 /* Determine how long we might wait for the active mailbox
9521 * command to be gracefully completed by firmware.
9522 */
9523 if (phba->sli.mbox_active)
9524 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9525 phba->sli.mbox_active) *
9526 1000) + jiffies;
9527 spin_unlock_irq(&phba->hbalock);
9528
9529 /* Make sure the mailbox is really active */
9530 if (timeout)
9531 lpfc_sli4_process_missed_mbox_completions(phba);
9532
9533 /* Wait for the outstanding mailbox command to complete */
9534 while (phba->sli.mbox_active) {
9535 /* Check active mailbox complete status every 2ms */
9536 msleep(2);
9537 if (time_after(jiffies, timeout)) {
9538 /* Timeout, mark the outstanding cmd not complete */
9539
9540			/* Sanity check: sli.mbox_active may have completed or been
9541			 * cancelled from another context during the last 2ms sleep,
9542			 * so take hbalock to be sure before logging.
9543 */
9544 spin_lock_irq(&phba->hbalock);
9545 if (phba->sli.mbox_active) {
9546 mboxq = phba->sli.mbox_active;
9547 cmd = mboxq->u.mb.mbxCommand;
9548 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9549 mboxq);
9550 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9551 mboxq);
9552 sli_flag = psli->sli_flag;
9553 spin_unlock_irq(&phba->hbalock);
9554 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9555 "2352 Mailbox command x%x "
9556 "(x%x/x%x) sli_flag x%x could "
9557 "not complete\n",
9558 cmd, subsys, opcode,
9559 sli_flag);
9560 } else {
9561 spin_unlock_irq(&phba->hbalock);
9562 }
9563
9564 rc = 1;
9565 break;
9566 }
9567 }
9568
9569	/* Cannot cleanly block async mailbox command posting, fail it */
9570 if (rc) {
9571 spin_lock_irq(&phba->hbalock);
9572 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9573 spin_unlock_irq(&phba->hbalock);
9574 }
9575 return rc;
9576}
9577
9578/**
9579 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
9580 * @phba: Pointer to HBA context object.
9581 *
9582 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9583 * commands from the driver internal pending mailbox queue. It makes sure
9584 * that there is no outstanding mailbox command before resuming posting
9585 * asynchronous mailbox commands. If, for any reason, there is an outstanding
9586 * mailbox command, it will try to wait it out before resuming asynchronous
9587 * mailbox command posting.
9588 **/
9589static void
9590lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9591{
9592 struct lpfc_sli *psli = &phba->sli;
9593
9594 spin_lock_irq(&phba->hbalock);
9595 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9596 /* Asynchronous mailbox posting is not blocked, do nothing */
9597 spin_unlock_irq(&phba->hbalock);
9598 return;
9599 }
9600
9601	/* The outstanding synchronous mailbox command is guaranteed to be
9602	 * done by now, either successfully or by timeout. A timed-out
9603	 * command is always removed, so just unblock posting of async
9604	 * mailbox commands and resume.
9605 */
9606 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9607 spin_unlock_irq(&phba->hbalock);
9608
9609 /* wake up worker thread to post asynchronous mailbox command */
9610 lpfc_worker_wake_up(phba);
9611}
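
/*
 * The block/unblock pair brackets a synchronous mailbox issued while
 * interrupts are enabled; see lpfc_sli_issue_mbox_s4() below for the
 * actual call site:
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */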
9612
9613/**
9614 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9615 * @phba: Pointer to HBA context object.
9616 * @mboxq: Pointer to mailbox object.
9617 *
9618 * The function waits for the bootstrap mailbox register ready bit from
9619 * port for twice the regular mailbox command timeout value.
9620 * Returns:
9621 * 0 - no timeout on waiting for bootstrap mailbox register ready.
9622 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
9623 **/
9624static int
9625lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9626{
9627 uint32_t db_ready;
9628 unsigned long timeout;
9629 struct lpfc_register bmbx_reg;
9630
9631 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9632 * 1000) + jiffies;
9633
9634 do {
9635 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9636 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9637 if (!db_ready)
9638 mdelay(2);
9639
9640 if (time_after(jiffies, timeout))
9641 return MBXERR_ERROR;
9642 } while (!db_ready);
9643
9644 return 0;
9645}
9646
9647/**
9648 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9649 * @phba: Pointer to HBA context object.
9650 * @mboxq: Pointer to mailbox object.
9651 *
9652 * The function posts a mailbox to the port. The mailbox is expected
9653 * to be completely filled in and ready for the port to operate on it.
9654 * This routine executes a synchronous completion operation on the
9655 * mailbox by polling for its completion.
9656 *
9657 * The caller must not be holding any locks when calling this routine.
9658 *
9659 * Returns:
9660 * MBX_SUCCESS - mailbox posted successfully
9661 * Any of the MBX error values.
9662 **/
9663static int
9664lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9665{
9666 int rc = MBX_SUCCESS;
9667 unsigned long iflag;
9668 uint32_t mcqe_status;
9669 uint32_t mbx_cmnd;
9670 struct lpfc_sli *psli = &phba->sli;
9671 struct lpfc_mqe *mb = &mboxq->u.mqe;
9672 struct lpfc_bmbx_create *mbox_rgn;
9673 struct dma_address *dma_address;
9674
9675 /*
9676 * Only one mailbox can be active to the bootstrap mailbox region
9677 * at a time and there is no queueing provided.
9678 */
9679 spin_lock_irqsave(&phba->hbalock, iflag);
9680 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9681 spin_unlock_irqrestore(&phba->hbalock, iflag);
9682 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9683 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9684 "cannot issue Data: x%x x%x\n",
9685 mboxq->vport ? mboxq->vport->vpi : 0,
9686 mboxq->u.mb.mbxCommand,
9687 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9688 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9689 psli->sli_flag, MBX_POLL);
9690 return MBXERR_ERROR;
9691 }
9692	/* The driver grabs the token and owns it until release */
9693 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9694 phba->sli.mbox_active = mboxq;
9695 spin_unlock_irqrestore(&phba->hbalock, iflag);
9696
9697	/* wait for bootstrap mbox register readiness */
9698 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9699 if (rc)
9700 goto exit;
9701 /*
9702 * Initialize the bootstrap memory region to avoid stale data areas
9703 * in the mailbox post. Then copy the caller's mailbox contents to
9704 * the bmbx mailbox region.
9705 */
9706 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9707 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9708 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9709 sizeof(struct lpfc_mqe));
9710
9711 /* Post the high mailbox dma address to the port and wait for ready. */
9712 dma_address = &phba->sli4_hba.bmbx.dma_address;
9713 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9714
9715 /* wait for bootstrap mbox register for hi-address write done */
9716 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9717 if (rc)
9718 goto exit;
9719
9720 /* Post the low mailbox dma address to the port. */
9721 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9722
9723 /* wait for bootstrap mbox register for low address write done */
9724 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9725 if (rc)
9726 goto exit;
9727
9728 /*
9729 * Read the CQ to ensure the mailbox has completed.
9730 * If so, update the mailbox status so that the upper layers
9731 * can complete the request normally.
9732 */
9733 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9734 sizeof(struct lpfc_mqe));
9735 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9736 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9737 sizeof(struct lpfc_mcqe));
9738 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9739 /*
9740 * When the CQE status indicates a failure and the mailbox status
9741 * indicates success then copy the CQE status into the mailbox status
9742 * (and prefix it with x4000).
9743 */
9744 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9745 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9746 bf_set(lpfc_mqe_status, mb,
9747 (LPFC_MBX_ERROR_RANGE | mcqe_status));
9748 rc = MBXERR_ERROR;
9749 } else
9750 lpfc_sli4_swap_str(phba, mboxq);
9751
9752 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9753 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9754 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9755 " x%x x%x CQ: x%x x%x x%x x%x\n",
9756 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9757 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9758 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9759 bf_get(lpfc_mqe_status, mb),
9760 mb->un.mb_words[0], mb->un.mb_words[1],
9761 mb->un.mb_words[2], mb->un.mb_words[3],
9762 mb->un.mb_words[4], mb->un.mb_words[5],
9763 mb->un.mb_words[6], mb->un.mb_words[7],
9764 mb->un.mb_words[8], mb->un.mb_words[9],
9765 mb->un.mb_words[10], mb->un.mb_words[11],
9766 mb->un.mb_words[12], mboxq->mcqe.word0,
9767 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
9768 mboxq->mcqe.trailer);
9769exit:
9770	/* We hold the token; release it and clear the active flag under the lock */
9771 spin_lock_irqsave(&phba->hbalock, iflag);
9772 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9773 phba->sli.mbox_active = NULL;
9774 spin_unlock_irqrestore(&phba->hbalock, iflag);
9775 return rc;
9776}
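
/*
 * Summary sketch of the bootstrap handshake performed above; every
 * doorbell write is gated by lpfc_sli4_wait_bmbx_ready():
 *
 *	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
 *			       sizeof(struct lpfc_mqe));
 *	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
 *	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
 *
 * then the MQE and MCQE are copied back and a failing MCQE status is
 * folded into the MQE status, prefixed with LPFC_MBX_ERROR_RANGE.
 */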
9777
9778/**
9779 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
9780 * @phba: Pointer to HBA context object.
9781 * @mboxq: Pointer to mailbox object.
9782 * @flag: Flag indicating how the mailbox need to be processed.
9783 *
9784 * This function is called by discovery code and HBA management code to submit
9785 * a mailbox command to firmware with SLI-4 interface spec.
9786 *
9787 * Return codes: the caller owns the mailbox command after the return of
9788 * the function.
9789 **/
9790static int
9791lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9792 uint32_t flag)
9793{
9794 struct lpfc_sli *psli = &phba->sli;
9795 unsigned long iflags;
9796 int rc;
9797
9798	/* dump the issued mailbox command if idiag dumping is set up */
9799 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9800
9801 rc = lpfc_mbox_dev_check(phba);
9802 if (unlikely(rc)) {
9803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9804 "(%d):2544 Mailbox command x%x (x%x/x%x) "
9805 "cannot issue Data: x%x x%x\n",
9806 mboxq->vport ? mboxq->vport->vpi : 0,
9807 mboxq->u.mb.mbxCommand,
9808 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9809 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9810 psli->sli_flag, flag);
9811 goto out_not_finished;
9812 }
9813
9814 /* Detect polling mode and jump to a handler */
9815 if (!phba->sli4_hba.intr_enable) {
9816 if (flag == MBX_POLL)
9817 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9818 else
9819 rc = -EIO;
9820 if (rc != MBX_SUCCESS)
9821 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9822 "(%d):2541 Mailbox command x%x "
9823 "(x%x/x%x) failure: "
9824 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9825 "Data: x%x x%x\n",
9826 mboxq->vport ? mboxq->vport->vpi : 0,
9827 mboxq->u.mb.mbxCommand,
9828 lpfc_sli_config_mbox_subsys_get(phba,
9829 mboxq),
9830 lpfc_sli_config_mbox_opcode_get(phba,
9831 mboxq),
9832 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9833 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9834 bf_get(lpfc_mcqe_ext_status,
9835 &mboxq->mcqe),
9836 psli->sli_flag, flag);
9837 return rc;
9838 } else if (flag == MBX_POLL) {
9839 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9840 "(%d):2542 Try to issue mailbox command "
9841 "x%x (x%x/x%x) synchronously ahead of async "
9842 "mailbox command queue: x%x x%x\n",
9843 mboxq->vport ? mboxq->vport->vpi : 0,
9844 mboxq->u.mb.mbxCommand,
9845 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9846 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9847 psli->sli_flag, flag);
9848 /* Try to block the asynchronous mailbox posting */
9849 rc = lpfc_sli4_async_mbox_block(phba);
9850 if (!rc) {
9851 /* Successfully blocked, now issue sync mbox cmd */
9852 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9853 if (rc != MBX_SUCCESS)
9854 lpfc_printf_log(phba, KERN_WARNING,
9855 LOG_MBOX | LOG_SLI,
9856 "(%d):2597 Sync Mailbox command "
9857 "x%x (x%x/x%x) failure: "
9858 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9859 "Data: x%x x%x\n",
9860 mboxq->vport ? mboxq->vport->vpi : 0,
9861 mboxq->u.mb.mbxCommand,
9862 lpfc_sli_config_mbox_subsys_get(phba,
9863 mboxq),
9864 lpfc_sli_config_mbox_opcode_get(phba,
9865 mboxq),
9866 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9867 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9868 bf_get(lpfc_mcqe_ext_status,
9869 &mboxq->mcqe),
9870 psli->sli_flag, flag);
9871 /* Unblock the async mailbox posting afterward */
9872 lpfc_sli4_async_mbox_unblock(phba);
9873 }
9874 return rc;
9875 }
9876
9877 /* Now, interrupt mode asynchronous mailbox command */
9878 rc = lpfc_mbox_cmd_check(phba, mboxq);
9879 if (rc) {
9880 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9881 "(%d):2543 Mailbox command x%x (x%x/x%x) "
9882 "cannot issue Data: x%x x%x\n",
9883 mboxq->vport ? mboxq->vport->vpi : 0,
9884 mboxq->u.mb.mbxCommand,
9885 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9886 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9887 psli->sli_flag, flag);
9888 goto out_not_finished;
9889 }
9890
9891 /* Put the mailbox command to the driver internal FIFO */
9892 psli->slistat.mbox_busy++;
9893 spin_lock_irqsave(&phba->hbalock, iflags);
9894 lpfc_mbox_put(phba, mboxq);
9895 spin_unlock_irqrestore(&phba->hbalock, iflags);
9896 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9897 "(%d):0354 Mbox cmd issue - Enqueue Data: "
9898 "x%x (x%x/x%x) x%x x%x x%x\n",
9899 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9900 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9901 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9902 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9903 phba->pport->port_state,
9904 psli->sli_flag, MBX_NOWAIT);
9905 /* Wake up worker thread to transport mailbox command from head */
9906 lpfc_worker_wake_up(phba);
9907
9908 return MBX_BUSY;
9909
9910out_not_finished:
9911 return MBX_NOT_FINISHED;
9912}
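
/*
 * Async-path note for the routine above: MBX_BUSY is the normal success
 * return here; it means the command was queued on the driver-internal
 * FIFO and the worker thread will post it to the port through
 * lpfc_sli4_post_async_mbox() below.
 */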
9913
9914/**
9915 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9916 * @phba: Pointer to HBA context object.
9917 *
9918 * This function is called by worker thread to send a mailbox command to
9919 * SLI4 HBA firmware.
9920 *
9921 **/
9922int
9923lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9924{
9925 struct lpfc_sli *psli = &phba->sli;
9926 LPFC_MBOXQ_t *mboxq;
9927 int rc = MBX_SUCCESS;
9928 unsigned long iflags;
9929 struct lpfc_mqe *mqe;
9930 uint32_t mbx_cmnd;
9931
9932	/* Check interrupt mode before posting async mailbox command */
9933 if (unlikely(!phba->sli4_hba.intr_enable))
9934 return MBX_NOT_FINISHED;
9935
9936 /* Check for mailbox command service token */
9937 spin_lock_irqsave(&phba->hbalock, iflags);
9938 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9939 spin_unlock_irqrestore(&phba->hbalock, iflags);
9940 return MBX_NOT_FINISHED;
9941 }
9942 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9943 spin_unlock_irqrestore(&phba->hbalock, iflags);
9944 return MBX_NOT_FINISHED;
9945 }
9946 if (unlikely(phba->sli.mbox_active)) {
9947 spin_unlock_irqrestore(&phba->hbalock, iflags);
9948 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9949 "0384 There is pending active mailbox cmd\n");
9950 return MBX_NOT_FINISHED;
9951 }
9952 /* Take the mailbox command service token */
9953 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9954
9955 /* Get the next mailbox command from head of queue */
9956 mboxq = lpfc_mbox_get(phba);
9957
9958	/* If no more mailbox commands are waiting to be posted, we're done */
9959 if (!mboxq) {
9960 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9961 spin_unlock_irqrestore(&phba->hbalock, iflags);
9962 return MBX_SUCCESS;
9963 }
9964 phba->sli.mbox_active = mboxq;
9965 spin_unlock_irqrestore(&phba->hbalock, iflags);
9966
9967 /* Check device readiness for posting mailbox command */
9968 rc = lpfc_mbox_dev_check(phba);
9969 if (unlikely(rc))
9970 /* Driver clean routine will clean up pending mailbox */
9971 goto out_not_finished;
9972
9973 /* Prepare the mbox command to be posted */
9974 mqe = &mboxq->u.mqe;
9975 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9976
9977 /* Start timer for the mbox_tmo and log some mailbox post messages */
9978 mod_timer(&psli->mbox_tmo, (jiffies +
9979 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9980
9981 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9982 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9983 "x%x x%x\n",
9984 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9985 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9986 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9987 phba->pport->port_state, psli->sli_flag);
9988
9989 if (mbx_cmnd != MBX_HEARTBEAT) {
9990 if (mboxq->vport) {
9991 lpfc_debugfs_disc_trc(mboxq->vport,
9992 LPFC_DISC_TRC_MBOX_VPORT,
9993 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9994 mbx_cmnd, mqe->un.mb_words[0],
9995 mqe->un.mb_words[1]);
9996 } else {
9997 lpfc_debugfs_disc_trc(phba->pport,
9998 LPFC_DISC_TRC_MBOX,
9999 "MBOX Send: cmd:x%x mb:x%x x%x",
10000 mbx_cmnd, mqe->un.mb_words[0],
10001 mqe->un.mb_words[1]);
10002 }
10003 }
10004 psli->slistat.mbox_cmd++;
10005
10006 /* Post the mailbox command to the port */
10007 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
10008 if (rc != MBX_SUCCESS) {
10009 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10010 "(%d):2533 Mailbox command x%x (x%x/x%x) "
10011 "cannot issue Data: x%x x%x\n",
10012 mboxq->vport ? mboxq->vport->vpi : 0,
10013 mboxq->u.mb.mbxCommand,
10014 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10015 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10016 psli->sli_flag, MBX_NOWAIT);
10017 goto out_not_finished;
10018 }
10019
10020 return rc;
10021
10022out_not_finished:
10023 spin_lock_irqsave(&phba->hbalock, iflags);
10024 if (phba->sli.mbox_active) {
10025 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10026 __lpfc_mbox_cmpl_put(phba, mboxq);
10027 /* Release the token */
10028 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10029 phba->sli.mbox_active = NULL;
10030 }
10031 spin_unlock_irqrestore(&phba->hbalock, iflags);
10032
10033 return MBX_NOT_FINISHED;
10034}
10035
10036/**
10037 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
10038 * @phba: Pointer to HBA context object.
10039 * @pmbox: Pointer to mailbox object.
10040 * @flag: Flag indicating how the mailbox need to be processed.
10041 *
10042 * This routine invokes the actual SLI3 or SLI4 mailbox issuing routine
10043 * through the API jump table function pointer in the lpfc_hba struct.
10044 *
10045 * Return codes: the caller owns the mailbox command after the return of
10046 * the function.
10047 **/
10048int
10049lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
10050{
10051 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
10052}
10053
10054/**
10055 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
10056 * @phba: The hba struct for which this call is being executed.
10057 * @dev_grp: The HBA PCI-Device group number.
10058 *
10059 * This routine sets up the mbox interface API function jump table in @phba
10060 * struct.
10061 * Returns: 0 - success, -ENODEV - failure.
10062 **/
10063int
10064lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10065{
10067 switch (dev_grp) {
10068 case LPFC_PCI_DEV_LP:
10069 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10070 phba->lpfc_sli_handle_slow_ring_event =
10071 lpfc_sli_handle_slow_ring_event_s3;
10072 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10073 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10074 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10075 break;
10076 case LPFC_PCI_DEV_OC:
10077 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10078 phba->lpfc_sli_handle_slow_ring_event =
10079 lpfc_sli_handle_slow_ring_event_s4;
10080 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10081 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10082 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10083 break;
10084 default:
10085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10086 "1420 Invalid HBA PCI-device group: 0x%x\n",
10087 dev_grp);
10088 return -ENODEV;
10089 }
10090 return 0;
10091}
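
/*
 * Usage sketch: called once during PCI probe before any mailbox traffic,
 * e.g. for an SLI4 (PCI_DEV_OC) function:
 *
 *	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *
 * after which lpfc_sli_issue_mbox() below dispatches through
 * phba->lpfc_sli_issue_mbox without any rev-specific branching.
 */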
10092
10093/**
10094 * __lpfc_sli_ringtx_put - Add an iocb to the txq
10095 * @phba: Pointer to HBA context object.
10096 * @pring: Pointer to driver SLI ring object.
10097 * @piocb: Pointer to address of newly added command iocb.
10098 *
10099 * This function is called with hbalock held for SLI3 ports or
10100 * the ring lock held for SLI4 ports to add a command
10101 * iocb to the txq when SLI layer cannot submit the command iocb
10102 * to the ring.
10103 **/
10104void
10105__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10106 struct lpfc_iocbq *piocb)
10107{
10108 if (phba->sli_rev == LPFC_SLI_REV4)
10109 lockdep_assert_held(&pring->ring_lock);
10110 else
10111 lockdep_assert_held(&phba->hbalock);
10112 /* Insert the caller's iocb in the txq tail for later processing. */
10113 list_add_tail(&piocb->list, &pring->txq);
10114}
10115
10116/**
10117 * lpfc_sli_next_iocb - Get the next iocb in the txq
10118 * @phba: Pointer to HBA context object.
10119 * @pring: Pointer to driver SLI ring object.
10120 * @piocb: Pointer to address of newly added command iocb.
10121 *
10122 * This function is called with hbalock held before a new
10123 * iocb is submitted to the firmware. This function checks the
10124 * txq to flush the iocbs in the txq to the firmware before
10125 * submitting new iocbs to the firmware.
10126 * If there are iocbs in the txq which need to be submitted
10127 * to firmware, lpfc_sli_next_iocb returns the first element
10128 * of the txq after dequeuing it from txq.
10129 * If there is no iocb in the txq then the function will return
10130 * *piocb and set *piocb to NULL. The caller needs to check
10131 * *piocb to find out if there are more commands in the txq.
10132 **/
10133static struct lpfc_iocbq *
10134lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10135 struct lpfc_iocbq **piocb)
10136{
10137	struct lpfc_iocbq *nextiocb;
10138
10139 lockdep_assert_held(&phba->hbalock);
10140
10141 nextiocb = lpfc_sli_ringtx_get(phba, pring);
10142 if (!nextiocb) {
10143 nextiocb = *piocb;
10144 *piocb = NULL;
10145 }
10146
10147 return nextiocb;
10148}
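
/*
 * Typical drain loop built on this helper, as used by
 * __lpfc_sli_issue_iocb_s3() below: queued txq iocbs are flushed ahead
 * of the new command, and *piocb becoming NULL signals that the new
 * command itself has been consumed:
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 */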
10149
10150/**
10151 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10152 * @phba: Pointer to HBA context object.
10153 * @ring_number: SLI ring number to issue iocb on.
10154 * @piocb: Pointer to command iocb.
10155 * @flag: Flag indicating if this command can be put into txq.
10156 *
10157 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10158 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10159 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
10160 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
10161 * this function allows only iocbs for posting buffers. This function finds
10162 * next available slot in the command ring and posts the command to the
10163 * available slot and writes the port attention register to request HBA start
10164 * processing new iocb. If there is no slot available in the ring and
10165 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
10166 * the function returns IOCB_BUSY.
10167 *
10168 * This function is called with hbalock held. The function will return success
10169 * after it successfully submits the iocb to firmware or after adding it to the
10170 * txq.
10171 **/
10172static int
10173__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10174 struct lpfc_iocbq *piocb, uint32_t flag)
10175{
10176 struct lpfc_iocbq *nextiocb;
10177 IOCB_t *iocb;
10178 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10179
10180 lockdep_assert_held(&phba->hbalock);
10181
10182 if (piocb->cmd_cmpl && (!piocb->vport) &&
10183 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10184 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10185 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10186 "1807 IOCB x%x failed. No vport\n",
10187 piocb->iocb.ulpCommand);
10188 dump_stack();
10189 return IOCB_ERROR;
10190 }
10191
10193 /* If the PCI channel is in offline state, do not post iocbs. */
10194 if (unlikely(pci_channel_offline(phba->pcidev)))
10195 return IOCB_ERROR;
10196
10197 /* If HBA has a deferred error attention, fail the iocb. */
10198 if (unlikely(phba->hba_flag & DEFER_ERATT))
10199 return IOCB_ERROR;
10200
10201 /*
10202 * We should never get an IOCB if we are in a < LINK_DOWN state
10203 */
10204 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10205 return IOCB_ERROR;
10206
10207 /*
10208	 * Check to see if we are blocking IOCB processing because of an
10209	 * outstanding event.
10210 */
10211 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10212 goto iocb_busy;
10213
10214 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10215 /*
10216 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10217 * can be issued if the link is not up.
10218 */
10219 switch (piocb->iocb.ulpCommand) {
10220 case CMD_GEN_REQUEST64_CR:
10221 case CMD_GEN_REQUEST64_CX:
10222 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
10223 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
10224 FC_RCTL_DD_UNSOL_CMD) ||
10225 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
10226 MENLO_TRANSPORT_TYPE))
10227
10228 goto iocb_busy;
10229 break;
10230 case CMD_QUE_RING_BUF_CN:
10231 case CMD_QUE_RING_BUF64_CN:
10232 /*
10233 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10234 * completion, cmd_cmpl MUST be 0.
10235 */
10236 if (piocb->cmd_cmpl)
10237 piocb->cmd_cmpl = NULL;
10238 fallthrough;
10239 case CMD_CREATE_XRI_CR:
10240 case CMD_CLOSE_XRI_CN:
10241 case CMD_CLOSE_XRI_CX:
10242 break;
10243 default:
10244 goto iocb_busy;
10245 }
10246
10247 /*
10248 * For FCP commands, we must be in a state where we can process link
10249 * attention events.
10250 */
10251 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10252 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10253 goto iocb_busy;
10254 }
10255
10256 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10257 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10258 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10259
10260 if (iocb)
10261 lpfc_sli_update_ring(phba, pring);
10262 else
10263 lpfc_sli_update_full_ring(phba, pring);
10264
10265 if (!piocb)
10266 return IOCB_SUCCESS;
10267
10268 goto out_busy;
10269
10270 iocb_busy:
10271 pring->stats.iocb_cmd_delay++;
10272
10273 out_busy:
10274
10275 if (!(flag & SLI_IOCB_RET_IOCB)) {
10276 __lpfc_sli_ringtx_put(phba, pring, piocb);
10277 return IOCB_SUCCESS;
10278 }
10279
10280 return IOCB_BUSY;
10281}
10282
10283/**
10284 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10285 * @phba: Pointer to HBA context object.
10286 * @ring_number: SLI ring number to issue iocb on.
10287 * @piocb: Pointer to command iocb.
10288 * @flag: Flag indicating if this command can be put into txq.
10289 *
10290 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10291 * version to send an iocb command to an HBA with SLI-3 interface spec.
10292 *
10293 * This function takes the hbalock before invoking the lockless version.
10294 * The function will return success after it successfully submits the iocb to
10295 * firmware or after adding it to the txq.
10296 **/
10297static int
10298__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10299 struct lpfc_iocbq *piocb, uint32_t flag)
10300{
10301 unsigned long iflags;
10302 int rc;
10303
10304 spin_lock_irqsave(&phba->hbalock, iflags);
10305 rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10306 spin_unlock_irqrestore(&phba->hbalock, iflags);
10307
10308 return rc;
10309}
10310
10311/**
10312 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10313 * @phba: Pointer to HBA context object.
10314 * @ring_number: SLI ring number to issue wqe on.
10315 * @piocb: Pointer to command iocb.
10316 * @flag: Flag indicating if this command can be put into txq.
10317 *
10318 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10319 * a wqe command to an HBA with SLI-4 interface spec.
10320 *
10321 * This function is a lockless version. The function will return success
10322 * after it successfully submits the wqe to firmware or after adding it to the
10323 * txq.
10324 **/
10325static int
10326__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10327 struct lpfc_iocbq *piocb, uint32_t flag)
10328{
10329 int rc;
10330 struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
10331
10332 lpfc_prep_embed_io(phba, lpfc_cmd);
10333 rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10334 return rc;
10335}
10336
10337void
10338lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10339{
10340 struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10341 union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10342 struct sli4_sge *sgl;
10343
10344 /* 128 byte wqe support here */
10345 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10346
10347 if (phba->fcp_embed_io) {
10348 struct fcp_cmnd *fcp_cmnd;
10349 u32 *ptr;
10350
10351 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10352
10353 /* Word 0-2 - FCP_CMND */
10354 wqe->generic.bde.tus.f.bdeFlags =
10355 BUFF_TYPE_BDE_IMMED;
10356 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10357 wqe->generic.bde.addrHigh = 0;
10358 wqe->generic.bde.addrLow = 88; /* Word 22 */
10359
10360 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10361 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10362
10363 /* Word 22-29 FCP CMND Payload */
10364 ptr = &wqe->words[22];
10365 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10366 } else {
10367 /* Word 0-2 - Inline BDE */
10368 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
10369 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10370 wqe->generic.bde.addrHigh = sgl->addr_hi;
10371 wqe->generic.bde.addrLow = sgl->addr_lo;
10372
10373 /* Word 10 */
10374 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10375 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10376 }
10377
10378 /* add the VMID tags as per switch response */
10379 if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10380 if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
10381 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10382 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10383 (piocb->vmid_tag.cs_ctl_vmid));
10384 } else if (phba->cfg_vmid_app_header) {
10385 bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10386 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10387 wqe->words[31] = piocb->vmid_tag.app_id;
10388 }
10389 }
10390}
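
/*
 * Illustrative sketch (comments only, not driver code) of the two WQE
 * shapes lpfc_prep_embed_io() builds above:
 *
 *   fcp_embed_io enabled (wqes=1, dbde=0):
 *     Words 0-2:   immediate BDE (BUFF_TYPE_BDE_IMMED) with addrLow = 88,
 *                  the byte offset of word 22 within the 128-byte WQE
 *     Words 22-29: the FCP_CMND payload copied inline (sizeof(struct
 *                  fcp_cmnd) is 32 bytes, i.e. 8 words)
 *
 *   fcp_embed_io disabled (dbde=1, wqes=0):
 *     Words 0-2:   64-bit BDE (BUFF_TYPE_BDE_64) carrying the DMA address
 *                  of the FCP_CMND taken from the first SGE
 */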
10391
10392/**
10393 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10394 * @phba: Pointer to HBA context object.
10395 * @ring_number: SLI ring number to issue iocb on.
10396 * @piocb: Pointer to command iocb.
10397 * @flag: Flag indicating if this command can be put into txq.
10398 *
10399 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10400 * an iocb command to an HBA with SLI-4 interface spec.
10401 *
 * This function is called with the ring_lock held. The function returns
 * success after it successfully submits the iocb to firmware or after
 * adding it to the txq.
10405 **/
10406static int
10407__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10408 struct lpfc_iocbq *piocb, uint32_t flag)
10409{
10410 struct lpfc_sglq *sglq;
10411 union lpfc_wqe128 *wqe;
10412 struct lpfc_queue *wq;
10413 struct lpfc_sli_ring *pring;
10414 u32 ulp_command = get_job_cmnd(phba, piocb);
10415
10416 /* Get the WQ */
10417 if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10418 (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10419 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10420 } else {
10421 wq = phba->sli4_hba.els_wq;
10422 }
10423
10424 /* Get corresponding ring */
10425 pring = wq->pring;
10426
	/*
	 * The WQE can be either 64 or 128 bytes.
	 */
10430
10431 lockdep_assert_held(&pring->ring_lock);
10432 wqe = &piocb->wqe;
10433 if (piocb->sli4_xritag == NO_XRI) {
10434 if (ulp_command == CMD_ABORT_XRI_CX)
10435 sglq = NULL;
10436 else {
10437 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10438 if (!sglq) {
10439 if (!(flag & SLI_IOCB_RET_IOCB)) {
10440 __lpfc_sli_ringtx_put(phba,
10441 pring,
10442 piocb);
10443 return IOCB_SUCCESS;
10444 } else {
10445 return IOCB_BUSY;
10446 }
10447 }
10448 }
10449 } else if (piocb->cmd_flag & LPFC_IO_FCP) {
		/* These IOs already have an XRI and a mapped sgl. */
10451 sglq = NULL;
	} else {
		/*
		 * This is a continuation of a command (CX), so this
		 * sglq is on the active list.
		 */
10458 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10459 if (!sglq)
10460 return IOCB_ERROR;
10461 }
10462
10463 if (sglq) {
10464 piocb->sli4_lxritag = sglq->sli4_lxritag;
10465 piocb->sli4_xritag = sglq->sli4_xritag;
10466
10467 /* ABTS sent by initiator to CT exchange, the
10468 * RX_ID field will be filled with the newly
10469 * allocated responder XRI.
10470 */
10471 if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10472 piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10473 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10474 piocb->sli4_xritag);
10475
10476 bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10477 piocb->sli4_xritag);
10478
10479 if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10480 return IOCB_ERROR;
10481 }
10482
10483 if (lpfc_sli4_wq_put(wq, wqe))
10484 return IOCB_ERROR;
10485
10486 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10487
10488 return 0;
10489}
10490
/*
 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
 *
 * This routine wraps the actual FCP I/O issue function pointer from the
 * lpfc_hba struct, sending a WQE for SLI-4 or an IOCB for SLI-3.
 *
 * Return codes:
 * IOCB_ERROR - Error
 * IOCB_SUCCESS - Success
 * IOCB_BUSY - Busy
 **/
10503int
10504lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10505 struct lpfc_iocbq *piocb, uint32_t flag)
10506{
10507 return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10508}
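
/*
 * Usage sketch (illustrative only, not driver code): a caller submitting an
 * FCP command with SLI_IOCB_RET_IOCB, so a busy queue hands the iocb back
 * instead of parking it on the txq:
 *
 *	rc = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, &lpfc_cmd->cur_iocbq,
 *				   SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_BUSY)
 *		(fail the command back to the midlayer for a retry)
 */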
10509
10510/*
10511 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10512 *
 * This routine wraps the actual lockless IOCB issue function
 * pointer from the lpfc_hba struct.
10515 *
10516 * Return codes:
10517 * IOCB_ERROR - Error
10518 * IOCB_SUCCESS - Success
10519 * IOCB_BUSY - Busy
10520 **/
10521int
10522__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10523 struct lpfc_iocbq *piocb, uint32_t flag)
10524{
10525 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10526}
10527
10528static void
10529__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
10530 struct lpfc_vport *vport,
10531 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10532 u32 elscmd, u8 tmo, u8 expect_rsp)
10533{
10534 struct lpfc_hba *phba = vport->phba;
10535 IOCB_t *cmd;
10536
10537 cmd = &cmdiocbq->iocb;
10538 memset(cmd, 0, sizeof(*cmd));
10539
10540 cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10541 cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10542 cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10543
10544 if (expect_rsp) {
10545 cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
10546 cmd->un.elsreq64.remoteID = did; /* DID */
10547 cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
10548 cmd->ulpTimeout = tmo;
10549 } else {
10550 cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
10551 cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
10552 cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
10553 }
10554 cmd->ulpBdeCount = 1;
10555 cmd->ulpLe = 1;
10556 cmd->ulpClass = CLASS3;
10557
10558 /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
10559 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
10560 if (expect_rsp) {
10561 cmd->un.elsreq64.myID = vport->fc_myDID;
10562
10563 /* For ELS_REQUEST64_CR, use the VPI by default */
10564 cmd->ulpContext = phba->vpi_ids[vport->vpi];
10565 }
10566
10567 cmd->ulpCt_h = 0;
10568 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10569 if (elscmd == ELS_CMD_ECHO)
10570 cmd->ulpCt_l = 0; /* context = invalid RPI */
10571 else
10572 cmd->ulpCt_l = 1; /* context = VPI */
10573 }
10574}
10575
10576static void
10577__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
10578 struct lpfc_vport *vport,
10579 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10580 u32 elscmd, u8 tmo, u8 expect_rsp)
10581{
10582 struct lpfc_hba *phba = vport->phba;
10583 union lpfc_wqe128 *wqe;
10584 struct ulp_bde64_le *bde;
10585 u8 els_id;
10586
10587 wqe = &cmdiocbq->wqe;
10588 memset(wqe, 0, sizeof(*wqe));
10589
10590 /* Word 0 - 2 BDE */
10591 bde = (struct ulp_bde64_le *)&wqe->generic.bde;
10592 bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10593 bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10594 bde->type_size = cpu_to_le32(cmd_size);
10595 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10596
10597 if (expect_rsp) {
10598 bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
10599
10600 /* Transfer length */
10601 wqe->els_req.payload_len = cmd_size;
10602 wqe->els_req.max_response_payload_len = FCELSSIZE;
10603
10604 /* DID */
10605 bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
10606
10607 /* Word 11 - ELS_ID */
10608 switch (elscmd) {
10609 case ELS_CMD_PLOGI:
10610 els_id = LPFC_ELS_ID_PLOGI;
10611 break;
10612 case ELS_CMD_FLOGI:
10613 els_id = LPFC_ELS_ID_FLOGI;
10614 break;
10615 case ELS_CMD_LOGO:
10616 els_id = LPFC_ELS_ID_LOGO;
10617 break;
10618 case ELS_CMD_FDISC:
10619 if (!vport->fc_myDID) {
10620 els_id = LPFC_ELS_ID_FDISC;
10621 break;
10622 }
10623 fallthrough;
10624 default:
10625 els_id = LPFC_ELS_ID_DEFAULT;
10626 break;
10627 }
10628
10629 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10630 } else {
10631 /* DID */
10632 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
10633
10634 /* Transfer length */
10635 wqe->xmit_els_rsp.response_payload_len = cmd_size;
10636
10637 bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
10638 CMD_XMIT_ELS_RSP64_WQE);
10639 }
10640
10641 bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
10642 bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
10643 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
10644
10645 /* If we have NPIV enabled, we want to send ELS traffic by VPI.
10646 * For SLI4, since the driver controls VPIs we also want to include
10647 * all ELS pt2pt protocol traffic as well.
10648 */
10649 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
10650 (vport->fc_flag & FC_PT2PT)) {
10651 if (expect_rsp) {
10652 bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
10653
10654 /* For ELS_REQUEST64_WQE, use the VPI by default */
10655 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10656 phba->vpi_ids[vport->vpi]);
10657 }
10658
10659 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10660 if (elscmd == ELS_CMD_ECHO)
10661 bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
10662 else
10663 bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
10664 }
10665}
10666
10667void
10668lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10669 struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
10670 u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
10671 u8 expect_rsp)
10672{
10673 phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
10674 elscmd, tmo, expect_rsp);
10675}
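
/*
 * Usage sketch (illustrative only; elsiocb and pbuflist are assumed to have
 * been allocated and DMA-mapped by the caller): preparing a PLOGI that
 * expects a response and issuing it on the ELS ring:
 *
 *	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, pbuflist, cmd_size,
 *				  did, ELS_CMD_PLOGI, tmo, 1);
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 */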
10676
10677static void
10678__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10679 u16 rpi, u32 num_entry, u8 tmo)
10680{
10681 IOCB_t *cmd;
10682
10683 cmd = &cmdiocbq->iocb;
10684 memset(cmd, 0, sizeof(*cmd));
10685
10686 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10687 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10688 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10689 cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
10690
10691 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
10692 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
10693 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
10694
10695 cmd->ulpContext = rpi;
10696 cmd->ulpClass = CLASS3;
10697 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
10698 cmd->ulpBdeCount = 1;
10699 cmd->ulpLe = 1;
10700 cmd->ulpOwner = OWN_CHIP;
10701 cmd->ulpTimeout = tmo;
10702}
10703
10704static void
10705__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10706 u16 rpi, u32 num_entry, u8 tmo)
10707{
10708 union lpfc_wqe128 *cmdwqe;
10709 struct ulp_bde64_le *bde, *bpl;
10710 u32 xmit_len = 0, total_len = 0, size, type, i;
10711
10712 cmdwqe = &cmdiocbq->wqe;
10713 memset(cmdwqe, 0, sizeof(*cmdwqe));
10714
10715 /* Calculate total_len and xmit_len */
10716 bpl = (struct ulp_bde64_le *)bmp->virt;
10717 for (i = 0; i < num_entry; i++) {
10718 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10719 total_len += size;
10720 }
10721 for (i = 0; i < num_entry; i++) {
10722 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10723 type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
10724 if (type != ULP_BDE64_TYPE_BDE_64)
10725 break;
10726 xmit_len += size;
10727 }
10728
10729 /* Words 0 - 2 */
10730 bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
10731 bde->addr_low = bpl->addr_low;
10732 bde->addr_high = bpl->addr_high;
10733 bde->type_size = cpu_to_le32(xmit_len);
10734 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10735
10736 /* Word 3 */
10737 cmdwqe->gen_req.request_payload_len = xmit_len;
10738
10739 /* Word 5 */
10740 bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
10741 bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
10742 bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
10743 bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
10744
10745 /* Word 6 */
10746 bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
10747
10748 /* Word 7 */
10749 bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
10750 bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
10751 bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
10752 bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
10753
10754 /* Word 12 */
10755 cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
10756}
10757
10758void
10759lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10760 struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
10761{
10762 phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
10763}
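
/*
 * Usage sketch (illustrative only; bmp is assumed to hold a BPL whose
 * request entries come first, followed by the response entries): preparing
 * a GEN_REQUEST64 CT command addressed to the remote port's rpi:
 *
 *	lpfc_sli_prep_gen_req(phba, geniocb, bmp, rpi, num_entry, tmo);
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
 */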
10764
10765static void
10766__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
10767 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10768 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10769{
10770 IOCB_t *icmd;
10771
10772 icmd = &cmdiocbq->iocb;
10773 memset(icmd, 0, sizeof(*icmd));
10774
10775 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10776 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
10777 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10778 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
10779 icmd->un.xseq64.w5.hcsw.Fctl = LA;
10780 if (last_seq)
10781 icmd->un.xseq64.w5.hcsw.Fctl |= LS;
10782 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
10783 icmd->un.xseq64.w5.hcsw.Rctl = rctl;
10784 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
10785
10786 icmd->ulpBdeCount = 1;
10787 icmd->ulpLe = 1;
10788 icmd->ulpClass = CLASS3;
10789
10790 switch (cr_cx_cmd) {
10791 case CMD_XMIT_SEQUENCE64_CR:
10792 icmd->ulpContext = rpi;
10793 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
10794 break;
10795 case CMD_XMIT_SEQUENCE64_CX:
10796 icmd->ulpContext = ox_id;
10797 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
10798 break;
10799 default:
10800 break;
10801 }
10802}
10803
10804static void
10805__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
10806 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10807 u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10808{
10809 union lpfc_wqe128 *wqe;
10810 struct ulp_bde64 *bpl;
10811
10812 wqe = &cmdiocbq->wqe;
10813 memset(wqe, 0, sizeof(*wqe));
10814
10815 /* Words 0 - 2 */
10816 bpl = (struct ulp_bde64 *)bmp->virt;
10817 wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
10818 wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
10819 wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
10820
10821 /* Word 5 */
10822 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
10823 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
10824 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
10825 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
10826 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
10827
10828 /* Word 6 */
10829 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
10830
10831 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
10832 CMD_XMIT_SEQUENCE64_WQE);
10833
10834 /* Word 7 */
10835 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
10836
10837 /* Word 9 */
10838 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
10839
10840 /* Word 12 */
10841 if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
10842 wqe->xmit_sequence.xmit_len = full_size;
10843 else
10844 wqe->xmit_sequence.xmit_len =
10845 wqe->xmit_sequence.bde.tus.f.bdeSize;
10846}
10847
10848void
10849lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10850 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10851 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10852{
10853 phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
10854 rctl, last_seq, cr_cx_cmd);
10855}
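
/*
 * Usage sketch (illustrative only): cr_cx_cmd selects the addressing mode,
 * CMD_XMIT_SEQUENCE64_CR to target the rpi or CMD_XMIT_SEQUENCE64_CX to
 * continue the exchange identified by ox_id:
 *
 *	lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, rpi, ox_id, num_entry,
 *				 FC_RCTL_DD_SOL_CTL, 1, CMD_XMIT_SEQUENCE64_CX);
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
 */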
10856
10857static void
10858__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
10859 u16 iotag, u8 ulp_class, u16 cqid, bool ia)
10860{
10861 IOCB_t *icmd = NULL;
10862
10863 icmd = &cmdiocbq->iocb;
10864 memset(icmd, 0, sizeof(*icmd));
10865
10866 /* Word 5 */
10867 icmd->un.acxri.abortContextTag = ulp_context;
10868 icmd->un.acxri.abortIoTag = iotag;
10869
10870 if (ia) {
10871 /* Word 7 */
10872 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
10873 } else {
10874 /* Word 3 */
10875 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
10876
10877 /* Word 7 */
10878 icmd->ulpClass = ulp_class;
10879 icmd->ulpCommand = CMD_ABORT_XRI_CN;
10880 }
10881
10882 /* Word 7 */
10883 icmd->ulpLe = 1;
10884}
10885
10886static void
10887__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
10888 u16 iotag, u8 ulp_class, u16 cqid, bool ia)
10889{
10890 union lpfc_wqe128 *wqe;
10891
10892 wqe = &cmdiocbq->wqe;
10893 memset(wqe, 0, sizeof(*wqe));
10894
10895 /* Word 3 */
10896 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10897 if (ia)
10898 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10899 else
10900 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10901
10902 /* Word 7 */
10903 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
10904
10905 /* Word 8 */
10906 wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
10907
10908 /* Word 9 */
10909 bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
10910
10911 /* Word 10 */
10912 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10913
10914 /* Word 11 */
10915 bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
10916 bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
10917}
10918
10919void
10920lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10921 u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
10922 bool ia)
10923{
10924 phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
10925 cqid, ia);
10926}
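
/*
 * Usage sketch (illustrative only, error handling omitted): building an
 * abort for an outstanding command and issuing it on that command's ring;
 * as the s3 variant above shows, ia selects a CLOSE_XRI (no ABTS on the
 * wire) instead of a full abort:
 *
 *	lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
 *				ulp_class, cqid, ia);
 *	rc = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbq, 0);
 */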
10927
10928/**
10929 * lpfc_sli_api_table_setup - Set up sli api function jump table
10930 * @phba: The hba struct for which this call is being executed.
10931 * @dev_grp: The HBA PCI-Device group number.
10932 *
10933 * This routine sets up the SLI interface API function jump table in @phba
10934 * struct.
10935 * Returns: 0 - success, -ENODEV - failure.
10936 **/
10937int
10938lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10939{
10940
10941 switch (dev_grp) {
10942 case LPFC_PCI_DEV_LP:
10943 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10944 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10945 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
10946 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
10947 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
10948 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
10949 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
10950 break;
10951 case LPFC_PCI_DEV_OC:
10952 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10953 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10954 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
10955 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
10956 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
10957 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
10958 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
10959 break;
10960 default:
10961 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10962 "1419 Invalid HBA PCI-device group: 0x%x\n",
10963 dev_grp);
10964 return -ENODEV;
10965 }
10966 return 0;
10967}
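
/*
 * Usage sketch (illustrative only): during probe the driver selects the
 * jump table from the PCI device group, after which the wrappers above
 * dispatch to the matching SLI revision:
 *
 *	rc = lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	if (rc)
 *		(fail the probe; rc is -ENODEV)
 */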
10968
10969/**
10970 * lpfc_sli4_calc_ring - Calculates which ring to use
10971 * @phba: Pointer to HBA context object.
10972 * @piocb: Pointer to command iocb.
10973 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's hba_wqidx.
10978 */
10979struct lpfc_sli_ring *
10980lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10981{
10982 struct lpfc_io_buf *lpfc_cmd;
10983
10984 if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10985 if (unlikely(!phba->sli4_hba.hdwq))
10986 return NULL;
		/*
		 * For an abort iocb, hba_wqidx should already be
		 * set up based on the work queue that was used.
		 */
10991 if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10992 lpfc_cmd = piocb->io_buf;
10993 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10994 }
10995 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10996 } else {
10997 if (unlikely(!phba->sli4_hba.els_wq))
10998 return NULL;
10999 piocb->hba_wqidx = 0;
11000 return phba->sli4_hba.els_wq->pring;
11001 }
11002}
11003
11004/**
11005 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11006 * @phba: Pointer to HBA context object.
11007 * @ring_number: Ring number
11008 * @piocb: Pointer to command iocb.
11009 * @flag: Flag indicating if this command can be put into txq.
11010 *
 * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
 * function. It acquires the appropriate lock (the ring_lock for SLI-4,
 * the hbalock for SLI-3), calls __lpfc_sli_issue_iocb, and returns the
 * error returned by it. This wrapper is used by functions which do not
 * hold the lock themselves.
11016 **/
11017int
11018lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11019 struct lpfc_iocbq *piocb, uint32_t flag)
11020{
11021 struct lpfc_sli_ring *pring;
11022 struct lpfc_queue *eq;
11023 unsigned long iflags;
11024 int rc;
11025
11026 /* If the PCI channel is in offline state, do not post iocbs. */
11027 if (unlikely(pci_channel_offline(phba->pcidev)))
11028 return IOCB_ERROR;
11029
11030 if (phba->sli_rev == LPFC_SLI_REV4) {
11031 lpfc_sli_prep_wqe(phba, piocb);
11032
11033 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11034
11035 pring = lpfc_sli4_calc_ring(phba, piocb);
11036 if (unlikely(pring == NULL))
11037 return IOCB_ERROR;
11038
11039 spin_lock_irqsave(&pring->ring_lock, iflags);
11040 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11041 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11042
11043 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
11044 } else {
11045 /* For now, SLI2/3 will still use hbalock */
11046 spin_lock_irqsave(&phba->hbalock, iflags);
11047 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11048 spin_unlock_irqrestore(&phba->hbalock, iflags);
11049 }
11050 return rc;
11051}
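
/*
 * Usage sketch (illustrative only): with flag 0, a busy ring queues the
 * iocb to the txq and still returns IOCB_SUCCESS, so a caller typically
 * treats only IOCB_ERROR as a failure:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		(release the iocb and clean up)
 */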
11052
11053/**
11054 * lpfc_extra_ring_setup - Extra ring setup function
11055 * @phba: Pointer to HBA context object.
11056 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode or
 * IP over FC functionality.
11061 *
11062 * This function is called with no lock held. SLI3 only.
11063 **/
11064static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
11066{
11067 struct lpfc_sli *psli;
11068 struct lpfc_sli_ring *pring;
11069
11070 psli = &phba->sli;
11071
11072 /* Adjust cmd/rsp ring iocb entries more evenly */
11073
11074 /* Take some away from the FCP ring */
11075 pring = &psli->sli3_ring[LPFC_FCP_RING];
11076 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11077 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11078 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11079 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11080
11081 /* and give them to the extra ring */
11082 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11083
11084 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11085 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11086 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11087 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11088
11089 /* Setup default profile for this ring */
11090 pring->iotag_max = 4096;
11091 pring->num_mask = 1;
11092 pring->prt[0].profile = 0; /* Mask 0 */
11093 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11094 pring->prt[0].type = phba->cfg_multi_ring_type;
11095 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11096 return 0;
11097}
11098
11099static void
11100lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11101 struct lpfc_nodelist *ndlp)
11102{
11103 unsigned long iflags;
11104 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
11105
11106 spin_lock_irqsave(&phba->hbalock, iflags);
11107 if (!list_empty(&evtp->evt_listp)) {
11108 spin_unlock_irqrestore(&phba->hbalock, iflags);
11109 return;
11110 }
11111
	/* Hold a node reference count until the queued work is done. */
11113 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
11114 if (!evtp->evt_arg1) {
11115 spin_unlock_irqrestore(&phba->hbalock, iflags);
11116 return;
11117 }
11118 evtp->evt = LPFC_EVT_RECOVER_PORT;
11119 list_add_tail(&evtp->evt_listp, &phba->work_list);
11120 spin_unlock_irqrestore(&phba->hbalock, iflags);
11121
11122 lpfc_worker_wake_up(phba);
11123}
11124
11125/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11126 * @phba: Pointer to HBA context object.
11127 * @iocbq: Pointer to iocb object.
11128 *
11129 * The async_event handler calls this routine when it receives
11130 * an ASYNC_STATUS_CN event from the port. The port generates
11131 * this event when an Abort Sequence request to an rport fails
 * twice in succession. The abort may have originated from the
 * driver or from the port. The ABTS could have been for an ELS
11134 * or FCP IO. The port only generates this event when an ABTS
11135 * fails to complete after one retry.
11136 */
11137static void
11138lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11139 struct lpfc_iocbq *iocbq)
11140{
11141 struct lpfc_nodelist *ndlp = NULL;
11142 uint16_t rpi = 0, vpi = 0;
11143 struct lpfc_vport *vport = NULL;
11144
11145 /* The rpi in the ulpContext is vport-sensitive. */
11146 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11147 rpi = iocbq->iocb.ulpContext;
11148
11149 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11150 "3092 Port generated ABTS async event "
11151 "on vpi %d rpi %d status 0x%x\n",
11152 vpi, rpi, iocbq->iocb.ulpStatus);
11153
11154 vport = lpfc_find_vport_by_vpid(phba, vpi);
11155 if (!vport)
11156 goto err_exit;
11157 ndlp = lpfc_findnode_rpi(vport, rpi);
11158 if (!ndlp)
11159 goto err_exit;
11160
11161 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11162 lpfc_sli_abts_recover_port(vport, ndlp);
11163 return;
11164
11165 err_exit:
11166 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11167 "3095 Event Context not found, no "
11168 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11169 vpi, rpi, iocbq->iocb.ulpStatus,
11170 iocbq->iocb.ulpContext);
11171}
11172
11173/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11174 * @phba: pointer to HBA context object.
11175 * @ndlp: nodelist pointer for the impacted rport.
11176 * @axri: pointer to the wcqe containing the failed exchange.
11177 *
11178 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11179 * port. The port generates this event when an abort exchange request to an
 * rport fails twice in succession with no reply. The abort may have originated
 * from the driver or from the port. The ABTS could have been for an ELS or FCP IO.
11182 */
11183void
11184lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11185 struct lpfc_nodelist *ndlp,
11186 struct sli4_wcqe_xri_aborted *axri)
11187{
11188 uint32_t ext_status = 0;
11189
11190 if (!ndlp) {
11191 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11192 "3115 Node Context not found, driver "
11193 "ignoring abts err event\n");
11194 return;
11195 }
11196
11197 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11198 "3116 Port generated FCP XRI ABORT event on "
11199 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11200 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11201 bf_get(lpfc_wcqe_xa_xri, axri),
11202 bf_get(lpfc_wcqe_xa_status, axri),
11203 axri->parameter);
11204
11205 /*
11206 * Catch the ABTS protocol failure case. Older OCe FW releases returned
11207 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11208 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11209 */
11210 ext_status = axri->parameter & IOERR_PARAM_MASK;
11211 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11212 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11213 lpfc_sli_post_recovery_event(phba, ndlp);
11214}
11215
11216/**
11217 * lpfc_sli_async_event_handler - ASYNC iocb handler function
11218 * @phba: Pointer to HBA context object.
11219 * @pring: Pointer to driver SLI ring object.
11220 * @iocbq: Pointer to iocb object.
11221 *
11222 * This function is called by the slow ring event handler
11223 * function when there is an ASYNC event iocb in the ring.
11224 * This function is called with no lock held.
11225 * Currently this function handles only temperature related
11226 * ASYNC events. The function decodes the temperature sensor
11227 * event message and posts events for the management applications.
11228 **/
11229static void
lpfc_sli_async_event_handler(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11232{
11233 IOCB_t *icmd;
11234 uint16_t evt_code;
11235 struct temp_event temp_event_data;
11236 struct Scsi_Host *shost;
11237 uint32_t *iocb_w;
11238
11239 icmd = &iocbq->iocb;
11240 evt_code = icmd->un.asyncstat.evt_code;
11241
11242 switch (evt_code) {
11243 case ASYNC_TEMP_WARN:
11244 case ASYNC_TEMP_SAFE:
11245 temp_event_data.data = (uint32_t) icmd->ulpContext;
11246 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11247 if (evt_code == ASYNC_TEMP_WARN) {
11248 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11249 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11250 "0347 Adapter is very hot, please take "
11251 "corrective action. temperature : %d Celsius\n",
11252 (uint32_t) icmd->ulpContext);
11253 } else {
11254 temp_event_data.event_code = LPFC_NORMAL_TEMP;
11255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11256 "0340 Adapter temperature is OK now. "
11257 "temperature : %d Celsius\n",
11258 (uint32_t) icmd->ulpContext);
11259 }
11260
11261 /* Send temperature change event to applications */
11262 shost = lpfc_shost_from_vport(phba->pport);
11263 fc_host_post_vendor_event(shost, fc_get_event_number(),
11264 sizeof(temp_event_data), (char *) &temp_event_data,
11265 LPFC_NL_VENDOR_ID);
11266 break;
11267 case ASYNC_STATUS_CN:
11268 lpfc_sli_abts_err_handler(phba, iocbq);
11269 break;
11270 default:
11271 iocb_w = (uint32_t *) icmd;
11272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11273 "0346 Ring %d handler: unexpected ASYNC_STATUS"
11274 " evt_code 0x%x\n"
11275 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
11276 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
11277 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
11278 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11279 pring->ringno, icmd->un.asyncstat.evt_code,
11280 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11281 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11282 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11283 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11284
11285 break;
11286 }
11287}
11288
11289
11290/**
11291 * lpfc_sli4_setup - SLI ring setup function
11292 * @phba: Pointer to HBA context object.
11293 *
 * lpfc_sli4_setup sets up rings of the SLI interface with
 * the number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled, so there is no need for locking.
11298 *
11299 * This function always returns 0.
11300 **/
11301int
11302lpfc_sli4_setup(struct lpfc_hba *phba)
11303{
11304 struct lpfc_sli_ring *pring;
11305
11306 pring = phba->sli4_hba.els_wq->pring;
11307 pring->num_mask = LPFC_MAX_RING_MASK;
11308 pring->prt[0].profile = 0; /* Mask 0 */
11309 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11310 pring->prt[0].type = FC_TYPE_ELS;
11311 pring->prt[0].lpfc_sli_rcv_unsol_event =
11312 lpfc_els_unsol_event;
11313 pring->prt[1].profile = 0; /* Mask 1 */
11314 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11315 pring->prt[1].type = FC_TYPE_ELS;
11316 pring->prt[1].lpfc_sli_rcv_unsol_event =
11317 lpfc_els_unsol_event;
11318 pring->prt[2].profile = 0; /* Mask 2 */
11319 /* NameServer Inquiry */
11320 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11321 /* NameServer */
11322 pring->prt[2].type = FC_TYPE_CT;
11323 pring->prt[2].lpfc_sli_rcv_unsol_event =
11324 lpfc_ct_unsol_event;
11325 pring->prt[3].profile = 0; /* Mask 3 */
11326 /* NameServer response */
11327 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11328 /* NameServer */
11329 pring->prt[3].type = FC_TYPE_CT;
11330 pring->prt[3].lpfc_sli_rcv_unsol_event =
11331 lpfc_ct_unsol_event;
11332 return 0;
11333}
11334
11335/**
11336 * lpfc_sli_setup - SLI ring setup function
11337 * @phba: Pointer to HBA context object.
11338 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * the number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled, so there is no need for locking.
11343 *
11344 * This function always returns 0. SLI3 only.
11345 **/
11346int
11347lpfc_sli_setup(struct lpfc_hba *phba)
11348{
11349 int i, totiocbsize = 0;
11350 struct lpfc_sli *psli = &phba->sli;
11351 struct lpfc_sli_ring *pring;
11352
11353 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11354 psli->sli_flag = 0;
11355
11356 psli->iocbq_lookup = NULL;
11357 psli->iocbq_lookup_len = 0;
11358 psli->last_iotag = 0;
11359
11360 for (i = 0; i < psli->num_rings; i++) {
11361 pring = &psli->sli3_ring[i];
11362 switch (i) {
11363 case LPFC_FCP_RING: /* ring 0 - FCP */
11364 /* numCiocb and numRiocb are used in config_port */
11365 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11366 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11367 pring->sli.sli3.numCiocb +=
11368 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11369 pring->sli.sli3.numRiocb +=
11370 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11371 pring->sli.sli3.numCiocb +=
11372 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11373 pring->sli.sli3.numRiocb +=
11374 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11375 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11376 SLI3_IOCB_CMD_SIZE :
11377 SLI2_IOCB_CMD_SIZE;
11378 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11379 SLI3_IOCB_RSP_SIZE :
11380 SLI2_IOCB_RSP_SIZE;
11381 pring->iotag_ctr = 0;
11382 pring->iotag_max =
11383 (phba->cfg_hba_queue_depth * 2);
11384 pring->fast_iotag = pring->iotag_max;
11385 pring->num_mask = 0;
11386 break;
11387 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
11388 /* numCiocb and numRiocb are used in config_port */
11389 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11390 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11391 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11392 SLI3_IOCB_CMD_SIZE :
11393 SLI2_IOCB_CMD_SIZE;
11394 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11395 SLI3_IOCB_RSP_SIZE :
11396 SLI2_IOCB_RSP_SIZE;
11397 pring->iotag_max = phba->cfg_hba_queue_depth;
11398 pring->num_mask = 0;
11399 break;
11400 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
11401 /* numCiocb and numRiocb are used in config_port */
11402 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11403 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11404 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11405 SLI3_IOCB_CMD_SIZE :
11406 SLI2_IOCB_CMD_SIZE;
11407 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11408 SLI3_IOCB_RSP_SIZE :
11409 SLI2_IOCB_RSP_SIZE;
11410 pring->fast_iotag = 0;
11411 pring->iotag_ctr = 0;
11412 pring->iotag_max = 4096;
11413 pring->lpfc_sli_rcv_async_status =
11414 lpfc_sli_async_event_handler;
11415 pring->num_mask = LPFC_MAX_RING_MASK;
11416 pring->prt[0].profile = 0; /* Mask 0 */
11417 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11418 pring->prt[0].type = FC_TYPE_ELS;
11419 pring->prt[0].lpfc_sli_rcv_unsol_event =
11420 lpfc_els_unsol_event;
11421 pring->prt[1].profile = 0; /* Mask 1 */
11422 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11423 pring->prt[1].type = FC_TYPE_ELS;
11424 pring->prt[1].lpfc_sli_rcv_unsol_event =
11425 lpfc_els_unsol_event;
11426 pring->prt[2].profile = 0; /* Mask 2 */
11427 /* NameServer Inquiry */
11428 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11429 /* NameServer */
11430 pring->prt[2].type = FC_TYPE_CT;
11431 pring->prt[2].lpfc_sli_rcv_unsol_event =
11432 lpfc_ct_unsol_event;
11433 pring->prt[3].profile = 0; /* Mask 3 */
11434 /* NameServer response */
11435 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11436 /* NameServer */
11437 pring->prt[3].type = FC_TYPE_CT;
11438 pring->prt[3].lpfc_sli_rcv_unsol_event =
11439 lpfc_ct_unsol_event;
11440 break;
11441 }
11442 totiocbsize += (pring->sli.sli3.numCiocb *
11443 pring->sli.sli3.sizeCiocb) +
11444 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11445 }
11446 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11447 /* Too many cmd / rsp ring entries in SLI2 SLIM */
11448 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11449 "SLI2 SLIM Data: x%x x%lx\n",
11450 phba->brd_no, totiocbsize,
11451 (unsigned long) MAX_SLIM_IOCB_SIZE);
11452 }
11453 if (phba->cfg_multi_ring_support == 2)
11454 lpfc_extra_ring_setup(phba);
11455
11456 return 0;
11457}
11458
11459/**
11460 * lpfc_sli4_queue_init - Queue initialization function
11461 * @phba: Pointer to HBA context object.
11462 *
11463 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11464 * ring. This function also initializes ring indices of each ring.
11465 * This function is called during the initialization of the SLI
11466 * interface of an HBA.
 * This function is called with no lock held.
11469 **/
11470void
11471lpfc_sli4_queue_init(struct lpfc_hba *phba)
11472{
11473 struct lpfc_sli *psli;
11474 struct lpfc_sli_ring *pring;
11475 int i;
11476
11477 psli = &phba->sli;
11478 spin_lock_irq(&phba->hbalock);
11479 INIT_LIST_HEAD(&psli->mboxq);
11480 INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as doubly-linked lists */
11482 for (i = 0; i < phba->cfg_hdw_queue; i++) {
11483 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11484 pring->flag = 0;
11485 pring->ringno = LPFC_FCP_RING;
11486 pring->txcmplq_cnt = 0;
11487 INIT_LIST_HEAD(&pring->txq);
11488 INIT_LIST_HEAD(&pring->txcmplq);
11489 INIT_LIST_HEAD(&pring->iocb_continueq);
11490 spin_lock_init(&pring->ring_lock);
11491 }
11492 pring = phba->sli4_hba.els_wq->pring;
11493 pring->flag = 0;
11494 pring->ringno = LPFC_ELS_RING;
11495 pring->txcmplq_cnt = 0;
11496 INIT_LIST_HEAD(&pring->txq);
11497 INIT_LIST_HEAD(&pring->txcmplq);
11498 INIT_LIST_HEAD(&pring->iocb_continueq);
11499 spin_lock_init(&pring->ring_lock);
11500
11501 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11502 pring = phba->sli4_hba.nvmels_wq->pring;
11503 pring->flag = 0;
11504 pring->ringno = LPFC_ELS_RING;
11505 pring->txcmplq_cnt = 0;
11506 INIT_LIST_HEAD(&pring->txq);
11507 INIT_LIST_HEAD(&pring->txcmplq);
11508 INIT_LIST_HEAD(&pring->iocb_continueq);
11509 spin_lock_init(&pring->ring_lock);
11510 }
11511
11512 spin_unlock_irq(&phba->hbalock);
11513}
11514
11515/**
11516 * lpfc_sli_queue_init - Queue initialization function
11517 * @phba: Pointer to HBA context object.
11518 *
11519 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11520 * ring. This function also initializes ring indices of each ring.
11521 * This function is called during the initialization of the SLI
11522 * interface of an HBA.
 * This function is called with no lock held.
11525 **/
11526void
11527lpfc_sli_queue_init(struct lpfc_hba *phba)
11528{
11529 struct lpfc_sli *psli;
11530 struct lpfc_sli_ring *pring;
11531 int i;
11532
11533 psli = &phba->sli;
11534 spin_lock_irq(&phba->hbalock);
11535 INIT_LIST_HEAD(&psli->mboxq);
11536 INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as doubly-linked lists */
11538 for (i = 0; i < psli->num_rings; i++) {
11539 pring = &psli->sli3_ring[i];
11540 pring->ringno = i;
11541 pring->sli.sli3.next_cmdidx = 0;
11542 pring->sli.sli3.local_getidx = 0;
11543 pring->sli.sli3.cmdidx = 0;
11544 INIT_LIST_HEAD(&pring->iocb_continueq);
11545 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11546 INIT_LIST_HEAD(&pring->postbufq);
11547 pring->flag = 0;
11548 INIT_LIST_HEAD(&pring->txq);
11549 INIT_LIST_HEAD(&pring->txcmplq);
11550 spin_lock_init(&pring->ring_lock);
11551 }
11552 spin_unlock_irq(&phba->hbalock);
11553}
11554
11555/**
11556 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11557 * @phba: Pointer to HBA context object.
11558 *
11559 * This routine flushes the mailbox command subsystem. It will unconditionally
11560 * flush all the mailbox commands in the three possible stages in the mailbox
11561 * command sub-system: pending mailbox command queue; the outstanding mailbox
11562 * command; and completed mailbox command queue. It is caller's responsibility
11563 * to make sure that the driver is in the proper state to flush the mailbox
11564 * command sub-system. Namely, the posting of mailbox commands into the
11565 * pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state where it will never work on the outstanding
11567 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11568 * mailbox command has been completed.
11569 **/
11570static void
11571lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11572{
11573 LIST_HEAD(completions);
11574 struct lpfc_sli *psli = &phba->sli;
11575 LPFC_MBOXQ_t *pmb;
11576 unsigned long iflag;
11577
11578 /* Disable softirqs, including timers from obtaining phba->hbalock */
11579 local_bh_disable();
11580
11581 /* Flush all the mailbox commands in the mbox system */
11582 spin_lock_irqsave(&phba->hbalock, iflag);
11583
11584 /* The pending mailbox command queue */
11585 list_splice_init(&phba->sli.mboxq, &completions);
11586 /* The outstanding active mailbox command */
11587 if (psli->mbox_active) {
11588 list_add_tail(&psli->mbox_active->list, &completions);
11589 psli->mbox_active = NULL;
11590 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11591 }
11592 /* The completed mailbox command queue */
11593 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11594 spin_unlock_irqrestore(&phba->hbalock, iflag);
11595
11596 /* Enable softirqs again, done with phba->hbalock */
11597 local_bh_enable();
11598
11599 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11600 while (!list_empty(&completions)) {
11601 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11602 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11603 if (pmb->mbox_cmpl)
11604 pmb->mbox_cmpl(phba, pmb);
11605 }
11606}
11607
11608/**
11609 * lpfc_sli_host_down - Vport cleanup function
11610 * @vport: Pointer to virtual port object.
11611 *
11612 * lpfc_sli_host_down is called to clean up the resources
11613 * associated with a vport before destroying virtual
11614 * port data structures.
 * This function does the following operations:
11616 * - Free discovery resources associated with this virtual
11617 * port.
11618 * - Free iocbs associated with this virtual port in
11619 * the txq.
11620 * - Send abort for all iocb commands associated with this
11621 * vport in txcmplq.
11622 *
11623 * This function is called with no lock held and always returns 1.
11624 **/
11625int
11626lpfc_sli_host_down(struct lpfc_vport *vport)
11627{
11628 LIST_HEAD(completions);
11629 struct lpfc_hba *phba = vport->phba;
11630 struct lpfc_sli *psli = &phba->sli;
11631 struct lpfc_queue *qp = NULL;
11632 struct lpfc_sli_ring *pring;
11633 struct lpfc_iocbq *iocb, *next_iocb;
11634 int i;
11635 unsigned long flags = 0;
11636 uint16_t prev_pring_flag;
11637
11638 lpfc_cleanup_discovery_resources(vport);
11639
11640 spin_lock_irqsave(&phba->hbalock, flags);
11641
11642 /*
11643 * Error everything on the txq since these iocbs
11644 * have not been given to the FW yet.
11645 * Also issue ABTS for everything on the txcmplq
11646 */
11647 if (phba->sli_rev != LPFC_SLI_REV4) {
11648 for (i = 0; i < psli->num_rings; i++) {
11649 pring = &psli->sli3_ring[i];
11650 prev_pring_flag = pring->flag;
11651 /* Only slow rings */
11652 if (pring->ringno == LPFC_ELS_RING) {
11653 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11654 /* Set the lpfc data pending flag */
11655 set_bit(LPFC_DATA_READY, &phba->data_flags);
11656 }
11657 list_for_each_entry_safe(iocb, next_iocb,
11658 &pring->txq, list) {
11659 if (iocb->vport != vport)
11660 continue;
11661 list_move_tail(&iocb->list, &completions);
11662 }
11663 list_for_each_entry_safe(iocb, next_iocb,
11664 &pring->txcmplq, list) {
11665 if (iocb->vport != vport)
11666 continue;
11667 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11668 NULL);
11669 }
11670 pring->flag = prev_pring_flag;
11671 }
11672 } else {
11673 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11674 pring = qp->pring;
11675 if (!pring)
11676 continue;
11677 if (pring == phba->sli4_hba.els_wq->pring) {
11678 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11679 /* Set the lpfc data pending flag */
11680 set_bit(LPFC_DATA_READY, &phba->data_flags);
11681 }
11682 prev_pring_flag = pring->flag;
11683 spin_lock(&pring->ring_lock);
11684 list_for_each_entry_safe(iocb, next_iocb,
11685 &pring->txq, list) {
11686 if (iocb->vport != vport)
11687 continue;
11688 list_move_tail(&iocb->list, &completions);
11689 }
11690 spin_unlock(&pring->ring_lock);
11691 list_for_each_entry_safe(iocb, next_iocb,
11692 &pring->txcmplq, list) {
11693 if (iocb->vport != vport)
11694 continue;
11695 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11696 NULL);
11697 }
11698 pring->flag = prev_pring_flag;
11699 }
11700 }
11701 spin_unlock_irqrestore(&phba->hbalock, flags);
11702
11703 /* Make sure HBA is alive */
11704 lpfc_issue_hb_tmo(phba);
11705
11706 /* Cancel all the IOCBs from the completions list */
11707 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11708 IOERR_SLI_DOWN);
11709 return 1;
11710}
11711
11712/**
11713 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11714 * @phba: Pointer to HBA context object.
11715 *
 * This function cleans up all iocbs, buffers, and mailbox commands
11717 * while shutting down the HBA. This function is called with no
11718 * lock held and always returns 1.
11719 * This function does the following to cleanup driver resources:
11720 * - Free discovery resources for each virtual port
11721 * - Cleanup any pending fabric iocbs
11722 * - Iterate through the iocb txq and free each entry
11723 * in the list.
11724 * - Free up any buffer posted to the HBA
11725 * - Free mailbox commands in the mailbox queue.
11726 **/
11727int
11728lpfc_sli_hba_down(struct lpfc_hba *phba)
11729{
11730 LIST_HEAD(completions);
11731 struct lpfc_sli *psli = &phba->sli;
11732 struct lpfc_queue *qp = NULL;
11733 struct lpfc_sli_ring *pring;
11734 struct lpfc_dmabuf *buf_ptr;
11735 unsigned long flags = 0;
11736 int i;
11737
11738 /* Shutdown the mailbox command sub-system */
11739 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11740
11741 lpfc_hba_down_prep(phba);
11742
11743 /* Disable softirqs, including timers from obtaining phba->hbalock */
11744 local_bh_disable();
11745
11746 lpfc_fabric_abort_hba(phba);
11747
11748 spin_lock_irqsave(&phba->hbalock, flags);
11749
11750 /*
11751 * Error everything on the txq since these iocbs
11752 * have not been given to the FW yet.
11753 */
11754 if (phba->sli_rev != LPFC_SLI_REV4) {
11755 for (i = 0; i < psli->num_rings; i++) {
11756 pring = &psli->sli3_ring[i];
11757 /* Only slow rings */
11758 if (pring->ringno == LPFC_ELS_RING) {
11759 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11760 /* Set the lpfc data pending flag */
11761 set_bit(LPFC_DATA_READY, &phba->data_flags);
11762 }
11763 list_splice_init(&pring->txq, &completions);
11764 }
11765 } else {
11766 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11767 pring = qp->pring;
11768 if (!pring)
11769 continue;
11770 spin_lock(&pring->ring_lock);
11771 list_splice_init(&pring->txq, &completions);
11772 spin_unlock(&pring->ring_lock);
11773 if (pring == phba->sli4_hba.els_wq->pring) {
11774 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11775 /* Set the lpfc data pending flag */
11776 set_bit(LPFC_DATA_READY, &phba->data_flags);
11777 }
11778 }
11779 }
11780 spin_unlock_irqrestore(&phba->hbalock, flags);
11781
11782 /* Cancel all the IOCBs from the completions list */
11783 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11784 IOERR_SLI_DOWN);
11785
11786 spin_lock_irqsave(&phba->hbalock, flags);
11787 list_splice_init(&phba->elsbuf, &completions);
11788 phba->elsbuf_cnt = 0;
11789 phba->elsbuf_prev_cnt = 0;
11790 spin_unlock_irqrestore(&phba->hbalock, flags);
11791
11792 while (!list_empty(&completions)) {
11793 list_remove_head(&completions, buf_ptr,
11794 struct lpfc_dmabuf, list);
11795 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11796 kfree(buf_ptr);
11797 }
11798
11799 /* Enable softirqs again, done with phba->hbalock */
11800 local_bh_enable();
11801
11802 /* Return any active mbox cmds */
11803 del_timer_sync(&psli->mbox_tmo);
11804
11805 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11806 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11807 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11808
11809 return 1;
11810}
11811
11812/**
11813 * lpfc_sli_pcimem_bcopy - SLI memory copy function
11814 * @srcp: Source memory pointer.
11815 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (copied one 32-bit word at a time).
11817 *
11818 * This function is used for copying data between driver memory
11819 * and the SLI memory. This function also changes the endianness
11820 * of each word if native endianness is different from SLI
11821 * endianness. This function can be called with or without
11822 * lock.
11823 **/
11824void
11825lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11826{
11827 uint32_t *src = srcp;
11828 uint32_t *dest = destp;
11829 uint32_t ldata;
11830 int i;
11831
11832 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11833 ldata = *src;
11834 ldata = le32_to_cpu(ldata);
11835 *dest = ldata;
11836 src++;
11837 dest++;
11838 }
11839}
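
/*
 * Usage sketch (illustrative only; the 16-word buffer is an arbitrary
 * example and slim_va is a hypothetical mapped SLI memory pointer): cnt is
 * a byte count, and each 32-bit word is converted from little-endian as it
 * is copied:
 *
 *	uint32_t shadow[16];
 *
 *	lpfc_sli_pcimem_bcopy(slim_va, shadow, sizeof(shadow));
 */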
11840
11841
11842/**
11843 * lpfc_sli_bemem_bcopy - SLI memory copy function
11844 * @srcp: Source memory pointer.
11845 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (copied one 32-bit word at a time).
 *
 * This function is used for copying data from a data structure with
 * big-endian representation to local endianness.
11850 * This function can be called with or without lock.
11851 **/
11852void
11853lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11854{
11855 uint32_t *src = srcp;
11856 uint32_t *dest = destp;
11857 uint32_t ldata;
11858 int i;
11859
11860 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11861 ldata = *src;
11862 ldata = be32_to_cpu(ldata);
11863 *dest = ldata;
11864 src++;
11865 dest++;
11866 }
11867}
11868
11869/**
11870 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
11871 * @phba: Pointer to HBA context object.
11872 * @pring: Pointer to driver SLI ring object.
11873 * @mp: Pointer to driver buffer object.
11874 *
11875 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
11877 * buffer list.
11878 **/
11879int
11880lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11881 struct lpfc_dmabuf *mp)
11882{
	/*
	 * Stick struct lpfc_dmabuf at the end of postbufq so the driver
	 * can look it up later.
	 */
11885 spin_lock_irq(&phba->hbalock);
11886 list_add_tail(&mp->list, &pring->postbufq);
11887 pring->postbufq_cnt++;
11888 spin_unlock_irq(&phba->hbalock);
11889 return 0;
11890}
11891
11892/**
11893 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
11894 * @phba: Pointer to HBA context object.
11895 *
11896 * When HBQ is enabled, buffers are searched based on tags. This function
11897 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
11899 * does not conflict with tags of buffer posted for unsolicited events.
11900 * The function returns the allocated tag. The function is called with
11901 * no locks held.
11902 **/
11903uint32_t
11904lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11905{
11906 spin_lock_irq(&phba->hbalock);
11907 phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
	 * tags assigned by the HBQ.
	 */
11912 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11913 spin_unlock_irq(&phba->hbalock);
11914 return phba->buffer_tag_count;
11915}
11916
11917/**
11918 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11919 * @phba: Pointer to HBA context object.
11920 * @pring: Pointer to driver SLI ring object.
11921 * @tag: Buffer tag.
11922 *
11923 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11924 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11925 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found, the lpfc_dmabuf object of the
 * buffer is returned to the caller; otherwise NULL is returned.
11930 * This function is called with no lock held.
11931 **/
11932struct lpfc_dmabuf *
11933lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11934 uint32_t tag)
11935{
11936 struct lpfc_dmabuf *mp, *next_mp;
11937 struct list_head *slp = &pring->postbufq;
11938
11939 /* Search postbufq, from the beginning, looking for a match on tag */
11940 spin_lock_irq(&phba->hbalock);
11941 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11942 if (mp->buffer_tag == tag) {
11943 list_del_init(&mp->list);
11944 pring->postbufq_cnt--;
11945 spin_unlock_irq(&phba->hbalock);
11946 return mp;
11947 }
11948 }
11949
11950 spin_unlock_irq(&phba->hbalock);
11951 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11952 "0402 Cannot find virtual addr for buffer tag on "
11953 "ring %d Data x%lx x%px x%px x%x\n",
11954 pring->ringno, (unsigned long) tag,
11955 slp->next, slp->prev, pring->postbufq_cnt);
11956
11957 return NULL;
11958}
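
/*
 * Usage sketch (illustrative only; tag_from_iocb is a placeholder for the
 * tag carried in the completion): pairing tag allocation with the lookup
 * performed when the CMD_IOCB_RET_XRI64_CX completion arrives:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	(post mp to the HBA with a CMD_QUE_XRI64_CX iocb)
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_iocb);
 */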
11959
11960/**
11961 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11962 * @phba: Pointer to HBA context object.
11963 * @pring: Pointer to driver SLI ring object.
11964 * @phys: DMA address of the buffer.
11965 *
11966 * This function searches the buffer list using the dma_address
11967 * of unsolicited event to find the driver's lpfc_dmabuf object
11968 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
11970 * This function is called by the ct and els unsolicited event
11971 * handlers to get the buffer associated with the unsolicited
11972 * event.
11973 *
11974 * This function is called with no lock held.
11975 **/
11976struct lpfc_dmabuf *
11977lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11978 dma_addr_t phys)
11979{
11980 struct lpfc_dmabuf *mp, *next_mp;
11981 struct list_head *slp = &pring->postbufq;
11982
11983 /* Search postbufq, from the beginning, looking for a match on phys */
11984 spin_lock_irq(&phba->hbalock);
11985 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11986 if (mp->phys == phys) {
11987 list_del_init(&mp->list);
11988 pring->postbufq_cnt--;
11989 spin_unlock_irq(&phba->hbalock);
11990 return mp;
11991 }
11992 }
11993
11994 spin_unlock_irq(&phba->hbalock);
11995 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11996 "0410 Cannot find virtual addr for mapped buf on "
11997 "ring %d Data x%llx x%px x%px x%x\n",
11998 pring->ringno, (unsigned long long)phys,
11999 slp->next, slp->prev, pring->postbufq_cnt);
12000 return NULL;
12001}
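
/*
 * Usage sketch (illustrative only; addr_hi/addr_lo are placeholders for the
 * address words in the received iocb): an unsolicited CT/ELS handler
 * recovering the buffer it posted earlier by its DMA address, with
 * getPaddr() combining the high/low address words:
 *
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	(HBA DMAs the unsolicited payload into mp)
 *	...
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(addr_hi, addr_lo));
 */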
12002
12003/**
12004 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12005 * @phba: Pointer to HBA context object.
12006 * @cmdiocb: Pointer to driver command iocb object.
12007 * @rspiocb: Pointer to driver response iocb object.
12008 *
12009 * This function is the completion handler for the abort iocbs for
12010 * ELS commands. This function is called from the ELS ring event
12011 * handler with no lock held. This function frees memory resources
12012 * associated with the abort iocb.
12013 **/
12014static void
12015lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12016 struct lpfc_iocbq *rspiocb)
12017{
12018 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12019 u32 ulp_word4 = get_job_word4(phba, rspiocb);
12020 u8 cmnd = get_job_cmnd(phba, cmdiocb);
12021
12022 if (ulp_status) {
12023 /*
12024 * Assume that the port already completed and returned, or
12025 * will return the iocb. Just log the message.
12026 */
12027 if (phba->sli_rev < LPFC_SLI_REV4) {
12028 if (cmnd == CMD_ABORT_XRI_CX &&
12029 ulp_status == IOSTAT_LOCAL_REJECT &&
12030 ulp_word4 == IOERR_ABORT_REQUESTED) {
12031 goto release_iocb;
12032 }
12033 }
12034
12035 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12036 "0327 Cannot abort els iocb x%px "
12037 "with io cmd xri %x abort tag : x%x, "
12038 "abort status %x abort code %x\n",
12039 cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12040 (phba->sli_rev == LPFC_SLI_REV4) ?
12041 get_wqe_reqtag(cmdiocb) :
12042 cmdiocb->iocb.un.acxri.abortContextTag,
12043 ulp_status, ulp_word4);
12044
12045 }
12046release_iocb:
12047 lpfc_sli_release_iocbq(phba, cmdiocb);
12048 return;
12049}
12050
12051/**
12052 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12053 * @phba: Pointer to HBA context object.
12054 * @cmdiocb: Pointer to driver command iocb object.
12055 * @rspiocb: Pointer to driver response iocb object.
12056 *
12057 * The function is called from SLI ring event handler with no
12058 * lock held. This function is the completion handler for ELS commands
12059 * which are aborted. The function frees memory resources used for
12060 * the aborted ELS commands.
12061 **/
12062void
12063lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12064 struct lpfc_iocbq *rspiocb)
12065{
12066 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12067 IOCB_t *irsp;
12068 LPFC_MBOXQ_t *mbox;
12069 u32 ulp_command, ulp_status, ulp_word4, iotag;
12070
12071 ulp_command = get_job_cmnd(phba, cmdiocb);
12072 ulp_status = get_job_ulpstatus(phba, rspiocb);
12073 ulp_word4 = get_job_word4(phba, rspiocb);
12074
12075 if (phba->sli_rev == LPFC_SLI_REV4) {
12076 iotag = get_wqe_reqtag(cmdiocb);
12077 } else {
12078 irsp = &rspiocb->iocb;
12079 iotag = irsp->ulpIoTag;
12080
12081 /* It is possible for a PLOGI_RJT on NPIV ports to get aborted.
12082 * The MBX_REG_LOGIN64 mbox command is freed back to the
12083 * mbox_mem_pool here.
12084 */
12085 if (cmdiocb->context_un.mbox) {
12086 mbox = cmdiocb->context_un.mbox;
12087 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
12088 cmdiocb->context_un.mbox = NULL;
12089 }
12090 }
12091
12092 /* ELS cmd tag <ulpIoTag> completes */
12093 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12094 "0139 Ignoring ELS cmd code x%x completion Data: "
12095 "x%x x%x x%x x%px\n",
12096 ulp_command, ulp_status, ulp_word4, iotag,
12097 cmdiocb->ndlp);
12098 /*
12099 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12100 * if exchange is busy.
12101 */
12102 if (ulp_command == CMD_GEN_REQUEST64_CR)
12103 lpfc_ct_free_iocb(phba, cmdiocb);
12104 else
12105 lpfc_els_free_iocb(phba, cmdiocb);
12106
12107 lpfc_nlp_put(ndlp);
12108}
12109
12110/**
12111 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12112 * @phba: Pointer to HBA context object.
12113 * @pring: Pointer to driver SLI ring object.
12114 * @cmdiocb: Pointer to driver command iocb object.
12115 * @cmpl: completion function.
12116 *
12117 * This function issues an abort iocb for the provided command iocb. In case
12118 * of unloading, the abort iocb will not be issued to commands on the ELS
12119 * ring. Instead, the completion callback is changed for those commands
12120 * so that nothing happens when they finish. This function is called with
12121 * hbalock held and no ring_lock held (SLI4). The function returns
12122 * IOCB_ABORTING when the given iocb is an abort request or already aborting.
12123 *
12124 **/
12125int
12126lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12127 struct lpfc_iocbq *cmdiocb, void *cmpl)
12128{
12129 struct lpfc_vport *vport = cmdiocb->vport;
12130 struct lpfc_iocbq *abtsiocbp;
12131 int retval = IOCB_ERROR;
12132 unsigned long iflags;
12133 struct lpfc_nodelist *ndlp = NULL;
12134 u32 ulp_command = get_job_cmnd(phba, cmdiocb);
12135 u16 ulp_context, iotag;
12136 bool ia;
12137
12138 /*
12139 * There are certain command types we don't want to abort. And we
12140 * don't want to abort commands that are already in the process of
12141 * being aborted.
12142 */
12143 if (ulp_command == CMD_ABORT_XRI_WQE ||
12144 ulp_command == CMD_ABORT_XRI_CN ||
12145 ulp_command == CMD_CLOSE_XRI_CN ||
12146 cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12147 return IOCB_ABORTING;
12148
12149 if (!pring) {
12150 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12151 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12152 else
12153 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12154 return retval;
12155 }
12156
12157 /*
12158 * If we're unloading, don't abort iocb on the ELS ring, but change
12159 * the callback so that nothing happens when it finishes.
12160 */
12161 if ((vport->load_flag & FC_UNLOADING) &&
12162 pring->ringno == LPFC_ELS_RING) {
12163 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12164 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12165 else
12166 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12167 return retval;
12168 }
12169
12170 /* issue ABTS for this IOCB based on iotag */
12171 abtsiocbp = __lpfc_sli_get_iocbq(phba);
12172 if (abtsiocbp == NULL)
12173 return IOCB_NORESOURCE;
12174
12175 /* This flag signals the completion path to set the correct status
12176 * before calling the completion handler
12177 */
12178 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12179
12180 if (phba->sli_rev == LPFC_SLI_REV4) {
12181 ulp_context = cmdiocb->sli4_xritag;
12182 iotag = abtsiocbp->iotag;
12183 } else {
12184 iotag = cmdiocb->iocb.ulpIoTag;
12185 if (pring->ringno == LPFC_ELS_RING) {
12186 ndlp = cmdiocb->ndlp;
12187 ulp_context = ndlp->nlp_rpi;
12188 } else {
12189 ulp_context = cmdiocb->iocb.ulpContext;
12190 }
12191 }
12192
12193 if (phba->link_state < LPFC_LINK_UP ||
12194 (phba->sli_rev == LPFC_SLI_REV4 &&
12195 phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
12196 (phba->link_flag & LS_EXTERNAL_LOOPBACK))
12197 ia = true;
12198 else
12199 ia = false;
12200
12201 lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
12202 cmdiocb->iocb.ulpClass,
12203 LPFC_WQE_CQ_ID_DEFAULT, ia);
12204
12205 abtsiocbp->vport = vport;
12206
12207 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12208 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12209 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12210 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12211
12212 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12213 abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12214
12215 if (cmpl)
12216 abtsiocbp->cmd_cmpl = cmpl;
12217 else
12218 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12220
12221 if (phba->sli_rev == LPFC_SLI_REV4) {
12222 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12223 if (unlikely(pring == NULL))
12224 goto abort_iotag_exit;
12225 /* Note: both hbalock and ring_lock need to be held here */
12226 spin_lock_irqsave(&pring->ring_lock, iflags);
12227 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12228 abtsiocbp, 0);
12229 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12230 } else {
12231 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12232 abtsiocbp, 0);
12233 }
12234
12235abort_iotag_exit:
12236
12237 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12238 "0339 Abort IO XRI x%x, Original iotag x%x, "
12239 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
12240 "retval x%x\n",
12241 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
12242 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
12243 retval);
12244 if (retval) {
12245 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12246 __lpfc_sli_release_iocbq(phba, abtsiocbp);
12247 }
12248
12249 /*
12250 * The caller of this routine should check for IOCB_ERROR
12251 * and handle it properly. This routine no longer removes the
12252 * iocb from the txcmplq nor calls the completion handler on IOCB_ERROR.
12253 */
12254 return retval;
12255}
12256
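/*
 * Example (hypothetical caller sketch): requesting an abort of an
 * outstanding ELS command. lpfc_sli_issue_abort_iotag() expects hbalock to
 * be held, and IOCB_ABORTING only means an abort was already in flight.
 * lpfc_phba_elsring() is the existing helper that returns the ELS ring.
 */
static int
lpfc_example_abort_els_io(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
	int rc;

	spin_lock_irq(&phba->hbalock);
	rc = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
	spin_unlock_irq(&phba->hbalock);

	/* An abort already in progress is not a failure for this caller. */
	return (rc == IOCB_SUCCESS || rc == IOCB_ABORTING) ? 0 : -EIO;
}
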
12257/**
12258 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12259 * @phba: pointer to lpfc HBA data structure.
12260 *
12261 * This routine will abort all pending and outstanding iocbs to an HBA.
12262 **/
12263void
12264lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12265{
12266 struct lpfc_sli *psli = &phba->sli;
12267 struct lpfc_sli_ring *pring;
12268 struct lpfc_queue *qp = NULL;
12269 int i;
12270
12271 if (phba->sli_rev != LPFC_SLI_REV4) {
12272 for (i = 0; i < psli->num_rings; i++) {
12273 pring = &psli->sli3_ring[i];
12274 lpfc_sli_abort_iocb_ring(phba, pring);
12275 }
12276 return;
12277 }
12278 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12279 pring = qp->pring;
12280 if (!pring)
12281 continue;
12282 lpfc_sli_abort_iocb_ring(phba, pring);
12283 }
12284}
12285
12286/**
12287 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12288 * @iocbq: Pointer to iocb object.
12289 * @vport: Pointer to driver virtual port object.
12290 *
12291 * This function acts as an iocb filter for functions which abort FCP iocbs.
12292 *
12293 * Return values
12294 * -ENODEV, if a null iocb or vport ptr is encountered
12295 * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
12296 * already marked as driver-aborted, or is an abort iocb itself
12297 * 0, passes criteria for aborting the FCP I/O iocb
12298 **/
12299static int
12300lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12301 struct lpfc_vport *vport)
12302{
12303 u8 ulp_command;
12304
12305 /* No null ptr vports */
12306 if (!iocbq || iocbq->vport != vport)
12307 return -ENODEV;
12308
12309 /* iocb must be for an FCP IO, must already exist on the TX cmpl queue,
12310 * must not be premarked as driver-aborted, nor be an ABORT iocb itself
12311 */
12312 ulp_command = get_job_cmnd(vport->phba, iocbq);
12313 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12314 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12315 (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12316 (ulp_command == CMD_ABORT_XRI_CN ||
12317 ulp_command == CMD_CLOSE_XRI_CN ||
12318 ulp_command == CMD_ABORT_XRI_WQE))
12319 return -EINVAL;
12320
12321 return 0;
12322}
12323
12324/**
12325 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12326 * @iocbq: Pointer to driver iocb object.
12327 * @vport: Pointer to driver virtual port object.
12328 * @tgt_id: SCSI ID of the target.
12329 * @lun_id: LUN ID of the scsi device.
12330 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12331 *
12332 * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12333 * host.
12334 *
12335 * It will return
12336 * 0 if the filtering criteria are met for the given iocb and will return
12337 * 1 if the filtering criteria are not met.
12338 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12339 * given iocb is for the SCSI device specified by vport, tgt_id and
12340 * lun_id parameter.
12341 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
12342 * given iocb is for the SCSI target specified by vport and tgt_id
12343 * parameters.
12344 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12345 * given iocb is for the SCSI host associated with the given vport.
12346 * This function is called with no locks held.
12347 **/
12348static int
12349lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12350 uint16_t tgt_id, uint64_t lun_id,
12351 lpfc_ctx_cmd ctx_cmd)
12352{
12353 struct lpfc_io_buf *lpfc_cmd;
12354 int rc = 1;
12355
12356 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12357
12358 if (lpfc_cmd->pCmd == NULL)
12359 return rc;
12360
12361 switch (ctx_cmd) {
12362 case LPFC_CTX_LUN:
12363 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12364 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12365 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12366 rc = 0;
12367 break;
12368 case LPFC_CTX_TGT:
12369 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12370 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12371 rc = 0;
12372 break;
12373 case LPFC_CTX_HOST:
12374 rc = 0;
12375 break;
12376 default:
12377 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12378 __func__, ctx_cmd);
12379 break;
12380 }
12381
12382 return rc;
12383}
12384
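/*
 * Example (illustrative sketch): the abort paths below always apply the two
 * filters in order, lpfc_sli_validate_fcp_iocb_for_abort() first and then
 * lpfc_sli_validate_fcp_iocb(), while walking the iotag lookup table under
 * hbalock. A made-up counting helper shows the pattern:
 */
static int
lpfc_example_count_abortable_lun_ios(struct lpfc_vport *vport, u16 tgt_id,
				     u64 lun_id)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	unsigned long iflags;
	int i, cnt = 0;

	spin_lock_irqsave(&phba->hbalock, iflags);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		/* Stage 1: abortable FCP iocb for this vport? */
		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
			continue;

		/* Stage 2: does it match the requested LUN? */
		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       LPFC_CTX_LUN))
			continue;

		cnt++;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return cnt;
}
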
12385/**
12386 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12387 * @vport: Pointer to virtual port.
12388 * @tgt_id: SCSI ID of the target.
12389 * @lun_id: LUN ID of the scsi device.
12390 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12391 *
12392 * This function returns the number of FCP commands pending for the vport.
12393 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
12394 * commands pending on the vport for the SCSI device specified
12395 * by the tgt_id and lun_id parameters.
12396 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
12397 * commands pending on the vport for the SCSI target specified
12398 * by the tgt_id parameter.
12399 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
12400 * commands pending on the vport.
12401 * This function returns the number of iocbs which satisfy the filter.
12402 * This function is called without any lock held.
12403 **/
12404int
12405lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12406 lpfc_ctx_cmd ctx_cmd)
12407{
12408 struct lpfc_hba *phba = vport->phba;
12409 struct lpfc_iocbq *iocbq;
12410 int sum, i;
12411 unsigned long iflags;
12412 u8 ulp_command;
12413
12414 spin_lock_irqsave(&phba->hbalock, iflags);
12415 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12416 iocbq = phba->sli.iocbq_lookup[i];
12417
12418 if (!iocbq || iocbq->vport != vport)
12419 continue;
12420 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12421 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12422 continue;
12423
12424 /* Include counting outstanding aborts */
12425 ulp_command = get_job_cmnd(phba, iocbq);
12426 if (ulp_command == CMD_ABORT_XRI_CN ||
12427 ulp_command == CMD_CLOSE_XRI_CN ||
12428 ulp_command == CMD_ABORT_XRI_WQE) {
12429 sum++;
12430 continue;
12431 }
12432
12433 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12434 ctx_cmd) == 0)
12435 sum++;
12436 }
12437 spin_unlock_irqrestore(&phba->hbalock, iflags);
12438
12439 return sum;
12440}
12441
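/*
 * Example (hypothetical sketch): polling lpfc_sli_sum_iocb() until the I/Os
 * for a LUN drain after a reset, in the spirit of the driver's reset-flush
 * handling. The 20ms poll interval and 2s bound are illustrative only, not
 * driver policy, and the helper name is made up.
 */
static int
lpfc_example_wait_lun_drain(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(2000);
	int cnt;

	for (;;) {
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
		if (!cnt)
			return 0;
		if (time_after(jiffies, deadline))
			return cnt;	/* still pending after the wait */
		msleep(20);		/* sleeps; process context only */
	}
}
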
12442/**
12443 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12444 * @phba: Pointer to HBA context object
12445 * @cmdiocb: Pointer to command iocb object.
12446 * @rspiocb: Pointer to response iocb object.
12447 *
12448 * This function is called when an aborted FCP iocb completes. This
12449 * function is called by the ring event handler with no lock held.
12450 * This function frees the iocb.
12451 **/
12452void
12453lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12454 struct lpfc_iocbq *rspiocb)
12455{
12456 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12457 "3096 ABORT_XRI_CX completing on rpi x%x "
12458 "original iotag x%x, abort cmd iotag x%x "
12459 "status 0x%x, reason 0x%x\n",
12460 (phba->sli_rev == LPFC_SLI_REV4) ?
12461 cmdiocb->sli4_xritag :
12462 cmdiocb->iocb.un.acxri.abortContextTag,
12463 get_job_abtsiotag(phba, cmdiocb),
12464 cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
12465 get_job_word4(phba, rspiocb));
12466 lpfc_sli_release_iocbq(phba, cmdiocb);
12467 return;
12468}
12469
12470/**
12471 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12472 * @vport: Pointer to virtual port.
12473 * @tgt_id: SCSI ID of the target.
12474 * @lun_id: LUN ID of the scsi device.
12475 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12476 *
12477 * This function sends an abort command for every SCSI command
12478 * associated with the given virtual port pending on the ring
12479 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12480 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
12481 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12482 * followed by lpfc_sli_validate_fcp_iocb.
12483 *
12484 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12485 * FCP iocbs associated with lun specified by tgt_id and lun_id
12486 * parameters
12487 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12488 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12489 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12490 * FCP iocbs associated with virtual port.
12491 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12492 * lpfc_sli4_calc_ring is used.
12493 * This function returns the number of iocbs it failed to abort.
12494 * This function is called with no locks held.
12495 **/
12496int
12497lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12498 lpfc_ctx_cmd abort_cmd)
12499{
12500 struct lpfc_hba *phba = vport->phba;
12501 struct lpfc_sli_ring *pring = NULL;
12502 struct lpfc_iocbq *iocbq;
12503 int errcnt = 0, ret_val = 0;
12504 unsigned long iflags;
12505 int i;
12506
12507 /* all I/Os are in the process of being flushed */
12508 if (phba->hba_flag & HBA_IOQ_FLUSH)
12509 return errcnt;
12510
12511 for (i = 1; i <= phba->sli.last_iotag; i++) {
12512 iocbq = phba->sli.iocbq_lookup[i];
12513
12514 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12515 continue;
12516
12517 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12518 abort_cmd) != 0)
12519 continue;
12520
12521 spin_lock_irqsave(&phba->hbalock, iflags);
12522 if (phba->sli_rev == LPFC_SLI_REV3) {
12523 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12524 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12525 pring = lpfc_sli4_calc_ring(phba, iocbq);
12526 }
12527 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12528 lpfc_sli_abort_fcp_cmpl);
12529 spin_unlock_irqrestore(&phba->hbalock, iflags);
12530 if (ret_val != IOCB_SUCCESS)
12531 errcnt++;
12532 }
12533
12534 return errcnt;
12535}
12536
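/*
 * Example (hypothetical sketch): kicking off aborts for every outstanding
 * FCP I/O to one SCSI target and returning the count of abort requests that
 * could not be issued. The helper name is made up; lun_id is ignored for
 * LPFC_CTX_TGT, so 0 is passed.
 */
static int
lpfc_example_abort_target_ios(struct lpfc_vport *vport, u16 tgt_id)
{
	/* No locks held here; each abort is issued under hbalock internally */
	return lpfc_sli_abort_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
}
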
12537/**
12538 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12539 * @vport: Pointer to virtual port.
12540 * @pring: Pointer to driver SLI ring object.
12541 * @tgt_id: SCSI ID of the target.
12542 * @lun_id: LUN ID of the scsi device.
12543 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12544 *
12545 * This function sends an abort command for every SCSI command
12546 * associated with the given virtual port pending on the ring
12547 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12548 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
12549 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12550 * followed by lpfc_sli_validate_fcp_iocb.
12551 *
12552 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
12553 * FCP iocbs associated with lun specified by tgt_id and lun_id
12554 * parameters
12555 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
12556 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12557 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
12558 * FCP iocbs associated with virtual port.
12559 * This function returns the number of iocbs it aborted.
12560 * This function is called with no locks held right after a taskmgmt
12561 * command is sent.
12562 **/
12563int
12564lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12565 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12566{
12567 struct lpfc_hba *phba = vport->phba;
12568 struct lpfc_io_buf *lpfc_cmd;
12569 struct lpfc_iocbq *abtsiocbq;
12570 struct lpfc_nodelist *ndlp = NULL;
12571 struct lpfc_iocbq *iocbq;
12572 int sum, i, ret_val;
12573 unsigned long iflags;
12574 struct lpfc_sli_ring *pring_s4 = NULL;
12575 u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
12576 bool ia;
12577
12578 spin_lock_irqsave(&phba->hbalock, iflags);
12579
12580 /* all I/Os are in the process of being flushed */
12581 if (phba->hba_flag & HBA_IOQ_FLUSH) {
12582 spin_unlock_irqrestore(&phba->hbalock, iflags);
12583 return 0;
12584 }
12585 sum = 0;
12586
12587 for (i = 1; i <= phba->sli.last_iotag; i++) {
12588 iocbq = phba->sli.iocbq_lookup[i];
12589
12590 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12591 continue;
12592
12593 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12594 cmd) != 0)
12595 continue;
12596
12597 /* Guard against IO completion being called at the same time */
12598 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12599 spin_lock(&lpfc_cmd->buf_lock);
12600
12601 if (!lpfc_cmd->pCmd) {
12602 spin_unlock(&lpfc_cmd->buf_lock);
12603 continue;
12604 }
12605
12606 if (phba->sli_rev == LPFC_SLI_REV4) {
12607 pring_s4 =
12608 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12609 if (!pring_s4) {
12610 spin_unlock(&lpfc_cmd->buf_lock);
12611 continue;
12612 }
12613 /* Note: both hbalock and ring_lock must be held here */
12614 spin_lock(&pring_s4->ring_lock);
12615 }
12616
12617 /*
12618 * If the iocbq is already being aborted, don't take a second
12619 * action; just skip it (it is not counted).
12620 */
12621 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12622 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12623 if (phba->sli_rev == LPFC_SLI_REV4)
12624 spin_unlock(&pring_s4->ring_lock);
12625 spin_unlock(&lpfc_cmd->buf_lock);
12626 continue;
12627 }
12628
12629 /* issue ABTS for this IOCB based on iotag */
12630 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12631 if (!abtsiocbq) {
12632 if (phba->sli_rev == LPFC_SLI_REV4)
12633 spin_unlock(&pring_s4->ring_lock);
12634 spin_unlock(&lpfc_cmd->buf_lock);
12635 continue;
12636 }
12637
12638 if (phba->sli_rev == LPFC_SLI_REV4) {
12639 iotag = abtsiocbq->iotag;
12640 ulp_context = iocbq->sli4_xritag;
12641 cqid = lpfc_cmd->hdwq->io_cq_map;
12642 } else {
12643 iotag = iocbq->iocb.ulpIoTag;
12644 if (pring->ringno == LPFC_ELS_RING) {
12645 ndlp = iocbq->ndlp;
12646 ulp_context = ndlp->nlp_rpi;
12647 } else {
12648 ulp_context = iocbq->iocb.ulpContext;
12649 }
12650 }
12651
12652 ndlp = lpfc_cmd->rdata->pnode;
12653
12654 if (lpfc_is_link_up(phba) &&
12655 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
12656 !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
12657 ia = false;
12658 else
12659 ia = true;
12660
12661 lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
12662 iocbq->iocb.ulpClass, cqid,
12663 ia);
12664
12665 abtsiocbq->vport = vport;
12666
12667 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12668 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12669 if (iocbq->cmd_flag & LPFC_IO_FCP)
12670 abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12671 if (iocbq->cmd_flag & LPFC_IO_FOF)
12672 abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12673
12674 /* Setup callback routine and issue the command. */
12675 abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12676
12677 /*
12678 * Indicate the IO is being aborted by the driver and set
12679 * the caller's flag into the aborted IO.
12680 */
12681 iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12682
12683 if (phba->sli_rev == LPFC_SLI_REV4) {
12684 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12685 abtsiocbq, 0);
12686 spin_unlock(&pring_s4->ring_lock);
12687 } else {
12688 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12689 abtsiocbq, 0);
12690 }
12691
12692 spin_unlock(&lpfc_cmd->buf_lock);
12693
12694 if (ret_val == IOCB_ERROR)
12695 __lpfc_sli_release_iocbq(phba, abtsiocbq);
12696 else
12697 sum++;
12698 }
12699 spin_unlock_irqrestore(&phba->hbalock, iflags);
12700 return sum;
12701}
12702
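/*
 * Example (hypothetical sketch): mirroring how the SCSI layer glue is
 * expected to use this routine right after a LUN-reset task management
 * command, per the comment above. The SLI3 FCP ring is passed; on SLI4 the
 * routine selects the per-hdwq ring itself. The helper name is made up.
 */
static int
lpfc_example_flush_after_lun_reset(struct lpfc_vport *vport, u16 tgt_id,
				   u64 lun_id)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_sli_abort_taskmgmt(vport,
				       &phba->sli.sli3_ring[LPFC_FCP_RING],
				       tgt_id, lun_id, LPFC_CTX_LUN);
}
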
12703/**
12704 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12705 * @phba: Pointer to HBA context object.
12706 * @cmdiocbq: Pointer to command iocb.
12707 * @rspiocbq: Pointer to response iocb.
12708 *
12709 * This function is the completion handler for iocbs issued using
12710 * lpfc_sli_issue_iocb_wait function. This function is called by the
12711 * ring event handler function without any lock held. This function
12712 * can be called from both worker thread context and interrupt
12713 * context. This function also can be called from other thread which
12714 * cleans up the SLI layer objects.
12715 * This function copies the contents of the response iocb to the
12716 * response iocb memory object provided by the caller of
12717 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12718 * sleeps on the iocb completion.
12719 **/
12720static void
12721lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12722 struct lpfc_iocbq *cmdiocbq,
12723 struct lpfc_iocbq *rspiocbq)
12724{
12725 wait_queue_head_t *pdone_q;
12726 unsigned long iflags;
12727 struct lpfc_io_buf *lpfc_cmd;
12728 size_t offset = offsetof(struct lpfc_iocbq, wqe);
12729
12730 spin_lock_irqsave(&phba->hbalock, iflags);
12731 if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
12732
12733 /*
12734 * A time out has occurred for the iocb. If a time out
12735 * completion handler has been supplied, call it. Otherwise,
12736 * just free the iocbq.
12737 */
12738
12739 spin_unlock_irqrestore(&phba->hbalock, iflags);
12740 cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
12741 cmdiocbq->wait_cmd_cmpl = NULL;
12742 if (cmdiocbq->cmd_cmpl)
12743 (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL);
12744 else
12745 lpfc_sli_release_iocbq(phba, cmdiocbq);
12746 return;
12747 }
12748
12749 /* Copy the contents of the local rspiocb into the caller's buffer. */
12750 cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
12751 if (cmdiocbq->rsp_iocb && rspiocbq)
12752 memcpy((char *)cmdiocbq->rsp_iocb + offset,
12753 (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
12754
12755 /* Set the exchange busy flag for task management commands */
12756 if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
12757 !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
12758 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
12759 cur_iocbq);
12760 if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
12761 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12762 else
12763 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
12764 }
12765
12766 pdone_q = cmdiocbq->context_un.wait_queue;
12767 if (pdone_q)
12768 wake_up(pdone_q);
12769 spin_unlock_irqrestore(&phba->hbalock, iflags);
12770 return;
12771}
12772
12773/**
12774 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
12775 * @phba: Pointer to HBA context object..
12776 * @piocbq: Pointer to command iocb.
12777 * @flag: Flag to test.
12778 *
12779 * This routine grabs the hbalock and then tests the cmd_flag to
12780 * see if the passed in flag is set.
12781 * Returns:
12782 * 1 if flag is set.
12783 * 0 if flag is not set.
12784 **/
12785static int
12786lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12787 struct lpfc_iocbq *piocbq, uint32_t flag)
12788{
12789 unsigned long iflags;
12790 int ret;
12791
12792 spin_lock_irqsave(&phba->hbalock, iflags);
12793 ret = piocbq->cmd_flag & flag;
12794 spin_unlock_irqrestore(&phba->hbalock, iflags);
12795 return ret;
12796
12797}
12798
12799/**
12800 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
12801 * @phba: Pointer to HBA context object..
12802 * @ring_number: Ring number
12803 * @piocb: Pointer to command iocb.
12804 * @prspiocbq: Pointer to response iocb.
12805 * @timeout: Timeout in number of seconds.
12806 *
12807 * This function issues the iocb to firmware and waits for the
12808 * iocb to complete. The cmd_cmpl field of the iocb shall be used
12809 * to handle iocbs which time out. If the field is NULL, the
12810 * function shall free the iocbq structure. If more clean up is
12811 * needed, the caller is expected to provide a completion function
12812 * that will provide the needed clean up. If the iocb command is
12813 * not completed within timeout seconds, the function will either
12814 * free the iocbq structure (if cmd_cmpl == NULL) or execute the
12815 * completion function set in the cmd_cmpl field and then return
12816 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
12817 * resources if this function returns IOCB_TIMEDOUT.
12818 * The function waits for the iocb completion using a
12819 * non-interruptible wait.
12820 * This function will sleep while waiting for iocb completion.
12821 * So, this function should not be called from any context which
12822 * does not allow sleeping. Due to the same reason, this function
12823 * cannot be called with interrupt disabled.
12824 * This function assumes that the iocb completions occur while
12825 * this function sleeps. So, this function cannot be called from
12826 * the thread which processes iocb completions for this ring.
12827 * This function clears the cmd_flag of the iocb object before
12828 * issuing the iocb and the iocb completion handler sets this
12829 * flag and wakes this thread when the iocb completes.
12830 * The contents of the response iocb will be copied to prspiocbq
12831 * by the completion handler when the command completes.
12832 * This function returns IOCB_SUCCESS when success.
12833 * This function is called with no lock held.
12834 **/
12835int
12836lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12837 uint32_t ring_number,
12838 struct lpfc_iocbq *piocb,
12839 struct lpfc_iocbq *prspiocbq,
12840 uint32_t timeout)
12841{
12842 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12843 long timeleft, timeout_req = 0;
12844 int retval = IOCB_SUCCESS;
12845 uint32_t creg_val;
12846 struct lpfc_iocbq *iocb;
12847 int txq_cnt = 0;
12848 int txcmplq_cnt = 0;
12849 struct lpfc_sli_ring *pring;
12850 unsigned long iflags;
12851 bool iocb_completed = true;
12852
12853 if (phba->sli_rev >= LPFC_SLI_REV4) {
12854 lpfc_sli_prep_wqe(phba, piocb);
12855
12856 pring = lpfc_sli4_calc_ring(phba, piocb);
12857 } else
12858 pring = &phba->sli.sli3_ring[ring_number];
12859 /*
12860 * If the caller has provided a response iocbq buffer, then rsp_iocb
12861 * must still be NULL here; otherwise it is an error.
12862 */
12863 if (prspiocbq) {
12864 if (piocb->rsp_iocb)
12865 return IOCB_ERROR;
12866 piocb->rsp_iocb = prspiocbq;
12867 }
12868
12869 piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
12870 piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
12871 piocb->context_un.wait_queue = &done_q;
12872 piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12873
12874 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12875 if (lpfc_readl(phba->HCregaddr, &creg_val))
12876 return IOCB_ERROR;
12877 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12878 writel(creg_val, phba->HCregaddr);
12879 readl(phba->HCregaddr); /* flush */
12880 }
12881
12882 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12883 SLI_IOCB_RET_IOCB);
12884 if (retval == IOCB_SUCCESS) {
12885 timeout_req = msecs_to_jiffies(timeout * 1000);
12886 timeleft = wait_event_timeout(done_q,
12887 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12888 timeout_req);
12889 spin_lock_irqsave(&phba->hbalock, iflags);
12890 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
12891
12892 /*
12893 * IOCB timed out. Inform the wake iocb wait
12894 * completion function and set local status
12895 */
12896
12897 iocb_completed = false;
12898 piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
12899 }
12900 spin_unlock_irqrestore(&phba->hbalock, iflags);
12901 if (iocb_completed) {
12902 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12903 "0331 IOCB wake signaled\n");
12904 /* Note: we are not indicating if the IOCB has a success
12905 * status or not - that's for the caller to check.
12906 * IOCB_SUCCESS means just that the command was sent and
12907 * completed. Not that it completed successfully.
12908 */
12909 } else if (timeleft == 0) {
12910 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12911 "0338 IOCB wait timeout error - no "
12912 "wake response Data x%x\n", timeout);
12913 retval = IOCB_TIMEDOUT;
12914 } else {
12915 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12916 "0330 IOCB wake NOT set, "
12917 "Data x%x x%lx\n",
12918 timeout, (timeleft / jiffies));
12919 retval = IOCB_TIMEDOUT;
12920 }
12921 } else if (retval == IOCB_BUSY) {
12922 if (phba->cfg_log_verbose & LOG_SLI) {
12923 list_for_each_entry(iocb, &pring->txq, list) {
12924 txq_cnt++;
12925 }
12926 list_for_each_entry(iocb, &pring->txcmplq, list) {
12927 txcmplq_cnt++;
12928 }
12929 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12930 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12931 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12932 }
12933 return retval;
12934 } else {
12935 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12936 "0332 IOCB wait issue failed, Data x%x\n",
12937 retval);
12938 retval = IOCB_ERROR;
12939 }
12940
12941 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12942 if (lpfc_readl(phba->HCregaddr, &creg_val))
12943 return IOCB_ERROR;
12944 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12945 writel(creg_val, phba->HCregaddr);
12946 readl(phba->HCregaddr); /* flush */
12947 }
12948
12949 if (prspiocbq)
12950 piocb->rsp_iocb = NULL;
12951
12952 piocb->context_un.wait_queue = NULL;
12953 piocb->cmd_cmpl = NULL;
12954 return retval;
12955}
12956
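/*
 * Example (hypothetical sketch): issuing a prepared ELS iocb synchronously.
 * The 30 second timeout and helper name are illustrative. Note the
 * IOCB_TIMEDOUT rule from the comment above: on timeout, ownership of the
 * iocbqs stays with the deferred completion path, so this sketch
 * conservatively frees nothing in that case.
 */
static int
lpfc_example_issue_els_sync(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_iocbq *rspiocb;
	int rc;

	rspiocb = lpfc_sli_get_iocbq(phba);
	if (!rspiocb)
		return -ENOMEM;

	/* Sleeps waiting for completion; process context only. */
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocb,
				      rspiocb, 30);
	if (rc == IOCB_TIMEDOUT)
		return -ETIMEDOUT;	/* do not free cmdiocb/rspiocb here */

	/* ... on IOCB_SUCCESS, inspect the copied response in rspiocb ... */
	lpfc_sli_release_iocbq(phba, rspiocb);
	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}
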
12957/**
12958 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12959 * @phba: Pointer to HBA context object.
12960 * @pmboxq: Pointer to driver mailbox object.
12961 * @timeout: Timeout in number of seconds.
12962 *
12963 * This function issues the mailbox to firmware and waits for the
12964 * mailbox command to complete. If the mailbox command is not
12965 * completed within timeout seconds, it returns MBX_TIMEOUT.
12966 * The function waits for the mailbox completion using a
12967 * non-interruptible wait (wait_for_completion_timeout). If the
12968 * wait times out, MBX_TIMEOUT is returned to the caller. The caller
12969 * should not free the mailbox resources if this function returns
12970 * MBX_TIMEOUT.
12971 * This function will sleep while waiting for mailbox completion.
12972 * So, this function should not be called from any context which
12973 * does not allow sleeping. Due to the same reason, this function
12974 * cannot be called with interrupt disabled.
12975 * This function assumes that the mailbox completion occurs while
12976 * this function sleeps. So, this function cannot be called from
12977 * the worker thread which processes mailbox completions.
12978 * This function is called in the context of HBA management
12979 * applications.
12980 * This function returns MBX_SUCCESS when successful.
12981 * This function is called with no lock held.
12982 **/
12983int
12984lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12985 uint32_t timeout)
12986{
12987 struct completion mbox_done;
12988 int retval;
12989 unsigned long flag;
12990
12991 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12992 /* setup wake call as mailbox completion callback */
12993 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12994
12995 /* setup context3 field to pass the completion pointer to the wake function */
12996 init_completion(&mbox_done);
12997 pmboxq->context3 = &mbox_done;
12998 /* now issue the command */
12999 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13000 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13001 wait_for_completion_timeout(&mbox_done,
13002 msecs_to_jiffies(timeout * 1000));
13003
13004 spin_lock_irqsave(&phba->hbalock, flag);
13005 pmboxq->context3 = NULL;
13006 /*
13007 * if LPFC_MBX_WAKE flag is set the mailbox is completed
13008 * else do not free the resources.
13009 */
13010 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13011 retval = MBX_SUCCESS;
13012 } else {
13013 retval = MBX_TIMEOUT;
13014 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13015 }
13016 spin_unlock_irqrestore(&phba->hbalock, flag);
13017 }
13018 return retval;
13019}
13020
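/*
 * Example (hypothetical sketch): a synchronous mailbox round trip. The
 * READ_REV setup via lpfc_read_rev() is assumed from lpfc_mbox.c; the key
 * point is the MBX_TIMEOUT ownership rule spelled out above. The helper
 * name is made up.
 */
static int
lpfc_example_read_rev_sync(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_rev(phba, pmb);	/* assumed mailbox setup helper */
	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		return -ETIMEDOUT;	/* completion path owns pmb now */

	/* ... consume pmb->u.mb on MBX_SUCCESS ... */
	mempool_free(pmb, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
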
13021/**
13022 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13023 * @phba: Pointer to HBA context.
13024 * @mbx_action: Mailbox shutdown options.
13025 *
13026 * This function is called to shutdown the driver's mailbox sub-system.
13027 * It first marks the mailbox sub-system as blocked to prevent
13028 * asynchronous mailbox commands from being issued off the pending mailbox
13029 * command queue. If the mailbox command sub-system shutdown is due to
13030 * HBA error conditions such as EEH or ERATT, this routine shall invoke
13031 * the mailbox sub-system flush routine to forcefully bring down the
13032 * mailbox sub-system. Otherwise, if it is due to normal condition (such
13033 * as with offline or HBA function reset), this routine will wait for the
13034 * outstanding mailbox command to complete before invoking the mailbox
13035 * sub-system flush routine to gracefully bring down mailbox sub-system.
13036 **/
13037void
13038lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13039{
13040 struct lpfc_sli *psli = &phba->sli;
13041 unsigned long timeout;
13042
13043 if (mbx_action == LPFC_MBX_NO_WAIT) {
13044 /* delay 100ms for port state */
13045 msleep(100);
13046 lpfc_sli_mbox_sys_flush(phba);
13047 return;
13048 }
13049 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13050
13051 /* Disable softirqs, including timers from obtaining phba->hbalock */
13052 local_bh_disable();
13053
13054 spin_lock_irq(&phba->hbalock);
13055 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13056
13057 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13058 /* Determine how long we might wait for the active mailbox
13059 * command to be gracefully completed by firmware.
13060 */
13061 if (phba->sli.mbox_active)
13062 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13063 phba->sli.mbox_active) *
13064 1000) + jiffies;
13065 spin_unlock_irq(&phba->hbalock);
13066
13067 /* Enable softirqs again, done with phba->hbalock */
13068 local_bh_enable();
13069
13070 while (phba->sli.mbox_active) {
13071 /* Check active mailbox complete status every 2ms */
13072 msleep(2);
13073 if (time_after(jiffies, timeout))
13074 /* Timeout, let the mailbox flush routine
13075 * forcefully release the active mailbox command
13076 */
13077 break;
13078 }
13079 } else {
13080 spin_unlock_irq(&phba->hbalock);
13081
13082 /* Enable softirqs again, done with phba->hbalock */
13083 local_bh_enable();
13084 }
13085
13086 lpfc_sli_mbox_sys_flush(phba);
13087}
13088
13089/**
13090 * lpfc_sli_eratt_read - read sli-3 error attention events
13091 * @phba: Pointer to HBA context.
13092 *
13093 * This function is called to read the SLI3 device error attention registers
13094 * for possible error attention events. The caller must hold the hostlock
13095 * with spin_lock_irq().
13096 *
13097 * This function returns 1 when there is Error Attention in the Host Attention
13098 * Register and returns 0 otherwise.
13099 **/
13100static int
13101lpfc_sli_eratt_read(struct lpfc_hba *phba)
13102{
13103 uint32_t ha_copy;
13104
13105 /* Read chip Host Attention (HA) register */
13106 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13107 goto unplug_err;
13108
13109 if (ha_copy & HA_ERATT) {
13110 /* Read host status register to retrieve error event */
13111 if (lpfc_sli_read_hs(phba))
13112 goto unplug_err;
13113
13114 /* Check if a deferred error condition is active */
13115 if ((HS_FFER1 & phba->work_hs) &&
13116 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13117 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13118 phba->hba_flag |= DEFER_ERATT;
13119 /* Clear all interrupt enable conditions */
13120 writel(0, phba->HCregaddr);
13121 readl(phba->HCregaddr);
13122 }
13123
13124 /* Set the driver HA work bitmap */
13125 phba->work_ha |= HA_ERATT;
13126 /* Indicate polling handles this ERATT */
13127 phba->hba_flag |= HBA_ERATT_HANDLED;
13128 return 1;
13129 }
13130 return 0;
13131
13132unplug_err:
13133 /* Set the driver HS work bitmap */
13134 phba->work_hs |= UNPLUG_ERR;
13135 /* Set the driver HA work bitmap */
13136 phba->work_ha |= HA_ERATT;
13137 /* Indicate polling handles this ERATT */
13138 phba->hba_flag |= HBA_ERATT_HANDLED;
13139 return 1;
13140}
13141
13142/**
13143 * lpfc_sli4_eratt_read - read sli-4 error attention events
13144 * @phba: Pointer to HBA context.
13145 *
13146 * This function is called to read the SLI4 device error attention registers
13147 * for possible error attention events. The caller must hold the hostlock
13148 * with spin_lock_irq().
13149 *
13150 * This function returns 1 when there is Error Attention in the Host Attention
13151 * Register and returns 0 otherwise.
13152 **/
13153static int
13154lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13155{
13156 uint32_t uerr_sta_hi, uerr_sta_lo;
13157 uint32_t if_type, portsmphr;
13158 struct lpfc_register portstat_reg;
13159 u32 logmask;
13160
13161 /*
13162 * For now, use the SLI4 device internal unrecoverable error
13163 * registers for error attention. This can be changed later.
13164 */
13165 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13166 switch (if_type) {
13167 case LPFC_SLI_INTF_IF_TYPE_0:
13168 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13169 &uerr_sta_lo) ||
13170 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13171 &uerr_sta_hi)) {
13172 phba->work_hs |= UNPLUG_ERR;
13173 phba->work_ha |= HA_ERATT;
13174 phba->hba_flag |= HBA_ERATT_HANDLED;
13175 return 1;
13176 }
13177 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13178 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13180 "1423 HBA Unrecoverable error: "
13181 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13182 "ue_mask_lo_reg=0x%x, "
13183 "ue_mask_hi_reg=0x%x\n",
13184 uerr_sta_lo, uerr_sta_hi,
13185 phba->sli4_hba.ue_mask_lo,
13186 phba->sli4_hba.ue_mask_hi);
13187 phba->work_status[0] = uerr_sta_lo;
13188 phba->work_status[1] = uerr_sta_hi;
13189 phba->work_ha |= HA_ERATT;
13190 phba->hba_flag |= HBA_ERATT_HANDLED;
13191 return 1;
13192 }
13193 break;
13194 case LPFC_SLI_INTF_IF_TYPE_2:
13195 case LPFC_SLI_INTF_IF_TYPE_6:
13196 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13197 &portstat_reg.word0) ||
13198 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13199 &portsmphr)){
13200 phba->work_hs |= UNPLUG_ERR;
13201 phba->work_ha |= HA_ERATT;
13202 phba->hba_flag |= HBA_ERATT_HANDLED;
13203 return 1;
13204 }
13205 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13206 phba->work_status[0] =
13207 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13208 phba->work_status[1] =
13209 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13210 logmask = LOG_TRACE_EVENT;
13211 if (phba->work_status[0] ==
13212 SLIPORT_ERR1_REG_ERR_CODE_2 &&
13213 phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13214 logmask = LOG_SLI;
13215 lpfc_printf_log(phba, KERN_ERR, logmask,
13216 "2885 Port Status Event: "
13217 "port status reg 0x%x, "
13218 "port smphr reg 0x%x, "
13219 "error 1=0x%x, error 2=0x%x\n",
13220 portstat_reg.word0,
13221 portsmphr,
13222 phba->work_status[0],
13223 phba->work_status[1]);
13224 phba->work_ha |= HA_ERATT;
13225 phba->hba_flag |= HBA_ERATT_HANDLED;
13226 return 1;
13227 }
13228 break;
13229 case LPFC_SLI_INTF_IF_TYPE_1:
13230 default:
13231 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13232 "2886 HBA Error Attention on unsupported "
13233 "if type %d.", if_type);
13234 return 1;
13235 }
13236
13237 return 0;
13238}
13239
13240/**
13241 * lpfc_sli_check_eratt - check error attention events
13242 * @phba: Pointer to HBA context.
13243 *
13244 * This function is called from timer soft interrupt context to check HBA's
13245 * error attention register bit for error attention events.
13246 *
13247 * This function returns 1 when there is Error Attention in the Host Attention
13248 * Register and returns 0 otherwise.
13249 **/
13250int
13251lpfc_sli_check_eratt(struct lpfc_hba *phba)
13252{
13253 uint32_t ha_copy;
13254
13255 /* If somebody is waiting to handle an eratt, don't process it
13256 * here. The brdkill function will do this.
13257 */
13258 if (phba->link_flag & LS_IGNORE_ERATT)
13259 return 0;
13260
13261 /* Check if interrupt handler handles this ERATT */
13262 spin_lock_irq(&phba->hbalock);
13263 if (phba->hba_flag & HBA_ERATT_HANDLED) {
13264 /* Interrupt handler has handled ERATT */
13265 spin_unlock_irq(&phba->hbalock);
13266 return 0;
13267 }
13268
13269 /*
13270 * If there is deferred error attention, do not check for error
13271 * attention
13272 */
13273 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13274 spin_unlock_irq(&phba->hbalock);
13275 return 0;
13276 }
13277
13278 /* If PCI channel is offline, don't process it */
13279 if (unlikely(pci_channel_offline(phba->pcidev))) {
13280 spin_unlock_irq(&phba->hbalock);
13281 return 0;
13282 }
13283
13284 switch (phba->sli_rev) {
13285 case LPFC_SLI_REV2:
13286 case LPFC_SLI_REV3:
13287 /* Read chip Host Attention (HA) register */
13288 ha_copy = lpfc_sli_eratt_read(phba);
13289 break;
13290 case LPFC_SLI_REV4:
13291 /* Read device Unrecoverable Error (UERR) registers */
13292 ha_copy = lpfc_sli4_eratt_read(phba);
13293 break;
13294 default:
13295 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13296 "0299 Invalid SLI revision (%d)\n",
13297 phba->sli_rev);
13298 ha_copy = 0;
13299 break;
13300 }
13301 spin_unlock_irq(&phba->hbalock);
13302
13303 return ha_copy;
13304}
13305
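/*
 * Example (illustrative sketch): how a timer-driven poller is expected to
 * consume this routine. A nonzero return means an error attention has been
 * latched into phba->work_ha, so the worker thread just needs a kick via
 * the existing lpfc_worker_wake_up(). The helper name is made up.
 */
static void
lpfc_example_eratt_poll(struct lpfc_hba *phba)
{
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);
}
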
13306/**
13307 * lpfc_intr_state_check - Check device state for interrupt handling
13308 * @phba: Pointer to HBA context.
13309 *
13310 * This inline routine checks whether a device or its PCI slot is in a state
13311 * in which the interrupt should be handled.
13312 *
13313 * This function returns 0 if the device or the PCI slot is in a state in
13314 * which the interrupt should be handled, otherwise -EIO.
13315 */
13316static inline int
13317lpfc_intr_state_check(struct lpfc_hba *phba)
13318{
13319 /* If the pci channel is offline, ignore all the interrupts */
13320 if (unlikely(pci_channel_offline(phba->pcidev)))
13321 return -EIO;
13322
13323 /* Update device level interrupt statistics */
13324 phba->sli.slistat.sli_intr++;
13325
13326 /* Ignore all interrupts during initialization. */
13327 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13328 return -EIO;
13329
13330 return 0;
13331}
13332
13333/**
13334 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13335 * @irq: Interrupt number.
13336 * @dev_id: The device context pointer.
13337 *
13338 * This function is directly called from the PCI layer as an interrupt
13339 * service routine when device with SLI-3 interface spec is enabled with
13340 * MSI-X multi-message interrupt mode and there are slow-path events in
13341 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13342 * interrupt mode, this function is called as part of the device-level
13343 * interrupt handler. When the PCI slot is in error recovery or the HBA
13344 * is undergoing initialization, the interrupt handler will not process
13345 * the interrupt. The link attention and ELS ring attention events are
13346 * handled by the worker thread. The interrupt handler signals the worker
13347 * thread and returns for these events. This function is called without
13348 * any lock held. It gets the hbalock to access and update SLI data
13349 * structures.
13350 *
13351 * This function returns IRQ_HANDLED when interrupt is handled else it
13352 * returns IRQ_NONE.
13353 **/
13354irqreturn_t
13355lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13356{
13357 struct lpfc_hba *phba;
13358 uint32_t ha_copy, hc_copy;
13359 uint32_t work_ha_copy;
13360 unsigned long status;
13361 unsigned long iflag;
13362 uint32_t control;
13363
13364 MAILBOX_t *mbox, *pmbox;
13365 struct lpfc_vport *vport;
13366 struct lpfc_nodelist *ndlp;
13367 struct lpfc_dmabuf *mp;
13368 LPFC_MBOXQ_t *pmb;
13369 int rc;
13370
13371 /*
13372 * Get the driver's phba structure from the dev_id and
13373 * assume the HBA is not interrupting.
13374 */
13375 phba = (struct lpfc_hba *)dev_id;
13376
13377 if (unlikely(!phba))
13378 return IRQ_NONE;
13379
13380 /*
13381 * Several things need to be attended to when this function is invoked as an
13382 * individual interrupt handler in MSI-X multi-message interrupt mode
13383 */
13384 if (phba->intr_type == MSIX) {
13385 /* Check device state for handling interrupt */
13386 if (lpfc_intr_state_check(phba))
13387 return IRQ_NONE;
13388 /* Need to read HA REG for slow-path events */
13389 spin_lock_irqsave(&phba->hbalock, iflag);
13390 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13391 goto unplug_error;
13392 /* If somebody is waiting to handle an eratt don't process it
13393 * here. The brdkill function will do this.
13394 */
13395 if (phba->link_flag & LS_IGNORE_ERATT)
13396 ha_copy &= ~HA_ERATT;
13397 /* Check the need for handling ERATT in interrupt handler */
13398 if (ha_copy & HA_ERATT) {
13399 if (phba->hba_flag & HBA_ERATT_HANDLED)
13400 /* ERATT polling has handled ERATT */
13401 ha_copy &= ~HA_ERATT;
13402 else
13403 /* Indicate interrupt handler handles ERATT */
13404 phba->hba_flag |= HBA_ERATT_HANDLED;
13405 }
13406
13407 /*
13408 * If there is deferred error attention, do not check for any
13409 * interrupt.
13410 */
13411 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13412 spin_unlock_irqrestore(&phba->hbalock, iflag);
13413 return IRQ_NONE;
13414 }
13415
13416 /* Clear up only attention source related to slow-path */
13417 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13418 goto unplug_error;
13419
13420 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13421 HC_LAINT_ENA | HC_ERINT_ENA),
13422 phba->HCregaddr);
13423 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13424 phba->HAregaddr);
13425 writel(hc_copy, phba->HCregaddr);
13426 readl(phba->HAregaddr); /* flush */
13427 spin_unlock_irqrestore(&phba->hbalock, iflag);
13428 } else
13429 ha_copy = phba->ha_copy;
13430
13431 work_ha_copy = ha_copy & phba->work_ha_mask;
13432
13433 if (work_ha_copy) {
13434 if (work_ha_copy & HA_LATT) {
13435 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13436 /*
13437 * Turn off Link Attention interrupts
13438 * until CLEAR_LA done
13439 */
13440 spin_lock_irqsave(&phba->hbalock, iflag);
13441 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13442 if (lpfc_readl(phba->HCregaddr, &control))
13443 goto unplug_error;
13444 control &= ~HC_LAINT_ENA;
13445 writel(control, phba->HCregaddr);
13446 readl(phba->HCregaddr); /* flush */
13447 spin_unlock_irqrestore(&phba->hbalock, iflag);
13448 }
13449 else
13450 work_ha_copy &= ~HA_LATT;
13451 }
13452
13453 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13454 /*
13455 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13456 * the only slow ring.
13457 */
13458 status = (work_ha_copy &
13459 (HA_RXMASK << (4*LPFC_ELS_RING)));
13460 status >>= (4*LPFC_ELS_RING);
13461 if (status & HA_RXMASK) {
13462 spin_lock_irqsave(&phba->hbalock, iflag);
13463 if (lpfc_readl(phba->HCregaddr, &control))
13464 goto unplug_error;
13465
13466 lpfc_debugfs_slow_ring_trc(phba,
13467 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
13468 control, status,
13469 (uint32_t)phba->sli.slistat.sli_intr);
13470
13471 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13472 lpfc_debugfs_slow_ring_trc(phba,
13473 "ISR Disable ring:"
13474 "pwork:x%x hawork:x%x wait:x%x",
13475 phba->work_ha, work_ha_copy,
13476 (uint32_t)((unsigned long)
13477 &phba->work_waitq));
13478
13479 control &=
13480 ~(HC_R0INT_ENA << LPFC_ELS_RING);
13481 writel(control, phba->HCregaddr);
13482 readl(phba->HCregaddr); /* flush */
13483 }
13484 else {
13485 lpfc_debugfs_slow_ring_trc(phba,
13486 "ISR slow ring: pwork:"
13487 "x%x hawork:x%x wait:x%x",
13488 phba->work_ha, work_ha_copy,
13489 (uint32_t)((unsigned long)
13490 &phba->work_waitq));
13491 }
13492 spin_unlock_irqrestore(&phba->hbalock, iflag);
13493 }
13494 }
13495 spin_lock_irqsave(&phba->hbalock, iflag);
13496 if (work_ha_copy & HA_ERATT) {
13497 if (lpfc_sli_read_hs(phba))
13498 goto unplug_error;
13499 /*
13500 * Check if a deferred error condition
13501 * is active
13502 */
13503 if ((HS_FFER1 & phba->work_hs) &&
13504 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13505 HS_FFER6 | HS_FFER7 | HS_FFER8) &
13506 phba->work_hs)) {
13507 phba->hba_flag |= DEFER_ERATT;
13508 /* Clear all interrupt enable conditions */
13509 writel(0, phba->HCregaddr);
13510 readl(phba->HCregaddr);
13511 }
13512 }
13513
13514 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13515 pmb = phba->sli.mbox_active;
13516 pmbox = &pmb->u.mb;
13517 mbox = phba->mbox;
13518 vport = pmb->vport;
13519
13520 /* First check out the status word */
13521 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13522 if (pmbox->mbxOwner != OWN_HOST) {
13523 spin_unlock_irqrestore(&phba->hbalock, iflag);
13524 /*
13525 * Stray Mailbox Interrupt, mbxCommand <cmd>
13526 * mbxStatus <status>
13527 */
13528 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13529 "(%d):0304 Stray Mailbox "
13530 "Interrupt mbxCommand x%x "
13531 "mbxStatus x%x\n",
13532 (vport ? vport->vpi : 0),
13533 pmbox->mbxCommand,
13534 pmbox->mbxStatus);
13535 /* clear mailbox attention bit */
13536 work_ha_copy &= ~HA_MBATT;
13537 } else {
13538 phba->sli.mbox_active = NULL;
13539 spin_unlock_irqrestore(&phba->hbalock, iflag);
13540 phba->last_completion_time = jiffies;
13541 del_timer(&phba->sli.mbox_tmo);
13542 if (pmb->mbox_cmpl) {
13543 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13544 MAILBOX_CMD_SIZE);
13545 if (pmb->out_ext_byte_len &&
13546 pmb->ctx_buf)
13547 lpfc_sli_pcimem_bcopy(
13548 phba->mbox_ext,
13549 pmb->ctx_buf,
13550 pmb->out_ext_byte_len);
13551 }
13552 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13553 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13554
13555 lpfc_debugfs_disc_trc(vport,
13556 LPFC_DISC_TRC_MBOX_VPORT,
13557 "MBOX dflt rpi: : "
13558 "status:x%x rpi:x%x",
13559 (uint32_t)pmbox->mbxStatus,
13560 pmbox->un.varWords[0], 0);
13561
13562 if (!pmbox->mbxStatus) {
13563 mp = (struct lpfc_dmabuf *)
13564 (pmb->ctx_buf);
13565 ndlp = (struct lpfc_nodelist *)
13566 pmb->ctx_ndlp;
13567
13568 /* Reg_LOGIN of dflt RPI was
13569 * successful. Now let's get
13570 * rid of the RPI using the
13571 * same mbox buffer.
13572 */
13573 lpfc_unreg_login(phba,
13574 vport->vpi,
13575 pmbox->un.varWords[0],
13576 pmb);
13577 pmb->mbox_cmpl =
13578 lpfc_mbx_cmpl_dflt_rpi;
13579 pmb->ctx_buf = mp;
13580 pmb->ctx_ndlp = ndlp;
13581 pmb->vport = vport;
13582 rc = lpfc_sli_issue_mbox(phba,
13583 pmb,
13584 MBX_NOWAIT);
13585 if (rc != MBX_BUSY)
13586 lpfc_printf_log(phba,
13587 KERN_ERR,
13588 LOG_TRACE_EVENT,
13589 "0350 rc should have"
13590 "been MBX_BUSY\n");
13591 if (rc != MBX_NOT_FINISHED)
13592 goto send_current_mbox;
13593 }
13594 }
13595 spin_lock_irqsave(
13596 &phba->pport->work_port_lock,
13597 iflag);
13598 phba->pport->work_port_events &=
13599 ~WORKER_MBOX_TMO;
13600 spin_unlock_irqrestore(
13601 &phba->pport->work_port_lock,
13602 iflag);
13603
13604 /* Do NOT queue MBX_HEARTBEAT to the worker
13605 * thread for processing.
13606 */
13607 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13608 /* Process mbox now */
13609 phba->sli.mbox_active = NULL;
13610 phba->sli.sli_flag &=
13611 ~LPFC_SLI_MBOX_ACTIVE;
13612 if (pmb->mbox_cmpl)
13613 pmb->mbox_cmpl(phba, pmb);
13614 } else {
13615 /* Queue to worker thread to process */
13616 lpfc_mbox_cmpl_put(phba, pmb);
13617 }
13618 }
13619 } else
13620 spin_unlock_irqrestore(&phba->hbalock, iflag);
13621
13622 if ((work_ha_copy & HA_MBATT) &&
13623 (phba->sli.mbox_active == NULL)) {
13624send_current_mbox:
13625 /* Process next mailbox command if there is one */
13626 do {
13627 rc = lpfc_sli_issue_mbox(phba, NULL,
13628 MBX_NOWAIT);
13629 } while (rc == MBX_NOT_FINISHED);
13630 if (rc != MBX_SUCCESS)
13631 lpfc_printf_log(phba, KERN_ERR,
13632 LOG_TRACE_EVENT,
13633 "0349 rc should be "
13634 "MBX_SUCCESS\n");
13635 }
13636
13637 spin_lock_irqsave(&phba->hbalock, iflag);
13638 phba->work_ha |= work_ha_copy;
13639 spin_unlock_irqrestore(&phba->hbalock, iflag);
13640 lpfc_worker_wake_up(phba);
13641 }
13642 return IRQ_HANDLED;
13643unplug_error:
13644 spin_unlock_irqrestore(&phba->hbalock, iflag);
13645 return IRQ_HANDLED;
13646
13647} /* lpfc_sli_sp_intr_handler */
13648
13649/**
13650 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13651 * @irq: Interrupt number.
13652 * @dev_id: The device context pointer.
13653 *
13654 * This function is directly called from the PCI layer as an interrupt
13655 * service routine when device with SLI-3 interface spec is enabled with
13656 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13657 * ring event in the HBA. However, when the device is enabled with either
13658 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13659 * device-level interrupt handler. When the PCI slot is in error recovery
13660 * or the HBA is undergoing initialization, the interrupt handler will not
13661 * process the interrupt. The SCSI FCP fast-path ring events are handled
13662 * in the interrupt context. This function is called without any lock held.
13663 * It gets the hbalock to access and update SLI data structures.
13664 *
13665 * This function returns IRQ_HANDLED when interrupt is handled else it
13666 * returns IRQ_NONE.
13667 **/
13668irqreturn_t
13669lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13670{
13671 struct lpfc_hba *phba;
13672 uint32_t ha_copy;
13673 unsigned long status;
13674 unsigned long iflag;
13675 struct lpfc_sli_ring *pring;
13676
13677 /* Get the driver's phba structure from the dev_id and
13678 * assume the HBA is not interrupting.
13679 */
13680 phba = (struct lpfc_hba *) dev_id;
13681
13682 if (unlikely(!phba))
13683 return IRQ_NONE;
13684
13685 /*
13686	 * Care is needed when this function is invoked as an
13687	 * individual interrupt handler in MSI-X multi-message interrupt mode.
13688 */
13689 if (phba->intr_type == MSIX) {
13690 /* Check device state for handling interrupt */
13691 if (lpfc_intr_state_check(phba))
13692 return IRQ_NONE;
13693 /* Need to read HA REG for FCP ring and other ring events */
13694 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13695 return IRQ_HANDLED;
13696 /* Clear up only attention source related to fast-path */
13697 spin_lock_irqsave(&phba->hbalock, iflag);
13698 /*
13699 * If there is deferred error attention, do not check for
13700 * any interrupt.
13701 */
13702 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13703 spin_unlock_irqrestore(&phba->hbalock, iflag);
13704 return IRQ_NONE;
13705 }
13706 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13707 phba->HAregaddr);
13708 readl(phba->HAregaddr); /* flush */
13709 spin_unlock_irqrestore(&phba->hbalock, iflag);
13710 } else
13711 ha_copy = phba->ha_copy;
13712
13713 /*
13714 * Process all events on FCP ring. Take the optimized path for FCP IO.
13715 */
13716 ha_copy &= ~(phba->work_ha_mask);
13717
13718 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13719 status >>= (4*LPFC_FCP_RING);
13720 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13721 if (status & HA_RXMASK)
13722 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13723
13724 if (phba->cfg_multi_ring_support == 2) {
13725 /*
13726 * Process all events on extra ring. Take the optimized path
13727 * for extra ring IO.
13728 */
13729 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13730 status >>= (4*LPFC_EXTRA_RING);
13731 if (status & HA_RXMASK) {
13732 lpfc_sli_handle_fast_ring_event(phba,
13733 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13734 status);
13735 }
13736 }
13737 return IRQ_HANDLED;
13738} /* lpfc_sli_fp_intr_handler */
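
/*
 * Editorial note: the host attention (HA) register dedicates a 4-bit
 * nibble to each ring, which is why the handlers above shift by
 * (4 * ring) before masking with HA_RXMASK. A hedged, minimal sketch of
 * that extraction, assuming only the shift-per-ring layout visible in
 * this file:
 *
 *	unsigned long status;
 *
 *	status = ha_copy & (HA_RXMASK << (4 * LPFC_FCP_RING));
 *	status >>= (4 * LPFC_FCP_RING);
 *	if (status & HA_RXMASK)
 *		lpfc_sli_handle_fast_ring_event(phba, pring, status);
 */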
13739
13740/**
13741 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
13742 * @irq: Interrupt number.
13743 * @dev_id: The device context pointer.
13744 *
13745 * This function is the HBA device-level interrupt handler to device with
13746 * SLI-3 interface spec, called from the PCI layer when either MSI or
13747 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
13748 * requires driver attention. This function invokes the slow-path interrupt
13749 * attention handling function and fast-path interrupt attention handling
13750 * function in turn to process the relevant HBA attention events. This
13751 * function is called without any lock held. It gets the hbalock to access
13752 * and update SLI data structures.
13753 *
13754 * This function returns IRQ_HANDLED when interrupt is handled, else it
13755 * returns IRQ_NONE.
13756 **/
13757irqreturn_t
13758lpfc_sli_intr_handler(int irq, void *dev_id)
13759{
13760 struct lpfc_hba *phba;
13761 irqreturn_t sp_irq_rc, fp_irq_rc;
13762 unsigned long status1, status2;
13763 uint32_t hc_copy;
13764
13765 /*
13766 * Get the driver's phba structure from the dev_id and
13767 * assume the HBA is not interrupting.
13768 */
13769 phba = (struct lpfc_hba *) dev_id;
13770
13771 if (unlikely(!phba))
13772 return IRQ_NONE;
13773
13774 /* Check device state for handling interrupt */
13775 if (lpfc_intr_state_check(phba))
13776 return IRQ_NONE;
13777
13778 spin_lock(&phba->hbalock);
13779 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
13780 spin_unlock(&phba->hbalock);
13781 return IRQ_HANDLED;
13782 }
13783
13784 if (unlikely(!phba->ha_copy)) {
13785 spin_unlock(&phba->hbalock);
13786 return IRQ_NONE;
13787 } else if (phba->ha_copy & HA_ERATT) {
13788 if (phba->hba_flag & HBA_ERATT_HANDLED)
13789 /* ERATT polling has handled ERATT */
13790 phba->ha_copy &= ~HA_ERATT;
13791 else
13792 /* Indicate interrupt handler handles ERATT */
13793 phba->hba_flag |= HBA_ERATT_HANDLED;
13794 }
13795
13796 /*
13797 * If there is deferred error attention, do not check for any interrupt.
13798 */
13799 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13800 spin_unlock(&phba->hbalock);
13801 return IRQ_NONE;
13802 }
13803
13804 /* Clear attention sources except link and error attentions */
13805 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13806 spin_unlock(&phba->hbalock);
13807 return IRQ_HANDLED;
13808 }
13809 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13810 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13811 phba->HCregaddr);
13812 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13813 writel(hc_copy, phba->HCregaddr);
13814 readl(phba->HAregaddr); /* flush */
13815 spin_unlock(&phba->hbalock);
13816
13817 /*
13818 * Invokes slow-path host attention interrupt handling as appropriate.
13819 */
13820
13821 /* status of events with mailbox and link attention */
13822 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
13823
13824 /* status of events with ELS ring */
13825 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
13826 status2 >>= (4*LPFC_ELS_RING);
13827
13828 if (status1 || (status2 & HA_RXMASK))
13829 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13830 else
13831 sp_irq_rc = IRQ_NONE;
13832
13833 /*
13834 * Invoke fast-path host attention interrupt handling as appropriate.
13835 */
13836
13837 /* status of events with FCP ring */
13838 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13839 status1 >>= (4*LPFC_FCP_RING);
13840
13841 /* status of events with extra ring */
13842 if (phba->cfg_multi_ring_support == 2) {
13843 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13844 status2 >>= (4*LPFC_EXTRA_RING);
13845 } else
13846 status2 = 0;
13847
13848 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13849 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13850 else
13851 fp_irq_rc = IRQ_NONE;
13852
13853 /* Return device-level interrupt handling status */
13854 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13855} /* lpfc_sli_intr_handler */
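
/*
 * Editorial sketch: a device-level handler such as lpfc_sli_intr_handler()
 * is normally registered on the PCI device's INTx/MSI vector with
 * IRQF_SHARED and the phba as dev_id, which is the pointer the handler
 * recovers on entry. This is a hedged illustration, not the driver's
 * actual setup path; the "lpfc" name string is assumed:
 *
 *	int rc;
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, "lpfc", phba);
 *	if (rc)
 *		return rc;
 *	...
 *	free_irq(phba->pcidev->irq, phba);
 */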
13856
13857/**
13858 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
13859 * @phba: pointer to lpfc hba data structure.
13860 *
13861 * This routine is invoked by the worker thread to process all the pending
13862 * SLI4 els abort xri events.
13863 **/
13864void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13865{
13866 struct lpfc_cq_event *cq_event;
13867 unsigned long iflags;
13868
13869 /* First, declare the els xri abort event has been handled */
13870 spin_lock_irqsave(&phba->hbalock, iflags);
13871 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13872 spin_unlock_irqrestore(&phba->hbalock, iflags);
13873
13874 /* Now, handle all the els xri abort events */
13875 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13876 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13877 /* Get the first event from the head of the event queue */
13878 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13879 cq_event, struct lpfc_cq_event, list);
13880 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13881 iflags);
13882 /* Notify aborted XRI for ELS work queue */
13883 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13884
13885 /* Free the event processed back to the free pool */
13886 lpfc_sli4_cq_event_release(phba, cq_event);
13887 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13888 iflags);
13889 }
13890 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13891}
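
/*
 * Editorial note: the routine above uses a common drain idiom. The list
 * lock is held only across list manipulation and is dropped around the
 * per-event callout, so the callout never runs under the list lock. A
 * generic, hedged sketch of the shape (names illustrative):
 *
 *	spin_lock_irqsave(&list_lock, iflags);
 *	while (!list_empty(&work_list)) {
 *		list_remove_head(&work_list, ev, struct lpfc_cq_event, list);
 *		spin_unlock_irqrestore(&list_lock, iflags);
 *		handle_event(ev);
 *		spin_lock_irqsave(&list_lock, iflags);
 *	}
 *	spin_unlock_irqrestore(&list_lock, iflags);
 */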
13892
13893/**
13894 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
13895 * @phba: Pointer to HBA context object.
13896 * @irspiocbq: Pointer to work-queue completion queue entry.
13897 *
13898 * This routine handles an ELS work-queue completion event and constructs
13899 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13900 * discovery engine to handle.
13901 *
13902 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13903 **/
13904static struct lpfc_iocbq *
13905lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
13906 struct lpfc_iocbq *irspiocbq)
13907{
13908 struct lpfc_sli_ring *pring;
13909 struct lpfc_iocbq *cmdiocbq;
13910 struct lpfc_wcqe_complete *wcqe;
13911 unsigned long iflags;
13912
13913 pring = lpfc_phba_elsring(phba);
13914 if (unlikely(!pring))
13915 return NULL;
13916
13917 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13918 spin_lock_irqsave(&pring->ring_lock, iflags);
13919 pring->stats.iocb_event++;
13920 /* Look up the ELS command IOCB and create pseudo response IOCB */
13921 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13922 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13923 if (unlikely(!cmdiocbq)) {
13924 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13925 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13926 "0386 ELS complete with no corresponding "
13927 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13928 wcqe->word0, wcqe->total_data_placed,
13929 wcqe->parameter, wcqe->word3);
13930 lpfc_sli_release_iocbq(phba, irspiocbq);
13931 return NULL;
13932 }
13933
13934 memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
13935 memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
13936
13937 /* Put the iocb back on the txcmplq */
13938 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13939 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13940
13941 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13942 spin_lock_irqsave(&phba->hbalock, iflags);
13943 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
13944 spin_unlock_irqrestore(&phba->hbalock, iflags);
13945 }
13946
13947 return irspiocbq;
13948}
13949
13950inline struct lpfc_cq_event *
13951lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13952{
13953 struct lpfc_cq_event *cq_event;
13954
13955 /* Allocate a new internal CQ_EVENT entry */
13956 cq_event = lpfc_sli4_cq_event_alloc(phba);
13957 if (!cq_event) {
13958 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13959 "0602 Failed to alloc CQ_EVENT entry\n");
13960 return NULL;
13961 }
13962
13963 /* Move the CQE into the event */
13964 memcpy(&cq_event->cqe, entry, size);
13965 return cq_event;
13966}
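
/*
 * Editorial usage sketch for lpfc_cq_event_setup(), mirroring the callers
 * that follow: allocate a driver-private event, snapshot the hardware CQE
 * into it, then queue it for the worker thread. The list and lock names
 * here are illustrative only:
 *
 *	struct lpfc_cq_event *ev;
 *
 *	ev = lpfc_cq_event_setup(phba, cqe, sizeof(*cqe));
 *	if (!ev)
 *		return false;
 *	spin_lock_irqsave(&some_list_lock, iflags);
 *	list_add_tail(&ev->list, &some_work_queue);
 *	spin_unlock_irqrestore(&some_list_lock, iflags);
 */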
13967
13968/**
13969 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13970 * @phba: Pointer to HBA context object.
13971 * @mcqe: Pointer to mailbox completion queue entry.
13972 *
13973 * This routine processes a mailbox completion queue entry carrying an
13974 * asynchronous event.
13975 *
13976 * Return: true if work posted to worker thread, otherwise false.
13977 **/
13978static bool
13979lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13980{
13981 struct lpfc_cq_event *cq_event;
13982 unsigned long iflags;
13983
13984 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13985 "0392 Async Event: word0:x%x, word1:x%x, "
13986 "word2:x%x, word3:x%x\n", mcqe->word0,
13987 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13988
13989 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13990 if (!cq_event)
13991 return false;
13992
13993 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
13994 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13995 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13996
13997 /* Set the async event flag */
13998 spin_lock_irqsave(&phba->hbalock, iflags);
13999 phba->hba_flag |= ASYNC_EVENT;
14000 spin_unlock_irqrestore(&phba->hbalock, iflags);
14001
14002 return true;
14003}
14004
14005/**
14006 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14007 * @phba: Pointer to HBA context object.
14008 * @mcqe: Pointer to mailbox completion queue entry.
14009 *
14010 * This routine processes a mailbox completion queue entry carrying a
14011 * mailbox completion event.
14012 *
14013 * Return: true if work posted to worker thread, otherwise false.
14014 **/
14015static bool
14016lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14017{
14018 uint32_t mcqe_status;
14019 MAILBOX_t *mbox, *pmbox;
14020 struct lpfc_mqe *mqe;
14021 struct lpfc_vport *vport;
14022 struct lpfc_nodelist *ndlp;
14023 struct lpfc_dmabuf *mp;
14024 unsigned long iflags;
14025 LPFC_MBOXQ_t *pmb;
14026 bool workposted = false;
14027 int rc;
14028
14029	/* If this is not a mailbox-complete MCQE, bail out via the consume check */
14030 if (!bf_get(lpfc_trailer_completed, mcqe))
14031 goto out_no_mqe_complete;
14032
14033 /* Get the reference to the active mbox command */
14034 spin_lock_irqsave(&phba->hbalock, iflags);
14035 pmb = phba->sli.mbox_active;
14036 if (unlikely(!pmb)) {
14037 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14038 "1832 No pending MBOX command to handle\n");
14039 spin_unlock_irqrestore(&phba->hbalock, iflags);
14040 goto out_no_mqe_complete;
14041 }
14042 spin_unlock_irqrestore(&phba->hbalock, iflags);
14043 mqe = &pmb->u.mqe;
14044 pmbox = (MAILBOX_t *)&pmb->u.mqe;
14045 mbox = phba->mbox;
14046 vport = pmb->vport;
14047
14048 /* Reset heartbeat timer */
14049 phba->last_completion_time = jiffies;
14050 del_timer(&phba->sli.mbox_tmo);
14051
14052 /* Move mbox data to caller's mailbox region, do endian swapping */
14053 if (pmb->mbox_cmpl && mbox)
14054 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14055
14056 /*
14057 * For mcqe errors, conditionally move a modified error code to
14058 * the mbox so that the error will not be missed.
14059 */
14060 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14061 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14062 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14063 bf_set(lpfc_mqe_status, mqe,
14064 (LPFC_MBX_ERROR_RANGE | mcqe_status));
14065 }
14066 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14067 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14068 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14069 "MBOX dflt rpi: status:x%x rpi:x%x",
14070 mcqe_status,
14071 pmbox->un.varWords[0], 0);
14072 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14073 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
14074 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
14075
14076 /* Reg_LOGIN of dflt RPI was successful. Mark the
14077 * node as having an UNREG_LOGIN in progress to stop
14078 * an unsolicited PLOGI from the same NPortId from
14079 * starting another mailbox transaction.
14080 */
14081 spin_lock_irqsave(&ndlp->lock, iflags);
14082 ndlp->nlp_flag |= NLP_UNREG_INP;
14083 spin_unlock_irqrestore(&ndlp->lock, iflags);
14084 lpfc_unreg_login(phba, vport->vpi,
14085 pmbox->un.varWords[0], pmb);
14086 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14087 pmb->ctx_buf = mp;
14088
14089 /* No reference taken here. This is a default
14090 * RPI reg/immediate unreg cycle. The reference was
14091 * taken in the reg rpi path and is released when
14092 * this mailbox completes.
14093 */
14094 pmb->ctx_ndlp = ndlp;
14095 pmb->vport = vport;
14096 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14097 if (rc != MBX_BUSY)
14098 lpfc_printf_log(phba, KERN_ERR,
14099 LOG_TRACE_EVENT,
14100 "0385 rc should "
14101 "have been MBX_BUSY\n");
14102 if (rc != MBX_NOT_FINISHED)
14103 goto send_current_mbox;
14104 }
14105 }
14106 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14107 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14108 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14109
14110 /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14111 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14112 spin_lock_irqsave(&phba->hbalock, iflags);
14113 /* Release the mailbox command posting token */
14114 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14115 phba->sli.mbox_active = NULL;
14116 if (bf_get(lpfc_trailer_consumed, mcqe))
14117 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14118 spin_unlock_irqrestore(&phba->hbalock, iflags);
14119
14120 /* Post the next mbox command, if there is one */
14121 lpfc_sli4_post_async_mbox(phba);
14122
14123 /* Process cmpl now */
14124 if (pmb->mbox_cmpl)
14125 pmb->mbox_cmpl(phba, pmb);
14126 return false;
14127 }
14128
14129 /* There is mailbox completion work to queue to the worker thread */
14130 spin_lock_irqsave(&phba->hbalock, iflags);
14131 __lpfc_mbox_cmpl_put(phba, pmb);
14132 phba->work_ha |= HA_MBATT;
14133 spin_unlock_irqrestore(&phba->hbalock, iflags);
14134 workposted = true;
14135
14136send_current_mbox:
14137 spin_lock_irqsave(&phba->hbalock, iflags);
14138 /* Release the mailbox command posting token */
14139 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14140	/* Setting the active mailbox pointer must stay in sync with the flag clear */
14141 phba->sli.mbox_active = NULL;
14142 if (bf_get(lpfc_trailer_consumed, mcqe))
14143 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14144 spin_unlock_irqrestore(&phba->hbalock, iflags);
14145 /* Wake up worker thread to post the next pending mailbox command */
14146 lpfc_worker_wake_up(phba);
14147 return workposted;
14148
14149out_no_mqe_complete:
14150 spin_lock_irqsave(&phba->hbalock, iflags);
14151 if (bf_get(lpfc_trailer_consumed, mcqe))
14152 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14153 spin_unlock_irqrestore(&phba->hbalock, iflags);
14154 return false;
14155}
14156
14157/**
14158 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14159 * @phba: Pointer to HBA context object.
14160 * @cq: Pointer to associated CQ
14161 * @cqe: Pointer to mailbox completion queue entry.
14162 *
14163 * This routine processes a mailbox completion queue entry and invokes the
14164 * proper mailbox complete handling or asynchronous event handling routine
14165 * according to the MCQE's async bit.
14166 *
14167 * Return: true if work posted to worker thread, otherwise false.
14168 **/
14169static bool
14170lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14171 struct lpfc_cqe *cqe)
14172{
14173 struct lpfc_mcqe mcqe;
14174 bool workposted;
14175
14176 cq->CQ_mbox++;
14177
14178 /* Copy the mailbox MCQE and convert endian order as needed */
14179 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14180
14181 /* Invoke the proper event handling routine */
14182 if (!bf_get(lpfc_trailer_async, &mcqe))
14183 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14184 else
14185 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14186 return workposted;
14187}
14188
14189/**
14190 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14191 * @phba: Pointer to HBA context object.
14192 * @cq: Pointer to associated CQ
14193 * @wcqe: Pointer to work-queue completion queue entry.
14194 *
14195 * This routine handles an ELS work-queue completion event.
14196 *
14197 * Return: true if work posted to worker thread, otherwise false.
14198 **/
14199static bool
14200lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14201 struct lpfc_wcqe_complete *wcqe)
14202{
14203 struct lpfc_iocbq *irspiocbq;
14204 unsigned long iflags;
14205 struct lpfc_sli_ring *pring = cq->pring;
14206 int txq_cnt = 0;
14207 int txcmplq_cnt = 0;
14208
14209 /* Check for response status */
14210 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14211 /* Log the error status */
14212 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14213 "0357 ELS CQE error: status=x%x: "
14214 "CQE: %08x %08x %08x %08x\n",
14215 bf_get(lpfc_wcqe_c_status, wcqe),
14216 wcqe->word0, wcqe->total_data_placed,
14217 wcqe->parameter, wcqe->word3);
14218 }
14219
14220 /* Get an irspiocbq for later ELS response processing use */
14221 irspiocbq = lpfc_sli_get_iocbq(phba);
14222 if (!irspiocbq) {
14223 if (!list_empty(&pring->txq))
14224 txq_cnt++;
14225 if (!list_empty(&pring->txcmplq))
14226 txcmplq_cnt++;
14227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14228 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14229 "els_txcmplq_cnt=%d\n",
14230 txq_cnt, phba->iocb_cnt,
14231 txcmplq_cnt);
14232 return false;
14233 }
14234
14235 /* Save off the slow-path queue event for work thread to process */
14236 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14237 spin_lock_irqsave(&phba->hbalock, iflags);
14238 list_add_tail(&irspiocbq->cq_event.list,
14239 &phba->sli4_hba.sp_queue_event);
14240 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14241 spin_unlock_irqrestore(&phba->hbalock, iflags);
14242
14243 return true;
14244}
14245
14246/**
14247 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14248 * @phba: Pointer to HBA context object.
14249 * @wcqe: Pointer to work-queue completion queue entry.
14250 *
14251 * This routine handles slow-path WQ entry consumed event by invoking the
14252 * proper WQ release routine to the slow-path WQ.
14253 **/
14254static void
14255lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14256 struct lpfc_wcqe_release *wcqe)
14257{
14258 /* sanity check on queue memory */
14259 if (unlikely(!phba->sli4_hba.els_wq))
14260 return;
14261 /* Check for the slow-path ELS work queue */
14262 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14263 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14264 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14265 else
14266 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14267 "2579 Slow-path wqe consume event carries "
14268				"mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14269 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14270 phba->sli4_hba.els_wq->queue_id);
14271}
14272
14273/**
14274 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
14275 * @phba: Pointer to HBA context object.
14276 * @cq: Pointer to a WQ completion queue.
14277 * @wcqe: Pointer to work-queue completion queue entry.
14278 *
14279 * This routine handles an XRI abort event.
14280 *
14281 * Return: true if work posted to worker thread, otherwise false.
14282 **/
14283static bool
14284lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14285 struct lpfc_queue *cq,
14286 struct sli4_wcqe_xri_aborted *wcqe)
14287{
14288 bool workposted = false;
14289 struct lpfc_cq_event *cq_event;
14290 unsigned long iflags;
14291
14292 switch (cq->subtype) {
14293 case LPFC_IO:
14294 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14295 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14296 /* Notify aborted XRI for NVME work queue */
14297 if (phba->nvmet_support)
14298 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14299 }
14300 workposted = false;
14301 break;
14302 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14303 case LPFC_ELS:
14304 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14305 if (!cq_event) {
14306 workposted = false;
14307 break;
14308 }
14309 cq_event->hdwq = cq->hdwq;
14310 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14311 iflags);
14312 list_add_tail(&cq_event->list,
14313 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14314 /* Set the els xri abort event flag */
14315 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
14316 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14317 iflags);
14318 workposted = true;
14319 break;
14320 default:
14321 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14322 "0603 Invalid CQ subtype %d: "
14323 "%08x %08x %08x %08x\n",
14324 cq->subtype, wcqe->word0, wcqe->parameter,
14325 wcqe->word2, wcqe->word3);
14326 workposted = false;
14327 break;
14328 }
14329 return workposted;
14330}
14331
14332#define FC_RCTL_MDS_DIAGS 0xF4
14333
14334/**
14335 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14336 * @phba: Pointer to HBA context object.
14337 * @rcqe: Pointer to receive-queue completion queue entry.
14338 *
14339 * This routine processes a receive-queue completion queue entry.
14340 *
14341 * Return: true if work posted to worker thread, otherwise false.
14342 **/
14343static bool
14344lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14345{
14346 bool workposted = false;
14347 struct fc_frame_header *fc_hdr;
14348 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14349 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14350 struct lpfc_nvmet_tgtport *tgtp;
14351 struct hbq_dmabuf *dma_buf;
14352 uint32_t status, rq_id;
14353 unsigned long iflags;
14354
14355 /* sanity check on queue memory */
14356 if (unlikely(!hrq) || unlikely(!drq))
14357 return workposted;
14358
14359 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14360 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14361 else
14362 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14363 if (rq_id != hrq->queue_id)
14364 goto out;
14365
14366 status = bf_get(lpfc_rcqe_status, rcqe);
14367 switch (status) {
14368 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14369 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14370 "2537 Receive Frame Truncated!!\n");
14371 fallthrough;
14372 case FC_STATUS_RQ_SUCCESS:
14373 spin_lock_irqsave(&phba->hbalock, iflags);
14374 lpfc_sli4_rq_release(hrq, drq);
14375 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14376 if (!dma_buf) {
14377 hrq->RQ_no_buf_found++;
14378 spin_unlock_irqrestore(&phba->hbalock, iflags);
14379 goto out;
14380 }
14381 hrq->RQ_rcv_buf++;
14382 hrq->RQ_buf_posted--;
14383 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14384
14385 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14386
14387 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14388 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14389 spin_unlock_irqrestore(&phba->hbalock, iflags);
14390 /* Handle MDS Loopback frames */
14391 if (!(phba->pport->load_flag & FC_UNLOADING))
14392 lpfc_sli4_handle_mds_loopback(phba->pport,
14393 dma_buf);
14394 else
14395 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14396 break;
14397 }
14398
14399 /* save off the frame for the work thread to process */
14400 list_add_tail(&dma_buf->cq_event.list,
14401 &phba->sli4_hba.sp_queue_event);
14402 /* Frame received */
14403 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14404 spin_unlock_irqrestore(&phba->hbalock, iflags);
14405 workposted = true;
14406 break;
14407 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14408 if (phba->nvmet_support) {
14409 tgtp = phba->targetport->private;
14410 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14411 "6402 RQE Error x%x, posted %d err_cnt "
14412 "%d: %x %x %x\n",
14413 status, hrq->RQ_buf_posted,
14414 hrq->RQ_no_posted_buf,
14415 atomic_read(&tgtp->rcv_fcp_cmd_in),
14416 atomic_read(&tgtp->rcv_fcp_cmd_out),
14417 atomic_read(&tgtp->xmt_fcp_release));
14418 }
14419 fallthrough;
14420
14421 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14422 hrq->RQ_no_posted_buf++;
14423 /* Post more buffers if possible */
14424 spin_lock_irqsave(&phba->hbalock, iflags);
14425 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14426 spin_unlock_irqrestore(&phba->hbalock, iflags);
14427 workposted = true;
14428 break;
14429 }
14430out:
14431 return workposted;
14432}
14433
14434/**
14435 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14436 * @phba: Pointer to HBA context object.
14437 * @cq: Pointer to the completion queue.
14438 * @cqe: Pointer to a completion queue entry.
14439 *
14440 * This routine processes a slow-path work-queue or receive-queue completion
14441 * queue entry.
14442 *
14443 * Return: true if work posted to worker thread, otherwise false.
14444 **/
14445static bool
14446lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14447 struct lpfc_cqe *cqe)
14448{
14449 struct lpfc_cqe cqevt;
14450 bool workposted = false;
14451
14452 /* Copy the work queue CQE and convert endian order if needed */
14453 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14454
14455 /* Check and process for different type of WCQE and dispatch */
14456 switch (bf_get(lpfc_cqe_code, &cqevt)) {
14457 case CQE_CODE_COMPL_WQE:
14458 /* Process the WQ/RQ complete event */
14459 phba->last_completion_time = jiffies;
14460 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14461 (struct lpfc_wcqe_complete *)&cqevt);
14462 break;
14463 case CQE_CODE_RELEASE_WQE:
14464 /* Process the WQ release event */
14465 lpfc_sli4_sp_handle_rel_wcqe(phba,
14466 (struct lpfc_wcqe_release *)&cqevt);
14467 break;
14468 case CQE_CODE_XRI_ABORTED:
14469 /* Process the WQ XRI abort event */
14470 phba->last_completion_time = jiffies;
14471 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14472 (struct sli4_wcqe_xri_aborted *)&cqevt);
14473 break;
14474 case CQE_CODE_RECEIVE:
14475 case CQE_CODE_RECEIVE_V1:
14476 /* Process the RQ event */
14477 phba->last_completion_time = jiffies;
14478 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14479 (struct lpfc_rcqe *)&cqevt);
14480 break;
14481 default:
14482 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14483 "0388 Not a valid WCQE code: x%x\n",
14484 bf_get(lpfc_cqe_code, &cqevt));
14485 break;
14486 }
14487 return workposted;
14488}
14489
14490/**
14491 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14492 * @phba: Pointer to HBA context object.
14493 * @eqe: Pointer to fast-path event queue entry.
14494 * @speq: Pointer to slow-path event queue.
14495 *
14496 * This routine processes an event queue entry from the slow-path event
14497 * queue. It checks the MajorCode and MinorCode to determine whether this is
14498 * a completion event on a completion queue; if not, an error is logged and
14499 * the routine simply returns. Otherwise, it gets the corresponding
14500 * completion queue, processes all the entries on that completion queue,
14501 * rearms the completion queue, and then returns.
14502 *
14503 **/
14504static void
14505lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14506 struct lpfc_queue *speq)
14507{
14508 struct lpfc_queue *cq = NULL, *childq;
14509 uint16_t cqid;
14510 int ret = 0;
14511
14512 /* Get the reference to the corresponding CQ */
14513 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14514
14515 list_for_each_entry(childq, &speq->child_list, list) {
14516 if (childq->queue_id == cqid) {
14517 cq = childq;
14518 break;
14519 }
14520 }
14521 if (unlikely(!cq)) {
14522 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14523 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14524 "0365 Slow-path CQ identifier "
14525 "(%d) does not exist\n", cqid);
14526 return;
14527 }
14528
14529 /* Save EQ associated with this CQ */
14530 cq->assoc_qp = speq;
14531
14532 if (is_kdump_kernel())
14533 ret = queue_work(phba->wq, &cq->spwork);
14534 else
14535 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14536
14537 if (!ret)
14538 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14539 "0390 Cannot schedule queue work "
14540 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14541 cqid, cq->queue_id, raw_smp_processor_id());
14542}
14543
14544/**
14545 * __lpfc_sli4_process_cq - Process elements of a CQ
14546 * @phba: Pointer to HBA context object.
14547 * @cq: Pointer to CQ to be processed
14548 * @handler: Routine to process each cqe
14549 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14550 * @poll_mode: Polling mode we were called from
14551 *
14552 * This routine processes completion queue entries in a CQ. While a valid
14553 * queue element is found, the handler is called. During processing, checks
14554 * are made for periodic doorbell writes to let the hardware know of
14555 * element consumption.
14556 *
14557 * If the max limit on cqes to process is hit, or there are no more valid
14558 * entries, the loop stops. If we processed a sufficient number of elements,
14559 * meaning there is sufficient load, rather than rearming and generating
14560 * another interrupt, a cq rescheduling delay will be set. A delay of 0
14561 * indicates no rescheduling.
14562 *
14563 * Returns True if work scheduled, False otherwise.
14564 **/
14565static bool
14566__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14567 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14568 struct lpfc_cqe *), unsigned long *delay,
14569 enum lpfc_poll_mode poll_mode)
14570{
14571 struct lpfc_cqe *cqe;
14572 bool workposted = false;
14573 int count = 0, consumed = 0;
14574 bool arm = true;
14575
14576 /* default - no reschedule */
14577 *delay = 0;
14578
14579 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14580 goto rearm_and_exit;
14581
14582 /* Process all the entries to the CQ */
14583 cq->q_flag = 0;
14584 cqe = lpfc_sli4_cq_get(cq);
14585 while (cqe) {
14586 workposted |= handler(phba, cq, cqe);
14587 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14588
14589 consumed++;
14590 if (!(++count % cq->max_proc_limit))
14591 break;
14592
14593 if (!(count % cq->notify_interval)) {
14594 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14595 LPFC_QUEUE_NOARM);
14596 consumed = 0;
14597 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14598 }
14599
14600 if (count == LPFC_NVMET_CQ_NOTIFY)
14601 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14602
14603 cqe = lpfc_sli4_cq_get(cq);
14604 }
14605 if (count >= phba->cfg_cq_poll_threshold) {
14606 *delay = 1;
14607 arm = false;
14608 }
14609
14610 /* Note: complete the irq_poll softirq before rearming CQ */
14611 if (poll_mode == LPFC_IRQ_POLL)
14612 irq_poll_complete(&cq->iop);
14613
14614 /* Track the max number of CQEs processed in 1 EQ */
14615 if (count > cq->CQ_max_cqe)
14616 cq->CQ_max_cqe = count;
14617
14618 cq->assoc_qp->EQ_cqe_cnt += count;
14619
14620 /* Catch the no cq entry condition */
14621 if (unlikely(count == 0))
14622 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14623 "0369 No entry from completion queue "
14624 "qid=%d\n", cq->queue_id);
14625
14626 xchg(&cq->queue_claimed, 0);
14627
14628rearm_and_exit:
14629 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14630 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14631
14632 return workposted;
14633}
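
/*
 * Editorial note: queue_claimed acts as a single-owner latch, so a CQ is
 * drained by at most one context at a time. cmpxchg() atomically claims
 * it and xchg() releases it with full ordering; a loser simply rearms
 * and exits. Hedged sketch of the idiom used above:
 *
 *	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
 *		goto rearm_and_exit;
 *	... consume CQEs ...
 *	xchg(&cq->queue_claimed, 0);
 */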
14634
14635/**
14636 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14637 * @cq: pointer to CQ to process
14638 *
14639 * This routine calls the cq processing routine with a handler specific
14640 * to the type of queue bound to it.
14641 *
14642 * The CQ routine returns two values: the first is the calling status,
14643 * which indicates whether work was queued to the background discovery
14644 * thread. If true, the routine should wake up the discovery thread;
14645 * the second is the delay parameter. If non-zero, rather than rearming
14646 * the CQ and taking yet another interrupt, the CQ handler should be queued so
14647 * that it is processed in a subsequent polling action. The value of
14648 * the delay indicates when to reschedule it.
14649 **/
14650static void
14651__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14652{
14653 struct lpfc_hba *phba = cq->phba;
14654 unsigned long delay;
14655 bool workposted = false;
14656 int ret = 0;
14657
14658 /* Process and rearm the CQ */
14659 switch (cq->type) {
14660 case LPFC_MCQ:
14661 workposted |= __lpfc_sli4_process_cq(phba, cq,
14662 lpfc_sli4_sp_handle_mcqe,
14663 &delay, LPFC_QUEUE_WORK);
14664 break;
14665 case LPFC_WCQ:
14666 if (cq->subtype == LPFC_IO)
14667 workposted |= __lpfc_sli4_process_cq(phba, cq,
14668 lpfc_sli4_fp_handle_cqe,
14669 &delay, LPFC_QUEUE_WORK);
14670 else
14671 workposted |= __lpfc_sli4_process_cq(phba, cq,
14672 lpfc_sli4_sp_handle_cqe,
14673 &delay, LPFC_QUEUE_WORK);
14674 break;
14675 default:
14676 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14677 "0370 Invalid completion queue type (%d)\n",
14678 cq->type);
14679 return;
14680 }
14681
14682 if (delay) {
14683 if (is_kdump_kernel())
14684 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14685 delay);
14686 else
14687 ret = queue_delayed_work_on(cq->chann, phba->wq,
14688 &cq->sched_spwork, delay);
14689 if (!ret)
14690 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14691 "0394 Cannot schedule queue work "
14692 "for cqid=%d on CPU %d\n",
14693 cq->queue_id, cq->chann);
14694 }
14695
14696 /* wake up worker thread if there are works to be done */
14697 if (workposted)
14698 lpfc_worker_wake_up(phba);
14699}
14700
14701/**
14702 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14703 * interrupt
14704 * @work: pointer to work element
14705 *
14706 * Translates from the work handler and calls the slow-path handler.
14707 **/
14708static void
14709lpfc_sli4_sp_process_cq(struct work_struct *work)
14710{
14711 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14712
14713 __lpfc_sli4_sp_process_cq(cq);
14714}
14715
14716/**
14717 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14718 * @work: pointer to work element
14719 *
14720 * Translates from the work handler and calls the slow-path handler.
14721 **/
14722static void
14723lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14724{
14725 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14726 struct lpfc_queue, sched_spwork);
14727
14728 __lpfc_sli4_sp_process_cq(cq);
14729}
14730
14731/**
14732 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14733 * @phba: Pointer to HBA context object.
14734 * @cq: Pointer to associated CQ
14735 * @wcqe: Pointer to work-queue completion queue entry.
14736 *
14737 * This routine processes a fast-path work queue completion entry from the
14738 * fast-path event queue for FCP command response completion.
14739 **/
14740static void
14741lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14742 struct lpfc_wcqe_complete *wcqe)
14743{
14744 struct lpfc_sli_ring *pring = cq->pring;
14745 struct lpfc_iocbq *cmdiocbq;
14746 unsigned long iflags;
14747
14748 /* Check for response status */
14749 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14750 /* If resource errors reported from HBA, reduce queue
14751 * depth of the SCSI device.
14752 */
14753 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14754 IOSTAT_LOCAL_REJECT)) &&
14755 ((wcqe->parameter & IOERR_PARAM_MASK) ==
14756 IOERR_NO_RESOURCES))
14757 phba->lpfc_rampdown_queue_depth(phba);
14758
14759 /* Log the cmpl status */
14760 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14761 "0373 FCP CQE cmpl: status=x%x: "
14762 "CQE: %08x %08x %08x %08x\n",
14763 bf_get(lpfc_wcqe_c_status, wcqe),
14764 wcqe->word0, wcqe->total_data_placed,
14765 wcqe->parameter, wcqe->word3);
14766 }
14767
14768 /* Look up the FCP command IOCB and create pseudo response IOCB */
14769 spin_lock_irqsave(&pring->ring_lock, iflags);
14770 pring->stats.iocb_event++;
14771 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14772 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14773 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14774 if (unlikely(!cmdiocbq)) {
14775 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14776 "0374 FCP complete with no corresponding "
14777 "cmdiocb: iotag (%d)\n",
14778 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14779 return;
14780 }
14781#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14782 cmdiocbq->isr_timestamp = cq->isr_timestamp;
14783#endif
14784 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14785 spin_lock_irqsave(&phba->hbalock, iflags);
14786 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14787 spin_unlock_irqrestore(&phba->hbalock, iflags);
14788 }
14789
14790 if (cmdiocbq->cmd_cmpl) {
14791 /* For FCP the flag is cleared in cmd_cmpl */
14792 if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
14793 cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
14794 spin_lock_irqsave(&phba->hbalock, iflags);
14795 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
14796 spin_unlock_irqrestore(&phba->hbalock, iflags);
14797 }
14798
14799 /* Pass the cmd_iocb and the wcqe to the upper layer */
14800 memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
14801 sizeof(struct lpfc_wcqe_complete));
14802 (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq);
14803 } else {
14804 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14805				"0375 FCP cmdiocb has no callback function, "
14806 "iotag: (%d)\n",
14807 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14808 }
14809}
14810
14811/**
14812 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
14813 * @phba: Pointer to HBA context object.
14814 * @cq: Pointer to completion queue.
14815 * @wcqe: Pointer to work-queue completion queue entry.
14816 *
14817 * This routine handles a fast-path WQ entry consumed event by invoking the
14818 * proper WQ release routine to the slow-path WQ.
14819 **/
14820static void
14821lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14822 struct lpfc_wcqe_release *wcqe)
14823{
14824 struct lpfc_queue *childwq;
14825 bool wqid_matched = false;
14826 uint16_t hba_wqid;
14827
14828 /* Check for fast-path FCP work queue release */
14829 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14830 list_for_each_entry(childwq, &cq->child_list, list) {
14831 if (childwq->queue_id == hba_wqid) {
14832 lpfc_sli4_wq_release(childwq,
14833 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14834 if (childwq->q_flag & HBA_NVMET_WQFULL)
14835 lpfc_nvmet_wqfull_process(phba, childwq);
14836 wqid_matched = true;
14837 break;
14838 }
14839 }
14840 /* Report warning log message if no match found */
14841	if (!wqid_matched)
14842 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14843 "2580 Fast-path wqe consume event carries "
14844				"mismatched qid: wcqe-qid=x%x\n", hba_wqid);
14845}
14846
14847/**
14848 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14849 * @phba: Pointer to HBA context object.
14850 * @cq: Pointer to completion queue.
14851 * @rcqe: Pointer to receive-queue completion queue entry.
14852 *
14853 * This routine processes a receive-queue completion queue entry.
14854 *
14855 * Return: true if work posted to worker thread, otherwise false.
14856 **/
14857static bool
14858lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14859 struct lpfc_rcqe *rcqe)
14860{
14861 bool workposted = false;
14862 struct lpfc_queue *hrq;
14863 struct lpfc_queue *drq;
14864 struct rqb_dmabuf *dma_buf;
14865 struct fc_frame_header *fc_hdr;
14866 struct lpfc_nvmet_tgtport *tgtp;
14867 uint32_t status, rq_id;
14868 unsigned long iflags;
14869 uint32_t fctl, idx;
14870
14871 if ((phba->nvmet_support == 0) ||
14872 (phba->sli4_hba.nvmet_cqset == NULL))
14873 return workposted;
14874
14875 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14876 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14877 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14878
14879 /* sanity check on queue memory */
14880 if (unlikely(!hrq) || unlikely(!drq))
14881 return workposted;
14882
14883 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14884 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14885 else
14886 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14887
14888 if ((phba->nvmet_support == 0) ||
14889 (rq_id != hrq->queue_id))
14890 return workposted;
14891
14892 status = bf_get(lpfc_rcqe_status, rcqe);
14893 switch (status) {
14894 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14895 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14896 "6126 Receive Frame Truncated!!\n");
14897 fallthrough;
14898 case FC_STATUS_RQ_SUCCESS:
14899 spin_lock_irqsave(&phba->hbalock, iflags);
14900 lpfc_sli4_rq_release(hrq, drq);
14901 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14902 if (!dma_buf) {
14903 hrq->RQ_no_buf_found++;
14904 spin_unlock_irqrestore(&phba->hbalock, iflags);
14905 goto out;
14906 }
14907 spin_unlock_irqrestore(&phba->hbalock, iflags);
14908 hrq->RQ_rcv_buf++;
14909 hrq->RQ_buf_posted--;
14910 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14911
14912 /* Just some basic sanity checks on FCP Command frame */
14913 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14914 fc_hdr->fh_f_ctl[1] << 8 |
14915 fc_hdr->fh_f_ctl[2]);
14916 if (((fctl &
14917 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14918 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14919 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14920 goto drop;
14921
14922 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14923 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14924 lpfc_nvmet_unsol_fcp_event(
14925 phba, idx, dma_buf, cq->isr_timestamp,
14926 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14927 return false;
14928 }
14929drop:
14930 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14931 break;
14932 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14933 if (phba->nvmet_support) {
14934 tgtp = phba->targetport->private;
14935 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14936 "6401 RQE Error x%x, posted %d err_cnt "
14937 "%d: %x %x %x\n",
14938 status, hrq->RQ_buf_posted,
14939 hrq->RQ_no_posted_buf,
14940 atomic_read(&tgtp->rcv_fcp_cmd_in),
14941 atomic_read(&tgtp->rcv_fcp_cmd_out),
14942 atomic_read(&tgtp->xmt_fcp_release));
14943 }
14944 fallthrough;
14945
14946 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14947 hrq->RQ_no_posted_buf++;
14948 /* Post more buffers if possible */
14949 break;
14950 }
14951out:
14952 return workposted;
14953}
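
/*
 * Editorial note: fh_f_ctl in struct fc_frame_header is a 3-byte
 * big-endian field, so the 24-bit F_CTL word is rebuilt byte-by-byte
 * before the FIRST_SEQ/END_SEQ/SEQ_INIT test above. Hedged sketch:
 *
 *	u32 fctl = (fc_hdr->fh_f_ctl[0] << 16) |
 *		   (fc_hdr->fh_f_ctl[1] << 8) |
 *		    fc_hdr->fh_f_ctl[2];
 *	u32 want = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
 *
 *	if ((fctl & want) != want)
 *		drop the frame
 */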
14954
14955/**
14956 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14957 * @phba: adapter with cq
14958 * @cq: Pointer to the completion queue.
14959 * @cqe: Pointer to fast-path completion queue entry.
14960 *
14961 * This routine processes a fast-path work queue completion entry from the
14962 * fast-path event queue for FCP command response completion.
14963 *
14964 * Return: true if work posted to worker thread, otherwise false.
14965 **/
14966static bool
14967lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14968 struct lpfc_cqe *cqe)
14969{
14970 struct lpfc_wcqe_release wcqe;
14971 bool workposted = false;
14972
14973 /* Copy the work queue CQE and convert endian order if needed */
14974 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14975
14976 /* Check and process for different type of WCQE and dispatch */
14977 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14978 case CQE_CODE_COMPL_WQE:
14979 case CQE_CODE_NVME_ERSP:
14980 cq->CQ_wq++;
14981 /* Process the WQ complete event */
14982 phba->last_completion_time = jiffies;
14983 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14984 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14985 (struct lpfc_wcqe_complete *)&wcqe);
14986 break;
14987 case CQE_CODE_RELEASE_WQE:
14988 cq->CQ_release_wqe++;
14989 /* Process the WQ release event */
14990 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14991 (struct lpfc_wcqe_release *)&wcqe);
14992 break;
14993 case CQE_CODE_XRI_ABORTED:
14994 cq->CQ_xri_aborted++;
14995 /* Process the WQ XRI abort event */
14996 phba->last_completion_time = jiffies;
14997 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14998 (struct sli4_wcqe_xri_aborted *)&wcqe);
14999 break;
15000 case CQE_CODE_RECEIVE_V1:
15001 case CQE_CODE_RECEIVE:
15002 phba->last_completion_time = jiffies;
15003 if (cq->subtype == LPFC_NVMET) {
15004 workposted = lpfc_sli4_nvmet_handle_rcqe(
15005 phba, cq, (struct lpfc_rcqe *)&wcqe);
15006 }
15007 break;
15008 default:
15009 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15010 "0144 Not a valid CQE code: x%x\n",
15011 bf_get(lpfc_wcqe_c_code, &wcqe));
15012 break;
15013 }
15014 return workposted;
15015}
15016
15017/**
15018 * lpfc_sli4_sched_cq_work - Schedules cq work
15019 * @phba: Pointer to HBA context object.
15020 * @cq: Pointer to CQ
15021 * @cqid: CQ ID
15022 *
15023 * This routine checks the poll mode of the CQ corresponding to
15024 * cq->chann, then either schedules a softirq or queues work to complete
15025 * the cq work.
15026 *
15027 * The queue_work path is taken in NVMET mode, or when poll_mode is
15028 * LPFC_QUEUE_WORK. Otherwise, the softirq path is taken.
15029 *
15030 **/
15031static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
15032 struct lpfc_queue *cq, uint16_t cqid)
15033{
15034 int ret = 0;
15035
15036 switch (cq->poll_mode) {
15037 case LPFC_IRQ_POLL:
15038		/* CGN mgmt is mutually exclusive with softirq processing */
15039 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
15040 irq_poll_sched(&cq->iop);
15041 break;
15042 }
15043 fallthrough;
15044 case LPFC_QUEUE_WORK:
15045 default:
15046 if (is_kdump_kernel())
15047 ret = queue_work(phba->wq, &cq->irqwork);
15048 else
15049 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15050 if (!ret)
15051 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15052 "0383 Cannot schedule queue work "
15053 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15054 cqid, cq->queue_id,
15055 raw_smp_processor_id());
15056 }
15057}
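
/*
 * Editorial sketch of the irq_poll pairing that the softirq path above
 * relies on: the CQ's irq_poll instance is initialized once with a
 * budget and a poll callback, irq_poll_sched() defers processing to
 * softirq context, and the callback calls irq_poll_complete() before
 * the CQ is rearmed (see __lpfc_sli4_process_cq()). The callback name
 * and the budget of 64 are assumptions for illustration:
 *
 *	static int example_cq_poll(struct irq_poll *iop, int budget)
 *	{
 *		struct lpfc_queue *cq = container_of(iop, struct lpfc_queue,
 *						     iop);
 *
 *		__lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
 *		return 0;
 *	}
 *
 *	irq_poll_init(&cq->iop, 64, example_cq_poll);
 */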
15058
15059/**
15060 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15061 * @phba: Pointer to HBA context object.
15062 * @eq: Pointer to the queue structure.
15063 * @eqe: Pointer to fast-path event queue entry.
15064 *
15065 * This routine processes an event queue entry from the fast-path event
15066 * queue. It checks the MajorCode and MinorCode to determine whether this is
15067 * a completion event on a completion queue; if not, an error is logged and
15068 * the routine simply returns. Otherwise, it gets the corresponding
15069 * completion queue, processes all the entries on the completion queue,
15070 * rearms the completion queue, and then returns.
15071 **/
15072static void
15073lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15074 struct lpfc_eqe *eqe)
15075{
15076 struct lpfc_queue *cq = NULL;
15077 uint32_t qidx = eq->hdwq;
15078 uint16_t cqid, id;
15079
15080 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15081 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15082 "0366 Not a valid completion "
15083 "event: majorcode=x%x, minorcode=x%x\n",
15084 bf_get_le32(lpfc_eqe_major_code, eqe),
15085 bf_get_le32(lpfc_eqe_minor_code, eqe));
15086 return;
15087 }
15088
15089 /* Get the reference to the corresponding CQ */
15090 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15091
15092 /* Use the fast lookup method first */
15093 if (cqid <= phba->sli4_hba.cq_max) {
15094 cq = phba->sli4_hba.cq_lookup[cqid];
15095 if (cq)
15096 goto work_cq;
15097 }
15098
15099 /* Next check for NVMET completion */
15100 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15101 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15102 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15103 /* Process NVMET unsol rcv */
15104 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15105 goto process_cq;
15106 }
15107 }
15108
15109 if (phba->sli4_hba.nvmels_cq &&
15110 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15111 /* Process NVME unsol rcv */
15112 cq = phba->sli4_hba.nvmels_cq;
15113 }
15114
15115 /* Otherwise this is a Slow path event */
15116 if (cq == NULL) {
15117 lpfc_sli4_sp_handle_eqe(phba, eqe,
15118 phba->sli4_hba.hdwq[qidx].hba_eq);
15119 return;
15120 }
15121
15122process_cq:
15123 if (unlikely(cqid != cq->queue_id)) {
15124 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15125				"0368 Mismatched fast-path completion "
15126 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
15127 cqid, cq->queue_id);
15128 return;
15129 }
15130
15131work_cq:
15132#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15133 if (phba->ktime_on)
15134 cq->isr_timestamp = ktime_get_ns();
15135 else
15136 cq->isr_timestamp = 0;
15137#endif
15138 lpfc_sli4_sched_cq_work(phba, cq, cqid);
15139}
15140
15141/**
15142 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
15143 * @cq: Pointer to CQ to be processed
15144 * @poll_mode: Enum lpfc_poll_state to determine poll mode
15145 *
15146 * This routine calls the cq processing routine with the handler for
15147 * fast path CQEs.
15148 *
15149 * The CQ routine returns two values: the first is the calling status,
15150 * which indicates whether work was queued to the background discovery
15151 * thread. If true, the routine should wake up the discovery thread;
15152 * the second is the delay parameter. If non-zero, rather than rearming
15153 * the CQ and taking yet another interrupt, the CQ handler should be queued so
15154 * that it is processed in a subsequent polling action. The value of
15155 * the delay indicates when to reschedule it.
15156 **/
15157static void
15158__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
15159 enum lpfc_poll_mode poll_mode)
15160{
15161 struct lpfc_hba *phba = cq->phba;
15162 unsigned long delay;
15163 bool workposted = false;
15164 int ret = 0;
15165
15166 /* process and rearm the CQ */
15167 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15168 &delay, poll_mode);
15169
15170 if (delay) {
15171 if (is_kdump_kernel())
15172 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15173 delay);
15174 else
15175 ret = queue_delayed_work_on(cq->chann, phba->wq,
15176 &cq->sched_irqwork, delay);
15177 if (!ret)
15178 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15179 "0367 Cannot schedule queue work "
15180 "for cqid=%d on CPU %d\n",
15181 cq->queue_id, cq->chann);
15182 }
15183
15184 /* wake up worker thread if there are works to be done */
15185 if (workposted)
15186 lpfc_worker_wake_up(phba);
15187}
15188
15189/**
15190 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15191 * interrupt
15192 * @work: pointer to work element
15193 *
15194 * Translates from the work handler and calls the fast-path handler.
15195 **/
15196static void
15197lpfc_sli4_hba_process_cq(struct work_struct *work)
15198{
15199 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15200
15201 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15202}
15203
15204/**
15205 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15206 * @work: pointer to work element
15207 *
15208 * Translates from the work handler and calls the fast-path handler.
15209 **/
15210static void
15211lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15212{
15213 struct lpfc_queue *cq = container_of(to_delayed_work(work),
15214 struct lpfc_queue, sched_irqwork);
15215
15216 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15217}
15218
15219/**
15220 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15221 * @irq: Interrupt number.
15222 * @dev_id: The device context pointer.
15223 *
15224 * This function is directly called from the PCI layer as an interrupt
15225 * service routine when device with SLI-4 interface spec is enabled with
15226 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
15227 * ring event in the HBA. However, when the device is enabled with either
15228 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15229 * device-level interrupt handler. When the PCI slot is in error recovery
15230 * or the HBA is undergoing initialization, the interrupt handler will not
15231 * process the interrupt. The SCSI FCP fast-path ring events are handled
15232 * in the interrupt context. This function is called without any lock held.
15233 * It gets the hbalock to access and update SLI data structures. Note that
15234 * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
15235 * equal to the corresponding FCP CQ index.
15236 *
15237 * The link attention and ELS ring attention events are handled
15238 * by the worker thread. The interrupt handler signals the worker thread
15239 * and returns for these events. This function is called without any lock
15240 * held. It gets the hbalock to access and update SLI data structures.
15241 *
15242 * This function returns IRQ_HANDLED when interrupt is handled else it
15243 * returns IRQ_NONE.
15244 **/
15245irqreturn_t
15246lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15247{
15248 struct lpfc_hba *phba;
15249 struct lpfc_hba_eq_hdl *hba_eq_hdl;
15250 struct lpfc_queue *fpeq;
15251 unsigned long iflag;
15252 int ecount = 0;
15253 int hba_eqidx;
15254 struct lpfc_eq_intr_info *eqi;
15255
15256 /* Get the driver's phba structure from the dev_id */
15257 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15258 phba = hba_eq_hdl->phba;
15259 hba_eqidx = hba_eq_hdl->idx;
15260
15261 if (unlikely(!phba))
15262 return IRQ_NONE;
15263 if (unlikely(!phba->sli4_hba.hdwq))
15264 return IRQ_NONE;
15265
15266 /* Get to the EQ struct associated with this vector */
15267 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15268 if (unlikely(!fpeq))
15269 return IRQ_NONE;
15270
15271 /* Check device state for handling interrupt */
15272 if (unlikely(lpfc_intr_state_check(phba))) {
15273 /* Check again for link_state with lock held */
15274 spin_lock_irqsave(&phba->hbalock, iflag);
15275 if (phba->link_state < LPFC_LINK_DOWN)
15276 /* Flush, clear interrupt, and rearm the EQ */
15277 lpfc_sli4_eqcq_flush(phba, fpeq);
15278 spin_unlock_irqrestore(&phba->hbalock, iflag);
15279 return IRQ_NONE;
15280 }
15281
15282 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15283 eqi->icnt++;
15284
15285 fpeq->last_cpu = raw_smp_processor_id();
15286
15287 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15288 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15289 phba->cfg_auto_imax &&
15290 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15291 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15292 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
15293
15294 /* process and rearm the EQ */
15295 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
15296
15297 if (unlikely(ecount == 0)) {
15298 fpeq->EQ_no_entry++;
15299 if (phba->intr_type == MSIX)
15300			/* MSI-X vector is not shared; log an interrupt with no EQEs */
15301 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15302 "0358 MSI-X interrupt with no EQE\n");
15303 else
15304			/* MSI/INTx line may be shared; report it as not ours */
15305 return IRQ_NONE;
15306 }
15307
15308 return IRQ_HANDLED;
15309} /* lpfc_sli4_hba_intr_handler */
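
/*
 * Illustrative sketch (not the driver's actual setup path, which lives in
 * lpfc_init.c): how a per-vector fast-path handler like the one above is
 * typically registered. Each MSI-X vector gets its own dev_id cookie, the
 * per-EQ lpfc_hba_eq_hdl, which lpfc_sli4_hba_intr_handler() casts back out
 * of @dev_id. The flat "lpfc-eq" name and the missing error unwind are
 * simplifications for this sketch.
 */
static int lpfc_sketch_request_eq_vectors(struct lpfc_hba *phba)
{
	int nvec, idx, rc;

	nvec = pci_alloc_irq_vectors(phba->pcidev, 1, phba->cfg_irq_chann,
				     PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	for (idx = 0; idx < nvec; idx++) {
		phba->sli4_hba.hba_eq_hdl[idx].idx = idx;
		phba->sli4_hba.hba_eq_hdl[idx].phba = phba;
		rc = request_irq(pci_irq_vector(phba->pcidev, idx),
				 lpfc_sli4_hba_intr_handler, 0, "lpfc-eq",
				 &phba->sli4_hba.hba_eq_hdl[idx]);
		if (rc)
			return rc;	/* real code would free prior vectors */
	}
	return 0;
}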
15310
15311/**
15312 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15313 * @irq: Interrupt number.
15314 * @dev_id: The device context pointer.
15315 *
15316 * This function is the device-level interrupt handler for a device with the
15317 * SLI-4 interface spec, called from the PCI layer when either MSI or Pin-IRQ
15318 * interrupt mode is enabled and there is an event in the HBA which requires
15319 * driver attention. It invokes the fast-path interrupt attention handling
15320 * function for each configured interrupt channel in turn to process the
15321 * relevant HBA attention events. This function is called without any lock
15322 * held. It takes the hbalock to access and update SLI data structures.
15324 *
15325 * This function returns IRQ_HANDLED when an interrupt is handled; else it
15326 * returns IRQ_NONE.
15327 **/
15328irqreturn_t
15329lpfc_sli4_intr_handler(int irq, void *dev_id)
15330{
15331 struct lpfc_hba *phba;
15332 irqreturn_t hba_irq_rc;
15333 bool hba_handled = false;
15334 int qidx;
15335
15336 /* Get the driver's phba structure from the dev_id */
15337 phba = (struct lpfc_hba *)dev_id;
15338
15339 if (unlikely(!phba))
15340 return IRQ_NONE;
15341
15342 /*
15343 * Invoke fast-path host attention interrupt handling as appropriate.
15344 */
15345 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15346 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15347 &phba->sli4_hba.hba_eq_hdl[qidx]);
15348 if (hba_irq_rc == IRQ_HANDLED)
15349			hba_handled = true;
15350 }
15351
15352	return hba_handled ? IRQ_HANDLED : IRQ_NONE;
15353} /* lpfc_sli4_intr_handler */
15354
15355void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15356{
15357 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15358 struct lpfc_queue *eq;
15359 int i = 0;
15360
15361 rcu_read_lock();
15362
15363 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15364 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
15365 if (!list_empty(&phba->poll_list))
15366 mod_timer(&phba->cpuhp_poll_timer,
15367 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15368
15369 rcu_read_unlock();
15370}
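
/*
 * Hedged sketch: the heartbeat timer above is presumably initialized once
 * during attach with timer_setup(), which is what lets from_timer() in
 * lpfc_sli4_poll_hbtimer() recover the owning phba from the embedded
 * timer_list. The driver's real init path may order this differently.
 */
static void lpfc_sketch_init_poll_timer(struct lpfc_hba *phba)
{
	INIT_LIST_HEAD(&phba->poll_list);
	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
}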
15371
15372inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
15373{
15374 struct lpfc_hba *phba = eq->phba;
15375 int i = 0;
15376
15377	/*
15378	 * Unlocking an irq is one of the entry points to check for
15379	 * re-scheduling, but we are fine on the io submission path as
15380	 * the midlayer does a get_cpu() to pin us in. Flush out the
15381	 * invalidation queue so we can see the updated value for the
15382	 * mode flag.
15383	 */
15384 smp_rmb();
15385
15386 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
15387	/* We will likely not get the completion for the caller
15388	 * during this iteration, but that is fine. Future io's
15389	 * coming in on this eq should be able to pick it up. As
15390	 * for single io's, they will be handled through a sched
15391	 * from the polling timer function, which currently
15392	 * triggers every 1 msec.
15393	 */
15394 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
15395
15396 return i;
15397}
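
/*
 * Hedged sketch of a fast-path call site for the poller above: after
 * posting a WQE, the submission path can opportunistically reap
 * completions when the EQ has been switched to poll mode.
 * LPFC_POLL_FASTPATH is assumed to be the fast-path counterpart of the
 * LPFC_POLL_SLOWPATH selector used by the heartbeat timer above.
 */
static inline void lpfc_sketch_poll_after_submit(struct lpfc_queue *eq)
{
	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
		lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
}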
15398
15399static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15400{
15401 struct lpfc_hba *phba = eq->phba;
15402
15403 /* kickstart slowpath processing if needed */
15404 if (list_empty(&phba->poll_list))
15405 mod_timer(&phba->cpuhp_poll_timer,
15406 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15407
15408 list_add_rcu(&eq->_poll_list, &phba->poll_list);
15409 synchronize_rcu();
15410}
15411
15412static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15413{
15414 struct lpfc_hba *phba = eq->phba;
15415
15416	/* Disable slow-path processing for this eq. Kick-start
15417	 * the eq by RE-ARMING it as soon as possible.
15418	 */
15419 list_del_rcu(&eq->_poll_list);
15420 synchronize_rcu();
15421
15422 if (list_empty(&phba->poll_list))
15423 del_timer_sync(&phba->cpuhp_poll_timer);
15424}
15425
15426void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15427{
15428 struct lpfc_queue *eq, *next;
15429
15430 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15431 list_del(&eq->_poll_list);
15432
15433 INIT_LIST_HEAD(&phba->poll_list);
15434 synchronize_rcu();
15435}
15436
15437static inline void
15438__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15439{
15440 if (mode == eq->mode)
15441 return;
15442	/*
15443	 * Currently this function is only called during a hotplug
15444	 * event when the cpu on which this function is executing
15445	 * is going offline. By now the hotplug code has instructed
15446	 * the scheduler to remove this cpu from the cpu active
15447	 * mask, so we don't need to worry about being put aside by
15448	 * the scheduler for a high-priority process. Interrupts
15449	 * could still come in, but they are known to retire ASAP.
15450	 */
15451
15452	/* Publish the new mode to the fastpath */
15453 WRITE_ONCE(eq->mode, mode);
15454 /* flush out the store buffer */
15455 smp_wmb();
15456
15457	/*
15458	 * Add this eq to the polling list and start polling. For
15459	 * a grace period both the interrupt handler and the poller
15460	 * will try to process the eq _but_ that's fine. We have a
15461	 * synchronization mechanism in place (queue_claimed) to
15462	 * deal with it. This is just a draining phase for the
15463	 * interrupt handler (not the eq's) as we have guaranteed
15464	 * through the barrier that all the CPUs have seen the new
15465	 * CQ_POLLED state, which effectively disables the REARMING
15466	 * of the eq. The whole idea is that eq's die off eventually
15467	 * as we are no longer rearming them.
15468	 */
15469 mode ? lpfc_sli4_add_to_poll_list(eq) :
15470 lpfc_sli4_remove_from_poll_list(eq);
15471}
15472
15473void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15474{
15475 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15476}
15477
15478void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15479{
15480 struct lpfc_hba *phba = eq->phba;
15481
15482 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15483
15484	/* Kick-start the pending io's in h/w.
15485	 * Once we switch back to interrupt processing on an eq,
15486	 * the io path will only arm the eq when it receives a
15487	 * completion. But since the eq is in the disarmed state,
15488	 * it never receives one. This creates a deadlock
15489	 * scenario.
15490	 */
15491 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15492}
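
/*
 * Hedged sketch of how the two entry points above plug into CPU hotplug:
 * an offline callback switches the EQs whose interrupt was serviced on the
 * dying CPU to poll mode; the matching online callback would switch them
 * back with lpfc_sli4_stop_polling(). The last_cpu test is a hypothetical
 * ownership check; the driver's real callbacks use IRQ affinity state.
 */
static int lpfc_sketch_cpu_offline(unsigned int cpu, struct lpfc_hba *phba)
{
	int idx;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		struct lpfc_queue *eq = phba->sli4_hba.hba_eq_hdl[idx].eq;

		if (eq && eq->last_cpu == cpu)
			lpfc_sli4_start_polling(eq);
	}
	return 0;
}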
15493
15494/**
15495 * lpfc_sli4_queue_free - free a queue structure and associated memory
15496 * @queue: The queue structure to free.
15497 *
15498 * This function frees a queue structure and the DMAable memory used for
15499 * the host resident queue. This function must be called after destroying the
15500 * queue on the HBA.
15501 **/
15502void
15503lpfc_sli4_queue_free(struct lpfc_queue *queue)
15504{
15505 struct lpfc_dmabuf *dmabuf;
15506
15507 if (!queue)
15508 return;
15509
15510 if (!list_empty(&queue->wq_list))
15511 list_del(&queue->wq_list);
15512
15513 while (!list_empty(&queue->page_list)) {
15514 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15515 list);
15516 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15517 dmabuf->virt, dmabuf->phys);
15518 kfree(dmabuf);
15519 }
15520 if (queue->rqbp) {
15521 lpfc_free_rq_buffer(queue->phba, queue);
15522 kfree(queue->rqbp);
15523 }
15524
15525 if (!list_empty(&queue->cpu_list))
15526 list_del(&queue->cpu_list);
15527
15528 kfree(queue);
15530}
15531
15532/**
15533 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15534 * @phba: The HBA that this queue is being created on.
15535 * @page_size: The size of a queue page
15536 * @entry_size: The size of each queue entry for this queue.
15537 * @entry_count: The number of entries that this queue will handle.
15538 * @cpu: The cpu that will primarily utilize this queue.
15539 *
15540 * This function allocates a queue structure and the DMAable memory used for
15541 * the host resident queue. This function must be called before creating the
15542 * queue on the HBA.
15543 **/
15544struct lpfc_queue *
15545lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15546 uint32_t entry_size, uint32_t entry_count, int cpu)
15547{
15548 struct lpfc_queue *queue;
15549 struct lpfc_dmabuf *dmabuf;
15550 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15551 uint16_t x, pgcnt;
15552
15553 if (!phba->sli4_hba.pc_sli4_params.supported)
15554 hw_page_size = page_size;
15555
15556 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15557
15558	/* If needed, adjust the page count to match the max the adapter supports */
15559 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15560 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15561
15562 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15563 GFP_KERNEL, cpu_to_node(cpu));
15564 if (!queue)
15565 return NULL;
15566
15567 INIT_LIST_HEAD(&queue->list);
15568 INIT_LIST_HEAD(&queue->_poll_list);
15569 INIT_LIST_HEAD(&queue->wq_list);
15570 INIT_LIST_HEAD(&queue->wqfull_list);
15571 INIT_LIST_HEAD(&queue->page_list);
15572 INIT_LIST_HEAD(&queue->child_list);
15573 INIT_LIST_HEAD(&queue->cpu_list);
15574
15575 /* Set queue parameters now. If the system cannot provide memory
15576 * resources, the free routine needs to know what was allocated.
15577 */
15578 queue->page_count = pgcnt;
15579 queue->q_pgs = (void **)&queue[1];
15580 queue->entry_cnt_per_pg = hw_page_size / entry_size;
15581 queue->entry_size = entry_size;
15582 queue->entry_count = entry_count;
15583 queue->page_size = hw_page_size;
15584 queue->phba = phba;
15585
15586 for (x = 0; x < queue->page_count; x++) {
15587 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15588 dev_to_node(&phba->pcidev->dev));
15589 if (!dmabuf)
15590 goto out_fail;
15591 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15592 hw_page_size, &dmabuf->phys,
15593 GFP_KERNEL);
15594 if (!dmabuf->virt) {
15595 kfree(dmabuf);
15596 goto out_fail;
15597 }
15598 dmabuf->buffer_tag = x;
15599 list_add_tail(&dmabuf->list, &queue->page_list);
15600		/* use lpfc_sli4_qe to index a particular entry in this page */
15601 queue->q_pgs[x] = dmabuf->virt;
15602 }
15603 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15604 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15605 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15606 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15607
15608 /* notify_interval will be set during q creation */
15609
15610 return queue;
15611out_fail:
15612 lpfc_sli4_queue_free(queue);
15613 return NULL;
15614}
15615
15616/**
15617 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15618 * @phba: HBA structure that indicates port to create a queue on.
15619 * @pci_barset: PCI BAR set flag.
15620 *
15621 * This function returns the host memory address to which the specified PCI
15622 * BAR set was iomapped during device initialization. The returned host
15623 * memory address can be NULL.
15624 */
15625static void __iomem *
15626lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15627{
15628 if (!phba->pcidev)
15629 return NULL;
15630
15631 switch (pci_barset) {
15632 case WQ_PCI_BAR_0_AND_1:
15633 return phba->pci_bar0_memmap_p;
15634 case WQ_PCI_BAR_2_AND_3:
15635 return phba->pci_bar2_memmap_p;
15636 case WQ_PCI_BAR_4_AND_5:
15637 return phba->pci_bar4_memmap_p;
15638 default:
15639 break;
15640 }
15641 return NULL;
15642}
15643
15644/**
15645 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15646 * @phba: HBA structure that EQs are on.
15647 * @startq: The starting EQ index to modify
15648 * @numq: The number of EQs (consecutive indexes) to modify
15649 * @usdelay: amount of delay
15650 *
15651 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15652 * is set either by writing to a register (if supported by the SLI Port)
15653 * or by mailbox command. The mailbox command allows several EQs to be
15654 * updated at once.
15655 *
15656 * The @phba struct is used to send a mailbox command to HBA. The @startq
15657 * is used to get the starting EQ index to change. The @numq value is
15658 * used to specify how many consecutive EQ indexes, starting at EQ index,
15659 * are to be changed. This function is synchronous; it issues the mailbox
15660 * command with MBX_POLL and waits for it to finish before returning.
15661 *
15662 * This function does not return a value. If the mailbox allocation or the
15663 * mailbox command fails, the failure is logged. Note: on a mailbox command
15664 * failure, some EQs may already have had their delay multiplier
15665 * changed.
15666 **/
15667void
15668lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15669 uint32_t numq, uint32_t usdelay)
15670{
15671 struct lpfc_mbx_modify_eq_delay *eq_delay;
15672 LPFC_MBOXQ_t *mbox;
15673 struct lpfc_queue *eq;
15674 int cnt = 0, rc, length;
15675 uint32_t shdr_status, shdr_add_status;
15676 uint32_t dmult;
15677 int qidx;
15678 union lpfc_sli4_cfg_shdr *shdr;
15679
15680 if (startq >= phba->cfg_irq_chann)
15681 return;
15682
15683 if (usdelay > 0xFFFF) {
15684 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15685 "6429 usdelay %d too large. Scaled down to "
15686 "0xFFFF.\n", usdelay);
15687 usdelay = 0xFFFF;
15688 }
15689
15690 /* set values by EQ_DELAY register if supported */
15691 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15692 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15693 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15694 if (!eq)
15695 continue;
15696
15697 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15698
15699 if (++cnt >= numq)
15700 break;
15701 }
15702 return;
15703 }
15704
15705 /* Otherwise, set values by mailbox cmd */
15706
15707 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15708 if (!mbox) {
15709 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15710 "6428 Failed allocating mailbox cmd buffer."
15711 " EQ delay was not set.\n");
15712 return;
15713 }
15714 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15715 sizeof(struct lpfc_sli4_cfg_mhdr));
15716 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15717 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15718 length, LPFC_SLI4_MBX_EMBED);
15719 eq_delay = &mbox->u.mqe.un.eq_delay;
15720
15721	/* Calculate the delay multiplier from the maximum interrupts per second */
15722 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15723 if (dmult)
15724 dmult--;
15725 if (dmult > LPFC_DMULT_MAX)
15726 dmult = LPFC_DMULT_MAX;
15727
15728 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15729 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15730 if (!eq)
15731 continue;
15732 eq->q_mode = usdelay;
15733 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15734 eq_delay->u.request.eq[cnt].phase = 0;
15735 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15736
15737 if (++cnt >= numq)
15738 break;
15739 }
15740 eq_delay->u.request.num_eq = cnt;
15741
15742 mbox->vport = phba->pport;
15743 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15744 mbox->ctx_ndlp = NULL;
15745 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15746 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15747 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15748 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15749 if (shdr_status || shdr_add_status || rc) {
15750 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15751 "2512 MODIFY_EQ_DELAY mailbox failed with "
15752 "status x%x add_status x%x, mbx status x%x\n",
15753 shdr_status, shdr_add_status, rc);
15754 }
15755 mempool_free(mbox, phba->mbox_mem_pool);
15757}
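
/*
 * Hedged usage sketch for the routine above: cap the interrupt coalescing
 * delay of every configured EQ at 16 usec. The delay value is
 * illustrative; startq = 0 with numq = cfg_irq_chann covers all interrupt
 * channels, and the register vs. mailbox path is chosen internally.
 */
static void lpfc_sketch_set_eq_delay(struct lpfc_hba *phba)
{
	lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 16);
}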
15758
15759/**
15760 * lpfc_eq_create - Create an Event Queue on the HBA
15761 * @phba: HBA structure that indicates port to create a queue on.
15762 * @eq: The queue structure to use to create the event queue.
15763 * @imax: The maximum interrupt per second limit.
15764 *
15765 * This function creates an event queue, as detailed in @eq, on a port,
15766 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15767 *
15768 * The @phba struct is used to send mailbox command to HBA. The @eq struct
15769 * is used to get the entry count and entry size that are necessary to
15770 * determine the number of pages to allocate and use for this queue. This
15771 * function will send the EQ_CREATE mailbox command to the HBA to setup the
15772 * event queue. This function is synchronous; it polls for the mailbox
15773 * command to finish before continuing.
15774 *
15775 * On success this function will return a zero. If unable to allocate enough
15776 * memory this function will return -ENOMEM. If the queue create mailbox command
15777 * fails this function will return -ENXIO.
15778 **/
15779int
15780lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15781{
15782 struct lpfc_mbx_eq_create *eq_create;
15783 LPFC_MBOXQ_t *mbox;
15784 int rc, length, status = 0;
15785 struct lpfc_dmabuf *dmabuf;
15786 uint32_t shdr_status, shdr_add_status;
15787 union lpfc_sli4_cfg_shdr *shdr;
15788 uint16_t dmult;
15789 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15790
15791 /* sanity check on queue memory */
15792 if (!eq)
15793 return -ENODEV;
15794 if (!phba->sli4_hba.pc_sli4_params.supported)
15795 hw_page_size = SLI4_PAGE_SIZE;
15796
15797 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15798 if (!mbox)
15799 return -ENOMEM;
15800 length = (sizeof(struct lpfc_mbx_eq_create) -
15801 sizeof(struct lpfc_sli4_cfg_mhdr));
15802 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15803 LPFC_MBOX_OPCODE_EQ_CREATE,
15804 length, LPFC_SLI4_MBX_EMBED);
15805 eq_create = &mbox->u.mqe.un.eq_create;
15806 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15807 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15808 eq->page_count);
15809 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15810 LPFC_EQE_SIZE);
15811 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15812
15813 /* Use version 2 of CREATE_EQ if eqav is set */
15814 if (phba->sli4_hba.pc_sli4_params.eqav) {
15815 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15816 LPFC_Q_CREATE_VERSION_2);
15817 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15818 phba->sli4_hba.pc_sli4_params.eqav);
15819 }
15820
15821 /* don't setup delay multiplier using EQ_CREATE */
15822 dmult = 0;
15823 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15824 dmult);
15825 switch (eq->entry_count) {
15826 default:
15827 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15828 "0360 Unsupported EQ count. (%d)\n",
15829 eq->entry_count);
15830 if (eq->entry_count < 256) {
15831 status = -EINVAL;
15832 goto out;
15833 }
15834 fallthrough; /* otherwise default to smallest count */
15835 case 256:
15836 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15837 LPFC_EQ_CNT_256);
15838 break;
15839 case 512:
15840 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15841 LPFC_EQ_CNT_512);
15842 break;
15843 case 1024:
15844 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15845 LPFC_EQ_CNT_1024);
15846 break;
15847 case 2048:
15848 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15849 LPFC_EQ_CNT_2048);
15850 break;
15851 case 4096:
15852 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15853 LPFC_EQ_CNT_4096);
15854 break;
15855 }
15856 list_for_each_entry(dmabuf, &eq->page_list, list) {
15857 memset(dmabuf->virt, 0, hw_page_size);
15858 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15859 putPaddrLow(dmabuf->phys);
15860 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15861 putPaddrHigh(dmabuf->phys);
15862 }
15863 mbox->vport = phba->pport;
15864 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15865 mbox->ctx_buf = NULL;
15866 mbox->ctx_ndlp = NULL;
15867 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15868 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15869 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15870 if (shdr_status || shdr_add_status || rc) {
15871 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15872 "2500 EQ_CREATE mailbox failed with "
15873 "status x%x add_status x%x, mbx status x%x\n",
15874 shdr_status, shdr_add_status, rc);
15875 status = -ENXIO;
15876 }
15877 eq->type = LPFC_EQ;
15878 eq->subtype = LPFC_NONE;
15879 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15880 if (eq->queue_id == 0xFFFF)
15881 status = -ENXIO;
15882 eq->host_index = 0;
15883 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15884 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15885out:
15886 mempool_free(mbox, phba->mbox_mem_pool);
15887 return status;
15888}
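
/*
 * Hedged usage sketch tying lpfc_sli4_queue_alloc(), lpfc_eq_create() and
 * lpfc_sli4_queue_free() together: the host-side queue memory is allocated
 * first, the queue is then created on the HBA, and the allocation is
 * undone on failure. The entry count is illustrative, and imax is passed
 * as 0 since the delay is set later via lpfc_modify_hba_eq_delay().
 */
static struct lpfc_queue *lpfc_sketch_make_eq(struct lpfc_hba *phba, int cpu)
{
	struct lpfc_queue *eq;

	eq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE,
				   sizeof(struct lpfc_eqe), 1024, cpu);
	if (!eq)
		return NULL;

	if (lpfc_eq_create(phba, eq, 0)) {
		lpfc_sli4_queue_free(eq);
		return NULL;
	}
	return eq;
}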
15889
15890static int lpfc_cq_poll_handler(struct irq_poll *iop, int budget)
15891{
15892 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15893
15894 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15895
15896 return 1;
15897}
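
/*
 * Hedged sketch of the irq_poll flow that drives the handler above: a
 * hard-irq producer schedules the poller, and the irq_poll core later runs
 * lpfc_cq_poll_handler() from softirq context to drain the CQ. Whether
 * the poller is re-scheduled or completed is decided inside
 * __lpfc_sli4_hba_process_cq(); this only shows the generic kick.
 */
static inline void lpfc_sketch_kick_cq_poller(struct lpfc_queue *cq)
{
	/* Defer CQE processing from hard-irq to softirq context */
	irq_poll_sched(&cq->iop);
}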
15898
15899/**
15900 * lpfc_cq_create - Create a Completion Queue on the HBA
15901 * @phba: HBA structure that indicates port to create a queue on.
15902 * @cq: The queue structure to use to create the completion queue.
15903 * @eq: The event queue to bind this completion queue to.
15904 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15905 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15906 *
15907 * This function creates a completion queue, as detailed in @cq, on a port,
15908 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
15909 *
15910 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15911 * is used to get the entry count and entry size that are necessary to
15912 * determine the number of pages to allocate and use for this queue. The @eq
15913 * is used to indicate which event queue to bind this completion queue to. This
15914 * function will send the CQ_CREATE mailbox command to the HBA to setup the
15915 * completion queue. This function is synchronous; it polls for the mailbox
15916 * command to finish before continuing.
15917 *
15918 * On success this function will return a zero. If unable to allocate enough
15919 * memory this function will return -ENOMEM. If the queue create mailbox command
15920 * fails this function will return -ENXIO.
15921 **/
15922int
15923lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15924 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15925{
15926 struct lpfc_mbx_cq_create *cq_create;
15927 struct lpfc_dmabuf *dmabuf;
15928 LPFC_MBOXQ_t *mbox;
15929 int rc, length, status = 0;
15930 uint32_t shdr_status, shdr_add_status;
15931 union lpfc_sli4_cfg_shdr *shdr;
15932
15933 /* sanity check on queue memory */
15934 if (!cq || !eq)
15935 return -ENODEV;
15936
15937 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15938 if (!mbox)
15939 return -ENOMEM;
15940 length = (sizeof(struct lpfc_mbx_cq_create) -
15941 sizeof(struct lpfc_sli4_cfg_mhdr));
15942 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15943 LPFC_MBOX_OPCODE_CQ_CREATE,
15944 length, LPFC_SLI4_MBX_EMBED);
15945 cq_create = &mbox->u.mqe.un.cq_create;
15946 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15947 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15948 cq->page_count);
15949 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15950 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15951 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15952 phba->sli4_hba.pc_sli4_params.cqv);
15953 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15954 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15955 (cq->page_size / SLI4_PAGE_SIZE));
15956 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15957 eq->queue_id);
15958 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15959 phba->sli4_hba.pc_sli4_params.cqav);
15960 } else {
15961 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15962 eq->queue_id);
15963 }
15964 switch (cq->entry_count) {
15965 case 2048:
15966 case 4096:
15967 if (phba->sli4_hba.pc_sli4_params.cqv ==
15968 LPFC_Q_CREATE_VERSION_2) {
15969 cq_create->u.request.context.lpfc_cq_context_count =
15970 cq->entry_count;
15971 bf_set(lpfc_cq_context_count,
15972 &cq_create->u.request.context,
15973 LPFC_CQ_CNT_WORD7);
15974 break;
15975 }
15976 fallthrough;
15977 default:
15978 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15979 "0361 Unsupported CQ count: "
15980 "entry cnt %d sz %d pg cnt %d\n",
15981 cq->entry_count, cq->entry_size,
15982 cq->page_count);
15983 if (cq->entry_count < 256) {
15984 status = -EINVAL;
15985 goto out;
15986 }
15987 fallthrough; /* otherwise default to smallest count */
15988 case 256:
15989 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15990 LPFC_CQ_CNT_256);
15991 break;
15992 case 512:
15993 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15994 LPFC_CQ_CNT_512);
15995 break;
15996 case 1024:
15997 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15998 LPFC_CQ_CNT_1024);
15999 break;
16000 }
16001 list_for_each_entry(dmabuf, &cq->page_list, list) {
16002 memset(dmabuf->virt, 0, cq->page_size);
16003 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16004 putPaddrLow(dmabuf->phys);
16005 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16006 putPaddrHigh(dmabuf->phys);
16007 }
16008 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16009
16010 /* The IOCTL status is embedded in the mailbox subheader. */
16011 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16012 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16013 if (shdr_status || shdr_add_status || rc) {
16014 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16015 "2501 CQ_CREATE mailbox failed with "
16016 "status x%x add_status x%x, mbx status x%x\n",
16017 shdr_status, shdr_add_status, rc);
16018 status = -ENXIO;
16019 goto out;
16020 }
16021 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16022 if (cq->queue_id == 0xFFFF) {
16023 status = -ENXIO;
16024 goto out;
16025 }
16026 /* link the cq onto the parent eq child list */
16027 list_add_tail(&cq->list, &eq->child_list);
16028 /* Set up completion queue's type and subtype */
16029 cq->type = type;
16030 cq->subtype = subtype;
16031 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16032 cq->assoc_qid = eq->queue_id;
16033 cq->assoc_qp = eq;
16034 cq->host_index = 0;
16035 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16036 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16037
16038 if (cq->queue_id > phba->sli4_hba.cq_max)
16039 phba->sli4_hba.cq_max = cq->queue_id;
16040
16041	irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_handler);
16042out:
16043 mempool_free(mbox, phba->mbox_mem_pool);
16044 return status;
16045}
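
/*
 * Hedged usage sketch for lpfc_cq_create(): allocate the host-side queue,
 * then create it on the HBA bound to an existing EQ. The cq_esize and
 * cq_ecount fields and the LPFC_WCQ/LPFC_FCP type values are assumptions
 * drawn from common lpfc usage, not guarantees of this exact file.
 */
static struct lpfc_queue *
lpfc_sketch_make_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, int cpu)
{
	struct lpfc_queue *cq;

	cq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE,
				   phba->sli4_hba.cq_esize,
				   phba->sli4_hba.cq_ecount, cpu);
	if (!cq)
		return NULL;

	if (lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP)) {
		lpfc_sli4_queue_free(cq);
		return NULL;
	}
	return cq;
}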
16046
16047/**
16048 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16049 * @phba: HBA structure that indicates port to create a queue on.
16050 * @cqp: The queue structure array to use to create the completion queues.
16051 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
16052 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16053 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16054 *
16055 * This function creates a set of completion queues to support MRQ,
16056 * as detailed in @cqp, on a port
16057 * described by @phba, by sending a CREATE_CQ_SET mailbox command to the HBA.
16058 *
16059 * The @phba struct is used to send the mailbox command to the HBA. Each @cqp
16060 * entry is used to get the entry count and entry size that are necessary to
16061 * determine the number of pages to allocate and use for that queue. The EQ in
16062 * each @hdwq entry indicates which event queue to bind each completion queue
16063 * to. This function will send the CREATE_CQ_SET mailbox command to the HBA to
16064 * set up the completion queues. This function is synchronous; it polls for the
16065 * mailbox command to finish before continuing.
16066 *
16067 * On success this function will return a zero. If unable to allocate enough
16068 * memory this function will return -ENOMEM. If the queue create mailbox command
16069 * fails this function will return -ENXIO.
16070 **/
16071int
16072lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16073 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16074 uint32_t subtype)
16075{
16076 struct lpfc_queue *cq;
16077 struct lpfc_queue *eq;
16078 struct lpfc_mbx_cq_create_set *cq_set;
16079 struct lpfc_dmabuf *dmabuf;
16080 LPFC_MBOXQ_t *mbox;
16081 int rc, length, alloclen, status = 0;
16082 int cnt, idx, numcq, page_idx = 0;
16083 uint32_t shdr_status, shdr_add_status;
16084 union lpfc_sli4_cfg_shdr *shdr;
16085 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16086
16087 /* sanity check on queue memory */
16088 numcq = phba->cfg_nvmet_mrq;
16089 if (!cqp || !hdwq || !numcq)
16090 return -ENODEV;
16091
16092 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16093 if (!mbox)
16094 return -ENOMEM;
16095
16096 length = sizeof(struct lpfc_mbx_cq_create_set);
16097 length += ((numcq * cqp[0]->page_count) *
16098 sizeof(struct dma_address));
16099 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16100 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16101 LPFC_SLI4_MBX_NEMBED);
16102 if (alloclen < length) {
16103 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16104 "3098 Allocated DMA memory size (%d) is "
16105 "less than the requested DMA memory size "
16106 "(%d)\n", alloclen, length);
16107 status = -ENOMEM;
16108 goto out;
16109 }
16110 cq_set = mbox->sge_array->addr[0];
16111 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16112 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16113
16114 for (idx = 0; idx < numcq; idx++) {
16115 cq = cqp[idx];
16116 eq = hdwq[idx].hba_eq;
16117 if (!cq || !eq) {
16118 status = -ENOMEM;
16119 goto out;
16120 }
16121 if (!phba->sli4_hba.pc_sli4_params.supported)
16122 hw_page_size = cq->page_size;
16123
16124 switch (idx) {
16125 case 0:
16126 bf_set(lpfc_mbx_cq_create_set_page_size,
16127 &cq_set->u.request,
16128 (hw_page_size / SLI4_PAGE_SIZE));
16129 bf_set(lpfc_mbx_cq_create_set_num_pages,
16130 &cq_set->u.request, cq->page_count);
16131 bf_set(lpfc_mbx_cq_create_set_evt,
16132 &cq_set->u.request, 1);
16133 bf_set(lpfc_mbx_cq_create_set_valid,
16134 &cq_set->u.request, 1);
16135 bf_set(lpfc_mbx_cq_create_set_cqe_size,
16136 &cq_set->u.request, 0);
16137 bf_set(lpfc_mbx_cq_create_set_num_cq,
16138 &cq_set->u.request, numcq);
16139 bf_set(lpfc_mbx_cq_create_set_autovalid,
16140 &cq_set->u.request,
16141 phba->sli4_hba.pc_sli4_params.cqav);
16142 switch (cq->entry_count) {
16143 case 2048:
16144 case 4096:
16145 if (phba->sli4_hba.pc_sli4_params.cqv ==
16146 LPFC_Q_CREATE_VERSION_2) {
16147 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16148 &cq_set->u.request,
16149 cq->entry_count);
16150 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16151 &cq_set->u.request,
16152 LPFC_CQ_CNT_WORD7);
16153 break;
16154 }
16155 fallthrough;
16156 default:
16157 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16158 "3118 Bad CQ count. (%d)\n",
16159 cq->entry_count);
16160 if (cq->entry_count < 256) {
16161 status = -EINVAL;
16162 goto out;
16163 }
16164 fallthrough; /* otherwise default to smallest */
16165 case 256:
16166 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16167 &cq_set->u.request, LPFC_CQ_CNT_256);
16168 break;
16169 case 512:
16170 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16171 &cq_set->u.request, LPFC_CQ_CNT_512);
16172 break;
16173 case 1024:
16174 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16175 &cq_set->u.request, LPFC_CQ_CNT_1024);
16176 break;
16177 }
16178 bf_set(lpfc_mbx_cq_create_set_eq_id0,
16179 &cq_set->u.request, eq->queue_id);
16180 break;
16181 case 1:
16182 bf_set(lpfc_mbx_cq_create_set_eq_id1,
16183 &cq_set->u.request, eq->queue_id);
16184 break;
16185 case 2:
16186 bf_set(lpfc_mbx_cq_create_set_eq_id2,
16187 &cq_set->u.request, eq->queue_id);
16188 break;
16189 case 3:
16190 bf_set(lpfc_mbx_cq_create_set_eq_id3,
16191 &cq_set->u.request, eq->queue_id);
16192 break;
16193 case 4:
16194 bf_set(lpfc_mbx_cq_create_set_eq_id4,
16195 &cq_set->u.request, eq->queue_id);
16196 break;
16197 case 5:
16198 bf_set(lpfc_mbx_cq_create_set_eq_id5,
16199 &cq_set->u.request, eq->queue_id);
16200 break;
16201 case 6:
16202 bf_set(lpfc_mbx_cq_create_set_eq_id6,
16203 &cq_set->u.request, eq->queue_id);
16204 break;
16205 case 7:
16206 bf_set(lpfc_mbx_cq_create_set_eq_id7,
16207 &cq_set->u.request, eq->queue_id);
16208 break;
16209 case 8:
16210 bf_set(lpfc_mbx_cq_create_set_eq_id8,
16211 &cq_set->u.request, eq->queue_id);
16212 break;
16213 case 9:
16214 bf_set(lpfc_mbx_cq_create_set_eq_id9,
16215 &cq_set->u.request, eq->queue_id);
16216 break;
16217 case 10:
16218 bf_set(lpfc_mbx_cq_create_set_eq_id10,
16219 &cq_set->u.request, eq->queue_id);
16220 break;
16221 case 11:
16222 bf_set(lpfc_mbx_cq_create_set_eq_id11,
16223 &cq_set->u.request, eq->queue_id);
16224 break;
16225 case 12:
16226 bf_set(lpfc_mbx_cq_create_set_eq_id12,
16227 &cq_set->u.request, eq->queue_id);
16228 break;
16229 case 13:
16230 bf_set(lpfc_mbx_cq_create_set_eq_id13,
16231 &cq_set->u.request, eq->queue_id);
16232 break;
16233 case 14:
16234 bf_set(lpfc_mbx_cq_create_set_eq_id14,
16235 &cq_set->u.request, eq->queue_id);
16236 break;
16237 case 15:
16238 bf_set(lpfc_mbx_cq_create_set_eq_id15,
16239 &cq_set->u.request, eq->queue_id);
16240 break;
16241 }
16242
16243 /* link the cq onto the parent eq child list */
16244 list_add_tail(&cq->list, &eq->child_list);
16245 /* Set up completion queue's type and subtype */
16246 cq->type = type;
16247 cq->subtype = subtype;
16248 cq->assoc_qid = eq->queue_id;
16249 cq->assoc_qp = eq;
16250 cq->host_index = 0;
16251 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16252 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16253 cq->entry_count);
16254 cq->chann = idx;
16255
16256 rc = 0;
16257 list_for_each_entry(dmabuf, &cq->page_list, list) {
16258 memset(dmabuf->virt, 0, hw_page_size);
16259 cnt = page_idx + dmabuf->buffer_tag;
16260 cq_set->u.request.page[cnt].addr_lo =
16261 putPaddrLow(dmabuf->phys);
16262 cq_set->u.request.page[cnt].addr_hi =
16263 putPaddrHigh(dmabuf->phys);
16264 rc++;
16265 }
16266 page_idx += rc;
16267 }
16268
16269 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16270
16271 /* The IOCTL status is embedded in the mailbox subheader. */
16272 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16273 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16274 if (shdr_status || shdr_add_status || rc) {
16275 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16276 "3119 CQ_CREATE_SET mailbox failed with "
16277 "status x%x add_status x%x, mbx status x%x\n",
16278 shdr_status, shdr_add_status, rc);
16279 status = -ENXIO;
16280 goto out;
16281 }
16282 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16283 if (rc == 0xFFFF) {
16284 status = -ENXIO;
16285 goto out;
16286 }
16287
16288 for (idx = 0; idx < numcq; idx++) {
16289 cq = cqp[idx];
16290 cq->queue_id = rc + idx;
16291 if (cq->queue_id > phba->sli4_hba.cq_max)
16292 phba->sli4_hba.cq_max = cq->queue_id;
16293 }
16294
16295out:
16296 lpfc_sli4_mbox_cmd_free(phba, mbox);
16297 return status;
16298}
16299
16300/**
16301 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
16302 * @phba: HBA structure that indicates port to create a queue on.
16303 * @mq: The queue structure to use to create the mailbox queue.
16304 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16305 * @cq: The completion queue to associate with this mq.
16306 *
16307 * This function provides fallback (fb) functionality when the
16308 * mq_create_ext fails on older FW generations. Its purpose is otherwise
16309 * identical to mq_create_ext.
16310 *
16311 * This routine cannot fail as all attributes were previously accessed and
16312 * initialized in mq_create_ext.
16313 **/
16314static void
16315lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16316 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16317{
16318 struct lpfc_mbx_mq_create *mq_create;
16319 struct lpfc_dmabuf *dmabuf;
16320 int length;
16321
16322 length = (sizeof(struct lpfc_mbx_mq_create) -
16323 sizeof(struct lpfc_sli4_cfg_mhdr));
16324 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16325 LPFC_MBOX_OPCODE_MQ_CREATE,
16326 length, LPFC_SLI4_MBX_EMBED);
16327 mq_create = &mbox->u.mqe.un.mq_create;
16328 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16329 mq->page_count);
16330 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16331 cq->queue_id);
16332 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16333 switch (mq->entry_count) {
16334 case 16:
16335 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16336 LPFC_MQ_RING_SIZE_16);
16337 break;
16338 case 32:
16339 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16340 LPFC_MQ_RING_SIZE_32);
16341 break;
16342 case 64:
16343 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16344 LPFC_MQ_RING_SIZE_64);
16345 break;
16346 case 128:
16347 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16348 LPFC_MQ_RING_SIZE_128);
16349 break;
16350 }
16351 list_for_each_entry(dmabuf, &mq->page_list, list) {
16352 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16353 putPaddrLow(dmabuf->phys);
16354 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16355 putPaddrHigh(dmabuf->phys);
16356 }
16357}
16358
16359/**
16360 * lpfc_mq_create - Create a mailbox Queue on the HBA
16361 * @phba: HBA structure that indicates port to create a queue on.
16362 * @mq: The queue structure to use to create the mailbox queue.
16363 * @cq: The completion queue to associate with this mq.
16364 * @subtype: The queue's subtype.
16365 *
16366 * This function creates a mailbox queue, as detailed in @mq, on a port,
16367 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16368 *
16369 * The @phba struct is used to send the mailbox command to the HBA. The @mq
16370 * struct is used to get the entry count and entry size that are necessary to
16371 * determine the number of pages to allocate and use for this queue. This
16372 * function will send the MQ_CREATE mailbox command to the HBA to set up the
16373 * mailbox queue. This function is synchronous; it polls for the mailbox
16374 * command to finish before continuing.
16375 *
16376 * On success this function will return a zero. If unable to allocate enough
16377 * memory this function will return -ENOMEM. If the queue create mailbox command
16378 * fails this function will return -ENXIO.
16379 **/
16380int32_t
16381lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16382 struct lpfc_queue *cq, uint32_t subtype)
16383{
16384 struct lpfc_mbx_mq_create *mq_create;
16385 struct lpfc_mbx_mq_create_ext *mq_create_ext;
16386 struct lpfc_dmabuf *dmabuf;
16387 LPFC_MBOXQ_t *mbox;
16388 int rc, length, status = 0;
16389 uint32_t shdr_status, shdr_add_status;
16390 union lpfc_sli4_cfg_shdr *shdr;
16391 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16392
16393 /* sanity check on queue memory */
16394 if (!mq || !cq)
16395 return -ENODEV;
16396 if (!phba->sli4_hba.pc_sli4_params.supported)
16397 hw_page_size = SLI4_PAGE_SIZE;
16398
16399 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16400 if (!mbox)
16401 return -ENOMEM;
16402 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16403 sizeof(struct lpfc_sli4_cfg_mhdr));
16404 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16405 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16406 length, LPFC_SLI4_MBX_EMBED);
16407
16408 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16409 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16410 bf_set(lpfc_mbx_mq_create_ext_num_pages,
16411 &mq_create_ext->u.request, mq->page_count);
16412 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16413 &mq_create_ext->u.request, 1);
16414 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16415 &mq_create_ext->u.request, 1);
16416 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16417 &mq_create_ext->u.request, 1);
16418 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16419 &mq_create_ext->u.request, 1);
16420 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16421 &mq_create_ext->u.request, 1);
16422 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16423 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16424 phba->sli4_hba.pc_sli4_params.mqv);
16425 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16426 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16427 cq->queue_id);
16428 else
16429 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16430 cq->queue_id);
16431 switch (mq->entry_count) {
16432 default:
16433 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16434 "0362 Unsupported MQ count. (%d)\n",
16435 mq->entry_count);
16436 if (mq->entry_count < 16) {
16437 status = -EINVAL;
16438 goto out;
16439 }
16440 fallthrough; /* otherwise default to smallest count */
16441 case 16:
16442 bf_set(lpfc_mq_context_ring_size,
16443 &mq_create_ext->u.request.context,
16444 LPFC_MQ_RING_SIZE_16);
16445 break;
16446 case 32:
16447 bf_set(lpfc_mq_context_ring_size,
16448 &mq_create_ext->u.request.context,
16449 LPFC_MQ_RING_SIZE_32);
16450 break;
16451 case 64:
16452 bf_set(lpfc_mq_context_ring_size,
16453 &mq_create_ext->u.request.context,
16454 LPFC_MQ_RING_SIZE_64);
16455 break;
16456 case 128:
16457 bf_set(lpfc_mq_context_ring_size,
16458 &mq_create_ext->u.request.context,
16459 LPFC_MQ_RING_SIZE_128);
16460 break;
16461 }
16462 list_for_each_entry(dmabuf, &mq->page_list, list) {
16463 memset(dmabuf->virt, 0, hw_page_size);
16464 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16465 putPaddrLow(dmabuf->phys);
16466 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16467 putPaddrHigh(dmabuf->phys);
16468 }
16469 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16470 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16471 &mq_create_ext->u.response);
16472 if (rc != MBX_SUCCESS) {
16473 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16474 "2795 MQ_CREATE_EXT failed with "
16475 "status x%x. Failback to MQ_CREATE.\n",
16476 rc);
16477 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16478 mq_create = &mbox->u.mqe.un.mq_create;
16479 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16480 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16481 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16482 &mq_create->u.response);
16483 }
16484
16485 /* The IOCTL status is embedded in the mailbox subheader. */
16486 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16487 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16488 if (shdr_status || shdr_add_status || rc) {
16489 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16490 "2502 MQ_CREATE mailbox failed with "
16491 "status x%x add_status x%x, mbx status x%x\n",
16492 shdr_status, shdr_add_status, rc);
16493 status = -ENXIO;
16494 goto out;
16495 }
16496 if (mq->queue_id == 0xFFFF) {
16497 status = -ENXIO;
16498 goto out;
16499 }
16500 mq->type = LPFC_MQ;
16501 mq->assoc_qid = cq->queue_id;
16502 mq->subtype = subtype;
16503 mq->host_index = 0;
16504 mq->hba_index = 0;
16505
16506 /* link the mq onto the parent cq child list */
16507 list_add_tail(&mq->list, &cq->child_list);
16508out:
16509 mempool_free(mbox, phba->mbox_mem_pool);
16510 return status;
16511}
16512
16513/**
16514 * lpfc_wq_create - Create a Work Queue on the HBA
16515 * @phba: HBA structure that indicates port to create a queue on.
16516 * @wq: The queue structure to use to create the work queue.
16517 * @cq: The completion queue to bind this work queue to.
16518 * @subtype: The subtype of the work queue indicating its functionality.
16519 *
16520 * This function creates a work queue, as detailed in @wq, on a port, described
16521 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16522 *
16523 * The @phba struct is used to send mailbox command to HBA. The @wq struct
16524 * is used to get the entry count and entry size that are necessary to
16525 * determine the number of pages to allocate and use for this queue. The @cq
16526 * is used to indicate which completion queue to bind this work queue to. This
16527 * function will send the WQ_CREATE mailbox command to the HBA to setup the
16528 * work queue. This function is synchronous; it polls for the mailbox
16529 * command to finish before continuing.
16530 *
16531 * On success this function will return a zero. If unable to allocate enough
16532 * memory this function will return -ENOMEM. If the queue create mailbox command
16533 * fails this function will return -ENXIO.
16534 **/
16535int
16536lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16537 struct lpfc_queue *cq, uint32_t subtype)
16538{
16539 struct lpfc_mbx_wq_create *wq_create;
16540 struct lpfc_dmabuf *dmabuf;
16541 LPFC_MBOXQ_t *mbox;
16542 int rc, length, status = 0;
16543 uint32_t shdr_status, shdr_add_status;
16544 union lpfc_sli4_cfg_shdr *shdr;
16545 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16546 struct dma_address *page;
16547 void __iomem *bar_memmap_p;
16548 uint32_t db_offset;
16549 uint16_t pci_barset;
16550 uint8_t dpp_barset;
16551 uint32_t dpp_offset;
16552 uint8_t wq_create_version;
16553#ifdef CONFIG_X86
16554 unsigned long pg_addr;
16555#endif
16556
16557 /* sanity check on queue memory */
16558 if (!wq || !cq)
16559 return -ENODEV;
16560 if (!phba->sli4_hba.pc_sli4_params.supported)
16561 hw_page_size = wq->page_size;
16562
16563 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16564 if (!mbox)
16565 return -ENOMEM;
16566 length = (sizeof(struct lpfc_mbx_wq_create) -
16567 sizeof(struct lpfc_sli4_cfg_mhdr));
16568 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16569 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16570 length, LPFC_SLI4_MBX_EMBED);
16571 wq_create = &mbox->u.mqe.un.wq_create;
16572 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16573 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16574 wq->page_count);
16575 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16576 cq->queue_id);
16577
16578 /* wqv is the earliest version supported, NOT the latest */
16579 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16580 phba->sli4_hba.pc_sli4_params.wqv);
16581
16582 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16583 (wq->page_size > SLI4_PAGE_SIZE))
16584 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16585 else
16586 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16587
16588 switch (wq_create_version) {
16589 case LPFC_Q_CREATE_VERSION_1:
16590 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16591 wq->entry_count);
16592 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16593 LPFC_Q_CREATE_VERSION_1);
16594
16595 switch (wq->entry_size) {
16596 default:
16597 case 64:
16598 bf_set(lpfc_mbx_wq_create_wqe_size,
16599 &wq_create->u.request_1,
16600 LPFC_WQ_WQE_SIZE_64);
16601 break;
16602 case 128:
16603 bf_set(lpfc_mbx_wq_create_wqe_size,
16604 &wq_create->u.request_1,
16605 LPFC_WQ_WQE_SIZE_128);
16606 break;
16607 }
16608 /* Request DPP by default */
16609 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16610 bf_set(lpfc_mbx_wq_create_page_size,
16611 &wq_create->u.request_1,
16612 (wq->page_size / SLI4_PAGE_SIZE));
16613 page = wq_create->u.request_1.page;
16614 break;
16615 default:
16616 page = wq_create->u.request.page;
16617 break;
16618 }
16619
16620 list_for_each_entry(dmabuf, &wq->page_list, list) {
16621 memset(dmabuf->virt, 0, hw_page_size);
16622 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16623 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16624 }
16625
16626 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16627 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16628
16629 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16630 /* The IOCTL status is embedded in the mailbox subheader. */
16631 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16632 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16633 if (shdr_status || shdr_add_status || rc) {
16634 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16635 "2503 WQ_CREATE mailbox failed with "
16636 "status x%x add_status x%x, mbx status x%x\n",
16637 shdr_status, shdr_add_status, rc);
16638 status = -ENXIO;
16639 goto out;
16640 }
16641
16642 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16643 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16644 &wq_create->u.response);
16645 else
16646 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16647 &wq_create->u.response_1);
16648
16649 if (wq->queue_id == 0xFFFF) {
16650 status = -ENXIO;
16651 goto out;
16652 }
16653
16654 wq->db_format = LPFC_DB_LIST_FORMAT;
16655 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16656 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16657 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16658 &wq_create->u.response);
16659 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16660 (wq->db_format != LPFC_DB_RING_FORMAT)) {
16661 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16662 "3265 WQ[%d] doorbell format "
16663 "not supported: x%x\n",
16664 wq->queue_id, wq->db_format);
16665 status = -EINVAL;
16666 goto out;
16667 }
16668 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16669 &wq_create->u.response);
16670 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16671 pci_barset);
16672 if (!bar_memmap_p) {
16673 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16674 "3263 WQ[%d] failed to memmap "
16675 "pci barset:x%x\n",
16676 wq->queue_id, pci_barset);
16677 status = -ENOMEM;
16678 goto out;
16679 }
16680 db_offset = wq_create->u.response.doorbell_offset;
16681 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16682 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16683 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16684 "3252 WQ[%d] doorbell offset "
16685 "not supported: x%x\n",
16686 wq->queue_id, db_offset);
16687 status = -EINVAL;
16688 goto out;
16689 }
16690 wq->db_regaddr = bar_memmap_p + db_offset;
16691 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16692 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16693 "format:x%x\n", wq->queue_id,
16694 pci_barset, db_offset, wq->db_format);
16695 } else
16696 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16697 } else {
16698 /* Check if DPP was honored by the firmware */
16699 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16700 &wq_create->u.response_1);
16701 if (wq->dpp_enable) {
16702 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16703 &wq_create->u.response_1);
16704 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16705 pci_barset);
16706 if (!bar_memmap_p) {
16707 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16708 "3267 WQ[%d] failed to memmap "
16709 "pci barset:x%x\n",
16710 wq->queue_id, pci_barset);
16711 status = -ENOMEM;
16712 goto out;
16713 }
16714 db_offset = wq_create->u.response_1.doorbell_offset;
16715 wq->db_regaddr = bar_memmap_p + db_offset;
16716 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16717 &wq_create->u.response_1);
16718 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16719 &wq_create->u.response_1);
16720 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16721 dpp_barset);
16722 if (!bar_memmap_p) {
16723 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16724 "3268 WQ[%d] failed to memmap "
16725 "pci barset:x%x\n",
16726 wq->queue_id, dpp_barset);
16727 status = -ENOMEM;
16728 goto out;
16729 }
16730 dpp_offset = wq_create->u.response_1.dpp_offset;
16731 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16732 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16733 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16734 "dpp_id:x%x dpp_barset:x%x "
16735 "dpp_offset:x%x\n",
16736 wq->queue_id, pci_barset, db_offset,
16737 wq->dpp_id, dpp_barset, dpp_offset);
16738
16739#ifdef CONFIG_X86
16740 /* Enable combined writes for DPP aperture */
16741 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16742 rc = set_memory_wc(pg_addr, 1);
16743 if (rc) {
16744 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16745 "3272 Cannot setup Combined "
16746 "Write on WQ[%d] - disable DPP\n",
16747 wq->queue_id);
16748 phba->cfg_enable_dpp = 0;
16749 }
16750#else
16751 phba->cfg_enable_dpp = 0;
16752#endif
16753 } else
16754 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16755 }
16756 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16757	if (!wq->pring) {
16758 status = -ENOMEM;
16759 goto out;
16760 }
16761 wq->type = LPFC_WQ;
16762 wq->assoc_qid = cq->queue_id;
16763 wq->subtype = subtype;
16764 wq->host_index = 0;
16765 wq->hba_index = 0;
16766 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16767
16768 /* link the wq onto the parent cq child list */
16769 list_add_tail(&wq->list, &cq->child_list);
16770out:
16771 mempool_free(mbox, phba->mbox_mem_pool);
16772 return status;
16773}
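
#ifdef CONFIG_X86
/*
 * Hedged sketch: set_memory_wc() above marks the DPP aperture page
 * write-combining, so on teardown the attribute should be restored with
 * set_memory_wb() before the BAR mapping goes away. The driver's actual
 * teardown path may handle this elsewhere; this only illustrates the
 * WC/WB pairing on the same page-aligned address.
 */
static void lpfc_sketch_release_dpp(struct lpfc_queue *wq)
{
	unsigned long pg_addr = (unsigned long)wq->dpp_regaddr & PAGE_MASK;

	if (wq->dpp_enable)
		set_memory_wb(pg_addr, 1);
}
#endif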
16774
16775/**
16776 * lpfc_rq_create - Create a Receive Queue on the HBA
16777 * @phba: HBA structure that indicates port to create a queue on.
16778 * @hrq: The queue structure to use to create the header receive queue.
16779 * @drq: The queue structure to use to create the data receive queue.
16780 * @cq: The completion queue to bind this work queue to.
16781 * @subtype: The subtype of the work queue indicating its functionality.
16782 *
16783 * This function creates a receive buffer queue pair, as detailed in @hrq and
16784 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
16785 * to the HBA.
16786 *
16787 * The @phba struct is used to send the mailbox command to the HBA. The @drq
16788 * and @hrq structs are used to get the entry count that is necessary to
16789 * determine the number of pages to use for each queue. The @cq indicates which
16790 * completion queue the buffers posted to these queues are bound to. This
16791 * function will send the RQ_CREATE mailbox command to the HBA to set up the
16792 * receive queue pair. This function is synchronous; it polls for the
16793 * mailbox command to finish before continuing.
16794 *
16795 * On success this function will return a zero. If unable to allocate enough
16796 * memory this function will return -ENOMEM. If the queue create mailbox command
16797 * fails this function will return -ENXIO.
16798 **/
16799int
16800lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16801 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16802{
16803 struct lpfc_mbx_rq_create *rq_create;
16804 struct lpfc_dmabuf *dmabuf;
16805 LPFC_MBOXQ_t *mbox;
16806 int rc, length, status = 0;
16807 uint32_t shdr_status, shdr_add_status;
16808 union lpfc_sli4_cfg_shdr *shdr;
16809 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16810 void __iomem *bar_memmap_p;
16811 uint32_t db_offset;
16812 uint16_t pci_barset;
16813
16814 /* sanity check on queue memory */
16815 if (!hrq || !drq || !cq)
16816 return -ENODEV;
16817 if (!phba->sli4_hba.pc_sli4_params.supported)
16818 hw_page_size = SLI4_PAGE_SIZE;
16819
16820 if (hrq->entry_count != drq->entry_count)
16821 return -EINVAL;
16822 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16823 if (!mbox)
16824 return -ENOMEM;
16825 length = (sizeof(struct lpfc_mbx_rq_create) -
16826 sizeof(struct lpfc_sli4_cfg_mhdr));
16827 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16828 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16829 length, LPFC_SLI4_MBX_EMBED);
16830 rq_create = &mbox->u.mqe.un.rq_create;
16831 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16832 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16833 phba->sli4_hba.pc_sli4_params.rqv);
16834 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16835 bf_set(lpfc_rq_context_rqe_count_1,
16836 &rq_create->u.request.context,
16837 hrq->entry_count);
16838 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16839 bf_set(lpfc_rq_context_rqe_size,
16840 &rq_create->u.request.context,
16841 LPFC_RQE_SIZE_8);
16842 bf_set(lpfc_rq_context_page_size,
16843 &rq_create->u.request.context,
16844 LPFC_RQ_PAGE_SIZE_4096);
16845 } else {
16846 switch (hrq->entry_count) {
16847 default:
16848 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16849 "2535 Unsupported RQ count. (%d)\n",
16850 hrq->entry_count);
16851 if (hrq->entry_count < 512) {
16852 status = -EINVAL;
16853 goto out;
16854 }
16855 fallthrough; /* otherwise default to smallest count */
16856 case 512:
16857 bf_set(lpfc_rq_context_rqe_count,
16858 &rq_create->u.request.context,
16859 LPFC_RQ_RING_SIZE_512);
16860 break;
16861 case 1024:
16862 bf_set(lpfc_rq_context_rqe_count,
16863 &rq_create->u.request.context,
16864 LPFC_RQ_RING_SIZE_1024);
16865 break;
16866 case 2048:
16867 bf_set(lpfc_rq_context_rqe_count,
16868 &rq_create->u.request.context,
16869 LPFC_RQ_RING_SIZE_2048);
16870 break;
16871 case 4096:
16872 bf_set(lpfc_rq_context_rqe_count,
16873 &rq_create->u.request.context,
16874 LPFC_RQ_RING_SIZE_4096);
16875 break;
16876 }
16877 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16878 LPFC_HDR_BUF_SIZE);
16879 }
16880 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16881 cq->queue_id);
16882 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16883 hrq->page_count);
16884 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16885 memset(dmabuf->virt, 0, hw_page_size);
16886 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16887 putPaddrLow(dmabuf->phys);
16888 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16889 putPaddrHigh(dmabuf->phys);
16890 }
16891 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16892 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16893
16894 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16895 /* The IOCTL status is embedded in the mailbox subheader. */
16896 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16897 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16898 if (shdr_status || shdr_add_status || rc) {
16899 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16900 "2504 RQ_CREATE mailbox failed with "
16901 "status x%x add_status x%x, mbx status x%x\n",
16902 shdr_status, shdr_add_status, rc);
16903 status = -ENXIO;
16904 goto out;
16905 }
16906 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16907 if (hrq->queue_id == 0xFFFF) {
16908 status = -ENXIO;
16909 goto out;
16910 }
16911
16912 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16913 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16914 &rq_create->u.response);
16915 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16916 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16917 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16918 "3262 RQ [%d] doorbell format not "
16919 "supported: x%x\n", hrq->queue_id,
16920 hrq->db_format);
16921 status = -EINVAL;
16922 goto out;
16923 }
16924
16925 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16926 &rq_create->u.response);
16927 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16928 if (!bar_memmap_p) {
16929 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16930 "3269 RQ[%d] failed to memmap pci "
16931 "barset:x%x\n", hrq->queue_id,
16932 pci_barset);
16933 status = -ENOMEM;
16934 goto out;
16935 }
16936
16937 db_offset = rq_create->u.response.doorbell_offset;
16938 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16939 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16940 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16941 "3270 RQ[%d] doorbell offset not "
16942 "supported: x%x\n", hrq->queue_id,
16943 db_offset);
16944 status = -EINVAL;
16945 goto out;
16946 }
16947 hrq->db_regaddr = bar_memmap_p + db_offset;
16948 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16949 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16950 "format:x%x\n", hrq->queue_id, pci_barset,
16951 db_offset, hrq->db_format);
16952 } else {
16953 hrq->db_format = LPFC_DB_RING_FORMAT;
16954 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16955 }
16956 hrq->type = LPFC_HRQ;
16957 hrq->assoc_qid = cq->queue_id;
16958 hrq->subtype = subtype;
16959 hrq->host_index = 0;
16960 hrq->hba_index = 0;
16961 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16962
16963 /* now create the data queue */
16964 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16965 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16966 length, LPFC_SLI4_MBX_EMBED);
16967 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16968 phba->sli4_hba.pc_sli4_params.rqv);
16969 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16970 bf_set(lpfc_rq_context_rqe_count_1,
16971 &rq_create->u.request.context, hrq->entry_count);
16972 if (subtype == LPFC_NVMET)
16973 rq_create->u.request.context.buffer_size =
16974 LPFC_NVMET_DATA_BUF_SIZE;
16975 else
16976 rq_create->u.request.context.buffer_size =
16977 LPFC_DATA_BUF_SIZE;
16978 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16979 LPFC_RQE_SIZE_8);
16980 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16981 (PAGE_SIZE/SLI4_PAGE_SIZE));
16982 } else {
16983 switch (drq->entry_count) {
16984 default:
16985 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16986 "2536 Unsupported RQ count. (%d)\n",
16987 drq->entry_count);
16988 if (drq->entry_count < 512) {
16989 status = -EINVAL;
16990 goto out;
16991 }
16992 fallthrough; /* otherwise default to smallest count */
16993 case 512:
16994 bf_set(lpfc_rq_context_rqe_count,
16995 &rq_create->u.request.context,
16996 LPFC_RQ_RING_SIZE_512);
16997 break;
16998 case 1024:
16999 bf_set(lpfc_rq_context_rqe_count,
17000 &rq_create->u.request.context,
17001 LPFC_RQ_RING_SIZE_1024);
17002 break;
17003 case 2048:
17004 bf_set(lpfc_rq_context_rqe_count,
17005 &rq_create->u.request.context,
17006 LPFC_RQ_RING_SIZE_2048);
17007 break;
17008 case 4096:
17009 bf_set(lpfc_rq_context_rqe_count,
17010 &rq_create->u.request.context,
17011 LPFC_RQ_RING_SIZE_4096);
17012 break;
17013 }
17014 if (subtype == LPFC_NVMET)
17015 bf_set(lpfc_rq_context_buf_size,
17016 &rq_create->u.request.context,
17017 LPFC_NVMET_DATA_BUF_SIZE);
17018 else
17019 bf_set(lpfc_rq_context_buf_size,
17020 &rq_create->u.request.context,
17021 LPFC_DATA_BUF_SIZE);
17022 }
17023 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17024 cq->queue_id);
17025 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17026 drq->page_count);
17027 list_for_each_entry(dmabuf, &drq->page_list, list) {
17028 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17029 putPaddrLow(dmabuf->phys);
17030 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17031 putPaddrHigh(dmabuf->phys);
17032 }
17033 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17034 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17035 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17036 /* The IOCTL status is embedded in the mailbox subheader. */
17037 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17038 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17039 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17040 if (shdr_status || shdr_add_status || rc) {
17041 status = -ENXIO;
17042 goto out;
17043 }
17044 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17045 if (drq->queue_id == 0xFFFF) {
17046 status = -ENXIO;
17047 goto out;
17048 }
17049 drq->type = LPFC_DRQ;
17050 drq->assoc_qid = cq->queue_id;
17051 drq->subtype = subtype;
17052 drq->host_index = 0;
17053 drq->hba_index = 0;
17054 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17055
17056 /* link the header and data RQs onto the parent cq child list */
17057 list_add_tail(&hrq->list, &cq->child_list);
17058 list_add_tail(&drq->list, &cq->child_list);
17059
17060out:
17061 mempool_free(mbox, phba->mbox_mem_pool);
17062 return status;
17063}
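/*
 * Illustrative sketch (editor's addition, not driver code): the queue
 * setup path creates the ELS receive pair roughly like this, where the
 * sli4_hba field names are assumptions for the example:
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 *
 * A non-zero return means the pair was not created and any previously
 * created queues must be unwound by the caller.
 */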
17064
17065/**
17066 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17067 * @phba: HBA structure that indicates port to create a queue on.
17068 * @hrqp: The queue structure array to use to create the header receive queues.
17069 * @drqp: The queue structure array to use to create the data receive queues.
17070 * @cqp: The completion queue array to bind these receive queues to.
17071 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17072 *
17073 * This function creates @phba->cfg_nvmet_mrq receive buffer queue pairs, as
17074 * detailed in @hrqp and @drqp, on a port described by @phba, by sending a
17075 * single RQ_CREATE mailbox command to the HBA.
17076 *
17077 * The @phba struct is used to send the mailbox command to the HBA. The @hrqp
17078 * and @drqp arrays are used to get the entry counts that determine the number
17079 * of pages to use for each queue. Each @cqp entry indicates which completion
17080 * queue the buffers posted to the corresponding queue pair are bound to. This
17081 * function sends one non-embedded RQ_CREATE mailbox command to set up all of
17082 * the receive queue pairs. It is synchronous and waits for the mailbox
17083 * command to finish before returning.
17084 *
17085 * On success this function returns zero. It returns -ENODEV if any queue
17086 * pointer is NULL, -EINVAL if entry counts differ, -ENOMEM if DMA memory
17087 * cannot be allocated, and -ENXIO if the mailbox command fails.
17088 **/
17089int
17090lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17091 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17092 uint32_t subtype)
17093{
17094 struct lpfc_queue *hrq, *drq, *cq;
17095 struct lpfc_mbx_rq_create_v2 *rq_create;
17096 struct lpfc_dmabuf *dmabuf;
17097 LPFC_MBOXQ_t *mbox;
17098 int rc, length, alloclen, status = 0;
17099 int cnt, idx, numrq, page_idx = 0;
17100 uint32_t shdr_status, shdr_add_status;
17101 union lpfc_sli4_cfg_shdr *shdr;
17102 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17103
17104 numrq = phba->cfg_nvmet_mrq;
17105 /* sanity check on array memory */
17106 if (!hrqp || !drqp || !cqp || !numrq)
17107 return -ENODEV;
17108 if (!phba->sli4_hba.pc_sli4_params.supported)
17109 hw_page_size = SLI4_PAGE_SIZE;
17110
17111 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17112 if (!mbox)
17113 return -ENOMEM;
17114
17115 length = sizeof(struct lpfc_mbx_rq_create_v2);
17116 length += ((2 * numrq * hrqp[0]->page_count) *
17117 sizeof(struct dma_address));
17118
17119 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17120 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17121 LPFC_SLI4_MBX_NEMBED);
17122 if (alloclen < length) {
17123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17124 "3099 Allocated DMA memory size (%d) is "
17125 "less than the requested DMA memory size "
17126 "(%d)\n", alloclen, length);
17127 status = -ENOMEM;
17128 goto out;
17129 }
17130
17133 rq_create = mbox->sge_array->addr[0];
17134 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17135
17136 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17137 cnt = 0;
17138
17139 for (idx = 0; idx < numrq; idx++) {
17140 hrq = hrqp[idx];
17141 drq = drqp[idx];
17142 cq = cqp[idx];
17143
17144 /* sanity check on queue memory */
17145 if (!hrq || !drq || !cq) {
17146 status = -ENODEV;
17147 goto out;
17148 }
17149
17150 if (hrq->entry_count != drq->entry_count) {
17151 status = -EINVAL;
17152 goto out;
17153 }
17154
17155 if (idx == 0) {
17156 bf_set(lpfc_mbx_rq_create_num_pages,
17157 &rq_create->u.request,
17158 hrq->page_count);
17159 bf_set(lpfc_mbx_rq_create_rq_cnt,
17160 &rq_create->u.request, (numrq * 2));
17161 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17162 1);
17163 bf_set(lpfc_rq_context_base_cq,
17164 &rq_create->u.request.context,
17165 cq->queue_id);
17166 bf_set(lpfc_rq_context_data_size,
17167 &rq_create->u.request.context,
17168 LPFC_NVMET_DATA_BUF_SIZE);
17169 bf_set(lpfc_rq_context_hdr_size,
17170 &rq_create->u.request.context,
17171 LPFC_HDR_BUF_SIZE);
17172 bf_set(lpfc_rq_context_rqe_count_1,
17173 &rq_create->u.request.context,
17174 hrq->entry_count);
17175 bf_set(lpfc_rq_context_rqe_size,
17176 &rq_create->u.request.context,
17177 LPFC_RQE_SIZE_8);
17178 bf_set(lpfc_rq_context_page_size,
17179 &rq_create->u.request.context,
17180 (PAGE_SIZE/SLI4_PAGE_SIZE));
17181 }
17182 rc = 0;
17183 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17184 memset(dmabuf->virt, 0, hw_page_size);
17185 cnt = page_idx + dmabuf->buffer_tag;
17186 rq_create->u.request.page[cnt].addr_lo =
17187 putPaddrLow(dmabuf->phys);
17188 rq_create->u.request.page[cnt].addr_hi =
17189 putPaddrHigh(dmabuf->phys);
17190 rc++;
17191 }
17192 page_idx += rc;
17193
17194 rc = 0;
17195 list_for_each_entry(dmabuf, &drq->page_list, list) {
17196 memset(dmabuf->virt, 0, hw_page_size);
17197 cnt = page_idx + dmabuf->buffer_tag;
17198 rq_create->u.request.page[cnt].addr_lo =
17199 putPaddrLow(dmabuf->phys);
17200 rq_create->u.request.page[cnt].addr_hi =
17201 putPaddrHigh(dmabuf->phys);
17202 rc++;
17203 }
17204 page_idx += rc;
17205
17206 hrq->db_format = LPFC_DB_RING_FORMAT;
17207 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17208 hrq->type = LPFC_HRQ;
17209 hrq->assoc_qid = cq->queue_id;
17210 hrq->subtype = subtype;
17211 hrq->host_index = 0;
17212 hrq->hba_index = 0;
17213 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17214
17215 drq->db_format = LPFC_DB_RING_FORMAT;
17216 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17217 drq->type = LPFC_DRQ;
17218 drq->assoc_qid = cq->queue_id;
17219 drq->subtype = subtype;
17220 drq->host_index = 0;
17221 drq->hba_index = 0;
17222 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17223
17224 list_add_tail(&hrq->list, &cq->child_list);
17225 list_add_tail(&drq->list, &cq->child_list);
17226 }
17227
17228 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17229 /* The IOCTL status is embedded in the mailbox subheader. */
17230 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17231 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17232 if (shdr_status || shdr_add_status || rc) {
17233 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17234 "3120 RQ_CREATE mailbox failed with "
17235 "status x%x add_status x%x, mbx status x%x\n",
17236 shdr_status, shdr_add_status, rc);
17237 status = -ENXIO;
17238 goto out;
17239 }
17240 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17241 if (rc == 0xFFFF) {
17242 status = -ENXIO;
17243 goto out;
17244 }
17245
17246 /* Initialize all RQs with associated queue id */
17247 for (idx = 0; idx < numrq; idx++) {
17248 hrq = hrqp[idx];
17249 hrq->queue_id = rc + (2 * idx);
17250 drq = drqp[idx];
17251 drq->queue_id = rc + (2 * idx) + 1;
17252 }
17253
17254out:
17255 lpfc_sli4_mbox_cmd_free(phba, mbox);
17256 return status;
17257}
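/*
 * Illustrative sketch (editor's addition): for NVMET the setup path hands
 * this routine the header/data RQ arrays and their CQ set in one call;
 * the sli4_hba array names are assumptions for the example:
 *
 *	rc = lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr,
 *			     phba->sli4_hba.nvmet_mrq_data,
 *			     phba->sli4_hba.nvmet_cqset, LPFC_NVMET);
 */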
17258
17259/**
17260 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
17261 * @phba: HBA structure that indicates port to destroy a queue on.
17262 * @eq: The queue structure associated with the queue to destroy.
17263 *
17264 * This function destroys a queue, as detailed in @eq, by sending a mailbox
17265 * command, specific to the type of queue, to the HBA.
17266 *
17267 * The @eq struct is used to get the queue ID of the queue to destroy.
17268 *
17269 * On success this function will return a zero. If the queue destroy mailbox
17270 * command fails this function will return -ENXIO.
17271 **/
17272int
17273lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17274{
17275 LPFC_MBOXQ_t *mbox;
17276 int rc, length, status = 0;
17277 uint32_t shdr_status, shdr_add_status;
17278 union lpfc_sli4_cfg_shdr *shdr;
17279
17280 /* sanity check on queue memory */
17281 if (!eq)
17282 return -ENODEV;
17283
17284 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17285 if (!mbox)
17286 return -ENOMEM;
17287 length = (sizeof(struct lpfc_mbx_eq_destroy) -
17288 sizeof(struct lpfc_sli4_cfg_mhdr));
17289 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17290 LPFC_MBOX_OPCODE_EQ_DESTROY,
17291 length, LPFC_SLI4_MBX_EMBED);
17292 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17293 eq->queue_id);
17294 mbox->vport = eq->phba->pport;
17295 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17296
17297 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17298 /* The IOCTL status is embedded in the mailbox subheader. */
17299 shdr = (union lpfc_sli4_cfg_shdr *)
17300 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17301 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17302 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17303 if (shdr_status || shdr_add_status || rc) {
17304 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17305 "2505 EQ_DESTROY mailbox failed with "
17306 "status x%x add_status x%x, mbx status x%x\n",
17307 shdr_status, shdr_add_status, rc);
17308 status = -ENXIO;
17309 }
17310
17311 /* Remove eq from any list */
17312 list_del_init(&eq->list);
17313 mempool_free(mbox, eq->phba->mbox_mem_pool);
17314 return status;
17315}
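/*
 * Illustrative sketch (editor's addition): teardown mirrors creation, so
 * a caller destroys the children of an EQ before the EQ itself; the qp
 * field names below are assumptions for the example:
 *
 *	lpfc_wq_destroy(phba, qp->io_wq);
 *	lpfc_cq_destroy(phba, qp->io_cq);
 *	lpfc_eq_destroy(phba, qp->hba_eq);
 *
 * Each destroy returns 0 or -ENXIO; teardown paths typically proceed
 * regardless of the status since the queues are going away anyway.
 */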
17316
17317/**
17318 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17319 * @phba: HBA structure that indicates port to destroy a queue on.
17320 * @cq: The queue structure associated with the queue to destroy.
17321 *
17322 * This function destroys a queue, as detailed in @cq, by sending a mailbox
17323 * command, specific to the type of queue, to the HBA.
17324 *
17325 * The @cq struct is used to get the queue ID of the queue to destroy.
17326 *
17327 * On success this function will return a zero. If the queue destroy mailbox
17328 * command fails this function will return -ENXIO.
17329 **/
17330int
17331lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17332{
17333 LPFC_MBOXQ_t *mbox;
17334 int rc, length, status = 0;
17335 uint32_t shdr_status, shdr_add_status;
17336 union lpfc_sli4_cfg_shdr *shdr;
17337
17338 /* sanity check on queue memory */
17339 if (!cq)
17340 return -ENODEV;
17341 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17342 if (!mbox)
17343 return -ENOMEM;
17344 length = (sizeof(struct lpfc_mbx_cq_destroy) -
17345 sizeof(struct lpfc_sli4_cfg_mhdr));
17346 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17347 LPFC_MBOX_OPCODE_CQ_DESTROY,
17348 length, LPFC_SLI4_MBX_EMBED);
17349 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17350 cq->queue_id);
17351 mbox->vport = cq->phba->pport;
17352 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17353 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17354 /* The IOCTL status is embedded in the mailbox subheader. */
17355 shdr = (union lpfc_sli4_cfg_shdr *)
17356 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
17357 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17358 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17359 if (shdr_status || shdr_add_status || rc) {
17360 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17361 "2506 CQ_DESTROY mailbox failed with "
17362 "status x%x add_status x%x, mbx status x%x\n",
17363 shdr_status, shdr_add_status, rc);
17364 status = -ENXIO;
17365 }
17366 /* Remove cq from any list */
17367 list_del_init(&cq->list);
17368 mempool_free(mbox, cq->phba->mbox_mem_pool);
17369 return status;
17370}
17371
17372/**
17373 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17374 * @phba: HBA structure that indicates port to destroy a queue on.
17375 * @mq: The queue structure associated with the queue to destroy.
17376 *
17377 * This function destroys a queue, as detailed in @mq, by sending a mailbox
17378 * command, specific to the type of queue, to the HBA.
17379 *
17380 * The @mq struct is used to get the queue ID of the queue to destroy.
17381 *
17382 * On success this function will return a zero. If the queue destroy mailbox
17383 * command fails this function will return -ENXIO.
17384 **/
17385int
17386lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17387{
17388 LPFC_MBOXQ_t *mbox;
17389 int rc, length, status = 0;
17390 uint32_t shdr_status, shdr_add_status;
17391 union lpfc_sli4_cfg_shdr *shdr;
17392
17393 /* sanity check on queue memory */
17394 if (!mq)
17395 return -ENODEV;
17396 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17397 if (!mbox)
17398 return -ENOMEM;
17399 length = (sizeof(struct lpfc_mbx_mq_destroy) -
17400 sizeof(struct lpfc_sli4_cfg_mhdr));
17401 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17402 LPFC_MBOX_OPCODE_MQ_DESTROY,
17403 length, LPFC_SLI4_MBX_EMBED);
17404 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17405 mq->queue_id);
17406 mbox->vport = mq->phba->pport;
17407 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17408 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17409 /* The IOCTL status is embedded in the mailbox subheader. */
17410 shdr = (union lpfc_sli4_cfg_shdr *)
17411 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17412 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17413 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17414 if (shdr_status || shdr_add_status || rc) {
17415 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17416 "2507 MQ_DESTROY mailbox failed with "
17417 "status x%x add_status x%x, mbx status x%x\n",
17418 shdr_status, shdr_add_status, rc);
17419 status = -ENXIO;
17420 }
17421 /* Remove mq from any list */
17422 list_del_init(&mq->list);
17423 mempool_free(mbox, mq->phba->mbox_mem_pool);
17424 return status;
17425}
17426
17427/**
17428 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17429 * @phba: HBA structure that indicates port to destroy a queue on.
17430 * @wq: The queue structure associated with the queue to destroy.
17431 *
17432 * This function destroys a queue, as detailed in @wq, by sending a mailbox
17433 * command, specific to the type of queue, to the HBA.
17434 *
17435 * The @wq struct is used to get the queue ID of the queue to destroy.
17436 *
17437 * On success this function will return a zero. If the queue destroy mailbox
17438 * command fails this function will return -ENXIO.
17439 **/
17440int
17441lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17442{
17443 LPFC_MBOXQ_t *mbox;
17444 int rc, length, status = 0;
17445 uint32_t shdr_status, shdr_add_status;
17446 union lpfc_sli4_cfg_shdr *shdr;
17447
17448 /* sanity check on queue memory */
17449 if (!wq)
17450 return -ENODEV;
17451 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17452 if (!mbox)
17453 return -ENOMEM;
17454 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17455 sizeof(struct lpfc_sli4_cfg_mhdr));
17456 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17457 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17458 length, LPFC_SLI4_MBX_EMBED);
17459 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17460 wq->queue_id);
17461 mbox->vport = wq->phba->pport;
17462 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17463 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17464 shdr = (union lpfc_sli4_cfg_shdr *)
17465 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17466 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17467 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17468 if (shdr_status || shdr_add_status || rc) {
17469 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17470 "2508 WQ_DESTROY mailbox failed with "
17471 "status x%x add_status x%x, mbx status x%x\n",
17472 shdr_status, shdr_add_status, rc);
17473 status = -ENXIO;
17474 }
17475 /* Remove wq from any list */
17476 list_del_init(&wq->list);
17477 kfree(wq->pring);
17478 wq->pring = NULL;
17479 mempool_free(mbox, wq->phba->mbox_mem_pool);
17480 return status;
17481}
17482
17483/**
17484 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17485 * @phba: HBA structure that indicates port to destroy a queue on.
17486 * @hrq: The queue structure associated with the queue to destroy.
17487 * @drq: The queue structure associated with the queue to destroy.
17488 *
17489 * This function destroys the receive queue pair, as detailed in @hrq and
17490 * @drq, by sending an RQ_DESTROY mailbox command for each queue to the HBA.
17491 *
17492 * The @hrq and @drq structs are used to get the queue IDs to destroy.
17493 *
17494 * On success this function will return a zero. If the queue destroy mailbox
17495 * command fails this function will return -ENXIO.
17496 **/
17497int
17498lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17499 struct lpfc_queue *drq)
17500{
17501 LPFC_MBOXQ_t *mbox;
17502 int rc, length, status = 0;
17503 uint32_t shdr_status, shdr_add_status;
17504 union lpfc_sli4_cfg_shdr *shdr;
17505
17506 /* sanity check on queue memory */
17507 if (!hrq || !drq)
17508 return -ENODEV;
17509 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17510 if (!mbox)
17511 return -ENOMEM;
17512 length = (sizeof(struct lpfc_mbx_rq_destroy) -
17513 sizeof(struct lpfc_sli4_cfg_mhdr));
17514 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17515 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17516 length, LPFC_SLI4_MBX_EMBED);
17517 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17518 hrq->queue_id);
17519 mbox->vport = hrq->phba->pport;
17520 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17521 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17522 /* The IOCTL status is embedded in the mailbox subheader. */
17523 shdr = (union lpfc_sli4_cfg_shdr *)
17524 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17525 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17526 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17527 if (shdr_status || shdr_add_status || rc) {
17528 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17529 "2509 RQ_DESTROY mailbox failed with "
17530 "status x%x add_status x%x, mbx status x%x\n",
17531 shdr_status, shdr_add_status, rc);
17532 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17533 return -ENXIO;
17534 }
17535 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17536 drq->queue_id);
17537 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17538 shdr = (union lpfc_sli4_cfg_shdr *)
17539 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17540 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17541 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17542 if (shdr_status || shdr_add_status || rc) {
17543 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17544 "2510 RQ_DESTROY mailbox failed with "
17545 "status x%x add_status x%x, mbx status x%x\n",
17546 shdr_status, shdr_add_status, rc);
17547 status = -ENXIO;
17548 }
17549 list_del_init(&hrq->list);
17550 list_del_init(&drq->list);
17551 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17552 return status;
17553}
17554
17555/**
17556 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17557 * @phba: pointer to lpfc hba data structure.
17558 * @pdma_phys_addr0: Physical address of the 1st SGL page.
17559 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17560 * @xritag: the xritag that ties this io to the SGL pages.
17561 *
17562 * This routine will post the sgl pages for the IO that has the xritag
17563 * that is in the iocbq structure. The xritag is assigned during iocbq
17564 * creation and persists for as long as the driver is loaded.
17565 * If the caller has fewer than 256 scatter gather segments to map, then
17566 * pdma_phys_addr1 should be 0.
17567 * If the caller needs to map more than 256 scatter gather segments, then
17568 * pdma_phys_addr1 should be a valid physical address.
17569 * Physical addresses for SGLs must be 64-byte aligned.
17570 * When mapping two SGL pages, the first must have 256 entries; the second
17571 * can have between 1 and 256 entries.
17572 *
17573 * Return codes:
17574 * 0 - Success
17575 * -ENXIO, -ENOMEM - Failure
17576 **/
17577int
17578lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17579 dma_addr_t pdma_phys_addr0,
17580 dma_addr_t pdma_phys_addr1,
17581 uint16_t xritag)
17582{
17583 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17584 LPFC_MBOXQ_t *mbox;
17585 int rc;
17586 uint32_t shdr_status, shdr_add_status;
17587 uint32_t mbox_tmo;
17588 union lpfc_sli4_cfg_shdr *shdr;
17589
17590 if (xritag == NO_XRI) {
17591 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17592 "0364 Invalid param:\n");
17593 return -EINVAL;
17594 }
17595
17596 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17597 if (!mbox)
17598 return -ENOMEM;
17599
17600 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17601 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17602 sizeof(struct lpfc_mbx_post_sgl_pages) -
17603 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17604
17605 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17606 &mbox->u.mqe.un.post_sgl_pages;
17607 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17608 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17609
17610 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17611 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17612 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17613 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17614
17615 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17616 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17617 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17618 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17619 if (!phba->sli4_hba.intr_enable)
17620 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17621 else {
17622 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17623 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17624 }
17625 /* The IOCTL status is embedded in the mailbox subheader. */
17626 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17627 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17628 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17629 if (!phba->sli4_hba.intr_enable)
17630 mempool_free(mbox, phba->mbox_mem_pool);
17631 else if (rc != MBX_TIMEOUT)
17632 mempool_free(mbox, phba->mbox_mem_pool);
17633 if (shdr_status || shdr_add_status || rc) {
17634 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17635 "2511 POST_SGL mailbox failed with "
17636 "status x%x add_status x%x, mbx status x%x\n",
17637 shdr_status, shdr_add_status, rc);
17638 }
17639 return (shdr_status || shdr_add_status || rc) ? -ENXIO : 0;
17640}
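/*
 * Illustrative sketch (editor's addition): posting a single-page SGL for
 * one xri, mirroring the call made later in lpfc_sli4_post_io_sgl_list()
 * for a buffer whose sgl fits in one page (second page address is 0):
 *
 *	status = lpfc_sli4_post_sgl(phba, lpfc_ncmd->dma_phys_sgl, 0,
 *				    lpfc_ncmd->cur_iocbq.sli4_xritag);
 */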
17641
17642/**
17643 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17644 * @phba: pointer to lpfc hba data structure.
17645 *
17646 * This routine is invoked to allocate the next available logical xri
17647 * from the driver's xri bitmask, consistent with the SLI-4 interface
17648 * spec. Because the index is logical, the search starts at bit 0 each
17649 * time.
17650 *
17651 * Returns
17652 * An available xri in the range 0 <= xri < max_xri if successful,
17653 * NO_XRI if no xris are available.
17654 **/
17655static uint16_t
17656lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17657{
17658 unsigned long xri;
17659
17660 /*
17661 * Fetch the next logical xri. Because this index is logical,
17662 * the driver starts at 0 each time.
17663 */
17664 spin_lock_irq(&phba->hbalock);
17665 xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
17666 phba->sli4_hba.max_cfg_param.max_xri);
17667 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17668 spin_unlock_irq(&phba->hbalock);
17669 return NO_XRI;
17670 } else {
17671 set_bit(xri, phba->sli4_hba.xri_bmask);
17672 phba->sli4_hba.max_cfg_param.xri_used++;
17673 }
17674 spin_unlock_irq(&phba->hbalock);
17675 return xri;
17676}
17677
17678/**
17679 * __lpfc_sli4_free_xri - Release an xri for reuse.
17680 * @phba: pointer to lpfc hba data structure.
17681 * @xri: xri to release.
17682 *
17683 * This routine is invoked to release an xri to the pool of
17684 * available xris maintained by the driver.
17685 **/
17686static void
17687__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17688{
17689 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17690 phba->sli4_hba.max_cfg_param.xri_used--;
17691 }
17692}
17693
17694/**
17695 * lpfc_sli4_free_xri - Release an xri for reuse.
17696 * @phba: pointer to lpfc hba data structure.
17697 * @xri: xri to release.
17698 *
17699 * This routine is invoked to release an xri to the pool of
17700 * available xris maintained by the driver.
17701 **/
17702void
17703lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17704{
17705 spin_lock_irq(&phba->hbalock);
17706 __lpfc_sli4_free_xri(phba, xri);
17707 spin_unlock_irq(&phba->hbalock);
17708}
17709
17710/**
17711 * lpfc_sli4_next_xritag - Get an xritag for the io
17712 * @phba: Pointer to HBA context object.
17713 *
17714 * This function gets an xritag for the iocb. If there is no unused xritag
17715 * it will return NO_XRI (0xffff) and log a warning.
17716 * The function returns the allocated xritag if successful, else returns
17717 * NO_XRI. NO_XRI is not a valid xritag.
17718 * The caller is not required to hold any lock.
17719 **/
17720uint16_t
17721lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17722{
17723 uint16_t xri_index;
17724
17725 xri_index = lpfc_sli4_alloc_xri(phba);
17726 if (xri_index == NO_XRI)
17727 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17728 "2004 Failed to allocate XRI.last XRITAG is %d"
17729 " Max XRI is %d, Used XRI is %d\n",
17730 xri_index,
17731 phba->sli4_hba.max_cfg_param.max_xri,
17732 phba->sli4_hba.max_cfg_param.xri_used);
17733 return xri_index;
17734}
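/*
 * Illustrative sketch (editor's addition): the xri lifecycle as seen by a
 * caller of the helpers above (variable names and error handling are
 * assumptions for the example):
 *
 *	xritag = lpfc_sli4_next_xritag(phba);
 *	if (xritag == NO_XRI)
 *		return -ENOMEM;			pool exhausted
 *	... use xritag to post an sgl / issue IO ...
 *	lpfc_sli4_free_xri(phba, xritag);	return the bit to xri_bmask
 */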
17735
17736/**
17737 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17738 * @phba: pointer to lpfc hba data structure.
17739 * @post_sgl_list: pointer to els sgl entry list.
17740 * @post_cnt: number of els sgl entries on the list.
17741 *
17742 * This routine is invoked to post a block of the driver's sgl pages to the
17743 * HBA using a non-embedded mailbox command. No lock is held. This routine
17744 * is only called when the driver is loading and after all IO has been
17745 * stopped.
17746 **/
17747static int
17748lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17749 struct list_head *post_sgl_list,
17750 int post_cnt)
17751{
17752 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17753 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17754 struct sgl_page_pairs *sgl_pg_pairs;
17755 void *viraddr;
17756 LPFC_MBOXQ_t *mbox;
17757 uint32_t reqlen, alloclen, pg_pairs;
17758 uint32_t mbox_tmo;
17759 uint16_t xritag_start = 0;
17760 int rc = 0;
17761 uint32_t shdr_status, shdr_add_status;
17762 union lpfc_sli4_cfg_shdr *shdr;
17763
17764 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17765 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17766 if (reqlen > SLI4_PAGE_SIZE) {
17767 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17768 "2559 Block sgl registration required DMA "
17769 "size (%d) great than a page\n", reqlen);
17770 return -ENOMEM;
17771 }
17772
17773 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17774 if (!mbox)
17775 return -ENOMEM;
17776
17777 /* Allocate DMA memory and set up the non-embedded mailbox command */
17778 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17779 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17780 LPFC_SLI4_MBX_NEMBED);
17781
17782 if (alloclen < reqlen) {
17783 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17784 "0285 Allocated DMA memory size (%d) is "
17785 "less than the requested DMA memory "
17786 "size (%d)\n", alloclen, reqlen);
17787 lpfc_sli4_mbox_cmd_free(phba, mbox);
17788 return -ENOMEM;
17789 }
17790 /* Set up the SGL pages in the non-embedded DMA pages */
17791 viraddr = mbox->sge_array->addr[0];
17792 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17793 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17794
17795 pg_pairs = 0;
17796 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17797 /* Set up the sge entry */
17798 sgl_pg_pairs->sgl_pg0_addr_lo =
17799 cpu_to_le32(putPaddrLow(sglq_entry->phys));
17800 sgl_pg_pairs->sgl_pg0_addr_hi =
17801 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17802 sgl_pg_pairs->sgl_pg1_addr_lo =
17803 cpu_to_le32(putPaddrLow(0));
17804 sgl_pg_pairs->sgl_pg1_addr_hi =
17805 cpu_to_le32(putPaddrHigh(0));
17806
17807 /* Keep the first xritag on the list */
17808 if (pg_pairs == 0)
17809 xritag_start = sglq_entry->sli4_xritag;
17810 sgl_pg_pairs++;
17811 pg_pairs++;
17812 }
17813
17814 /* Complete initialization and perform endian conversion. */
17815 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17816 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17817 sgl->word0 = cpu_to_le32(sgl->word0);
17818
17819 if (!phba->sli4_hba.intr_enable)
17820 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17821 else {
17822 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17823 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17824 }
17825 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17826 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17827 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17828 if (!phba->sli4_hba.intr_enable)
17829 lpfc_sli4_mbox_cmd_free(phba, mbox);
17830 else if (rc != MBX_TIMEOUT)
17831 lpfc_sli4_mbox_cmd_free(phba, mbox);
17832 if (shdr_status || shdr_add_status || rc) {
17833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17834 "2513 POST_SGL_BLOCK mailbox command failed "
17835 "status x%x add_status x%x mbx status x%x\n",
17836 shdr_status, shdr_add_status, rc);
17837 rc = -ENXIO;
17838 }
17839 return rc;
17840}
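/*
 * Editor's sizing note (struct sizes are assumptions): reqlen above is
 * post_cnt * sizeof(struct sgl_page_pairs) plus the config header and one
 * trailing word. With 16-byte page pairs and a 4096-byte SLI4_PAGE_SIZE,
 * a single non-embedded command holds roughly 250 pairs, which is why
 * callers batch posts in LPFC_NEMBED_MBOX_SGL_CNT-sized blocks.
 */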
17841
17842/**
17843 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
17844 * @phba: pointer to lpfc hba data structure.
17845 * @nblist: pointer to nvme buffer list.
17846 * @count: number of nvme buffers on the list.
17847 *
17848 * This routine is invoked to post a block of @count nvme sgl pages from the
17849 * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
17850 * No Lock is held.
17851 *
17852 **/
17853static int
17854lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17855 int count)
17856{
17857 struct lpfc_io_buf *lpfc_ncmd;
17858 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17859 struct sgl_page_pairs *sgl_pg_pairs;
17860 void *viraddr;
17861 LPFC_MBOXQ_t *mbox;
17862 uint32_t reqlen, alloclen, pg_pairs;
17863 uint32_t mbox_tmo;
17864 uint16_t xritag_start = 0;
17865 int rc = 0;
17866 uint32_t shdr_status, shdr_add_status;
17867 dma_addr_t pdma_phys_bpl1;
17868 union lpfc_sli4_cfg_shdr *shdr;
17869
17870 /* Calculate the requested length of the dma memory */
17871 reqlen = count * sizeof(struct sgl_page_pairs) +
17872 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17873 if (reqlen > SLI4_PAGE_SIZE) {
17874 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17875 "6118 Block sgl registration required DMA "
17876 "size (%d) great than a page\n", reqlen);
17877 return -ENOMEM;
17878 }
17879 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17880 if (!mbox) {
17881 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17882 "6119 Failed to allocate mbox cmd memory\n");
17883 return -ENOMEM;
17884 }
17885
17886 /* Allocate DMA memory and set up the non-embedded mailbox command */
17887 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17888 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17889 reqlen, LPFC_SLI4_MBX_NEMBED);
17890
17891 if (alloclen < reqlen) {
17892 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17893 "6120 Allocated DMA memory size (%d) is "
17894 "less than the requested DMA memory "
17895 "size (%d)\n", alloclen, reqlen);
17896 lpfc_sli4_mbox_cmd_free(phba, mbox);
17897 return -ENOMEM;
17898 }
17899
17900 /* Get the first SGE entry from the non-embedded DMA memory */
17901 viraddr = mbox->sge_array->addr[0];
17902
17903 /* Set up the SGL pages in the non-embedded DMA pages */
17904 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17905 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17906
17907 pg_pairs = 0;
17908 list_for_each_entry(lpfc_ncmd, nblist, list) {
17909 /* Set up the sge entry */
17910 sgl_pg_pairs->sgl_pg0_addr_lo =
17911 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17912 sgl_pg_pairs->sgl_pg0_addr_hi =
17913 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17914 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17915 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17916 SGL_PAGE_SIZE;
17917 else
17918 pdma_phys_bpl1 = 0;
17919 sgl_pg_pairs->sgl_pg1_addr_lo =
17920 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17921 sgl_pg_pairs->sgl_pg1_addr_hi =
17922 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17923 /* Keep the first xritag on the list */
17924 if (pg_pairs == 0)
17925 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17926 sgl_pg_pairs++;
17927 pg_pairs++;
17928 }
17929 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17930 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17931 /* Perform endian conversion if necessary */
17932 sgl->word0 = cpu_to_le32(sgl->word0);
17933
17934 if (!phba->sli4_hba.intr_enable) {
17935 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17936 } else {
17937 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17938 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17939 }
17940 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17941 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17942 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17943 if (!phba->sli4_hba.intr_enable)
17944 lpfc_sli4_mbox_cmd_free(phba, mbox);
17945 else if (rc != MBX_TIMEOUT)
17946 lpfc_sli4_mbox_cmd_free(phba, mbox);
17947 if (shdr_status || shdr_add_status || rc) {
17948 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17949 "6125 POST_SGL_BLOCK mailbox command failed "
17950 "status x%x add_status x%x mbx status x%x\n",
17951 shdr_status, shdr_add_status, rc);
17952 rc = -ENXIO;
17953 }
17954 return rc;
17955}
17956
17957/**
17958 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
17959 * @phba: pointer to lpfc hba data structure.
17960 * @post_nblist: pointer to the nvme buffer list.
17961 * @sb_count: number of nvme buffers.
17962 *
17963 * This routine walks a list of nvme buffers that was passed in. It attempts
17964 * to construct blocks of nvme buffer sgls that contain contiguous xris and
17965 * uses the non-embedded SGL block post mailbox command to post each block to
17966 * the port. For a single NVME buffer sgl with a non-contiguous xri, if any,
17967 * it uses the embedded SGL post mailbox command instead. The @post_nblist
17968 * passed in must be a local list, so no lock is needed while manipulating it.
17969 *
17970 * Returns: the number of successfully posted buffers, or 0 on failure.
17971 **/
17972int
17973lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17974 struct list_head *post_nblist, int sb_count)
17975{
17976 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17977 int status, sgl_size;
17978 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17979 dma_addr_t pdma_phys_sgl1;
17980 int last_xritag = NO_XRI;
17981 int cur_xritag;
17982 LIST_HEAD(prep_nblist);
17983 LIST_HEAD(blck_nblist);
17984 LIST_HEAD(nvme_nblist);
17985
17986 /* sanity check */
17987 if (sb_count <= 0)
17988 return -EINVAL;
17989
17990 sgl_size = phba->cfg_sg_dma_buf_size;
17991 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17992 list_del_init(&lpfc_ncmd->list);
17993 block_cnt++;
17994 if ((last_xritag != NO_XRI) &&
17995 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17996 /* a hole in xri block, form a sgl posting block */
17997 list_splice_init(&prep_nblist, &blck_nblist);
17998 post_cnt = block_cnt - 1;
17999 /* prepare list for next posting block */
18000 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18001 block_cnt = 1;
18002 } else {
18003 /* prepare list for next posting block */
18004 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18005 /* enough sgls for non-embed sgl mbox command */
18006 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18007 list_splice_init(&prep_nblist, &blck_nblist);
18008 post_cnt = block_cnt;
18009 block_cnt = 0;
18010 }
18011 }
18012 num_posting++;
18013 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18014
18015 /* end of repost sgl list condition for NVME buffers */
18016 if (num_posting == sb_count) {
18017 if (post_cnt == 0) {
18018 /* last sgl posting block */
18019 list_splice_init(&prep_nblist, &blck_nblist);
18020 post_cnt = block_cnt;
18021 } else if (block_cnt == 1) {
18022 /* last single sgl with non-contiguous xri */
18023 if (sgl_size > SGL_PAGE_SIZE)
18024 pdma_phys_sgl1 =
18025 lpfc_ncmd->dma_phys_sgl +
18026 SGL_PAGE_SIZE;
18027 else
18028 pdma_phys_sgl1 = 0;
18029 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18030 status = lpfc_sli4_post_sgl(
18031 phba, lpfc_ncmd->dma_phys_sgl,
18032 pdma_phys_sgl1, cur_xritag);
18033 if (status) {
18034 /* Post error. Buffer unavailable. */
18035 lpfc_ncmd->flags |=
18036 LPFC_SBUF_NOT_POSTED;
18037 } else {
18038 /* Post success. Buffer available. */
18039 lpfc_ncmd->flags &=
18040 ~LPFC_SBUF_NOT_POSTED;
18041 lpfc_ncmd->status = IOSTAT_SUCCESS;
18042 num_posted++;
18043 }
18044 /* success, put on NVME buffer sgl list */
18045 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18046 }
18047 }
18048
18049 /* continue until a nembed page worth of sgls */
18050 if (post_cnt == 0)
18051 continue;
18052
18053 /* post block of NVME buffer list sgls */
18054 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18055 post_cnt);
18056
18057 /* don't reset xritag due to hole in xri block */
18058 if (block_cnt == 0)
18059 last_xritag = NO_XRI;
18060
18061 /* reset NVME buffer post count for next round of posting */
18062 post_cnt = 0;
18063
18064 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
18065 while (!list_empty(&blck_nblist)) {
18066 list_remove_head(&blck_nblist, lpfc_ncmd,
18067 struct lpfc_io_buf, list);
18068 if (status) {
18069 /* Post error. Mark buffer unavailable. */
18070 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18071 } else {
18072 /* Post success, Mark buffer available. */
18073 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18074 lpfc_ncmd->status = IOSTAT_SUCCESS;
18075 num_posted++;
18076 }
18077 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18078 }
18079 }
18080 /* Push NVME buffers with sgl posted to the available list */
18081 lpfc_io_buf_replenish(phba, &nvme_nblist);
18082
18083 return num_posted;
18084}
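/*
 * Editor's walk-through (hypothetical xritags): given buffers with xritags
 * 100, 101, 102, 200, 201, the loop above posts {100, 101, 102} as one
 * block when it sees the hole before 200, then posts {200, 201} as the
 * final block. Only when the last buffer is alone in a block with a
 * non-contiguous xri is it posted individually via lpfc_sli4_post_sgl().
 */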
18085
18086/**
18087 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18088 * @phba: pointer to lpfc_hba struct that the frame was received on
18089 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18090 *
18091 * This function checks the fields in the @fc_hdr to see if the FC frame is a
18092 * valid type of frame that the LPFC driver will handle. This function will
18093 * return a zero if the frame is a valid frame or a non-zero value when the
18094 * frame does not pass the check.
18095 **/
18096static int
18097lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18098{
18099
18100 struct fc_vft_header *fc_vft_hdr;
18101 uint32_t *header = (uint32_t *) fc_hdr;
18102
18103#define FC_RCTL_MDS_DIAGS 0xF4
18104
18105 switch (fc_hdr->fh_r_ctl) {
18106 case FC_RCTL_DD_UNCAT: /* uncategorized information */
18107 case FC_RCTL_DD_SOL_DATA: /* solicited data */
18108 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
18109 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
18110 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
18111 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
18112 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
18113 case FC_RCTL_DD_CMD_STATUS: /* command status */
18114 case FC_RCTL_ELS_REQ: /* extended link services request */
18115 case FC_RCTL_ELS_REP: /* extended link services reply */
18116 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
18117 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
18118 case FC_RCTL_BA_ABTS: /* basic link service abort */
18119 case FC_RCTL_BA_RMC: /* remove connection */
18120 case FC_RCTL_BA_ACC: /* basic accept */
18121 case FC_RCTL_BA_RJT: /* basic reject */
18122 case FC_RCTL_BA_PRMT:
18123 case FC_RCTL_ACK_1: /* acknowledge_1 */
18124 case FC_RCTL_ACK_0: /* acknowledge_0 */
18125 case FC_RCTL_P_RJT: /* port reject */
18126 case FC_RCTL_F_RJT: /* fabric reject */
18127 case FC_RCTL_P_BSY: /* port busy */
18128 case FC_RCTL_F_BSY: /* fabric busy to data frame */
18129 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
18130 case FC_RCTL_LCR: /* link credit reset */
18131 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18132 case FC_RCTL_END: /* end */
18133 break;
18134 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
18135 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18136 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18137 return lpfc_fc_frame_check(phba, fc_hdr);
18138 case FC_RCTL_BA_NOP: /* basic link service NOP */
18139 default:
18140 goto drop;
18141 }
18142
18143 switch (fc_hdr->fh_type) {
18144 case FC_TYPE_BLS:
18145 case FC_TYPE_ELS:
18146 case FC_TYPE_FCP:
18147 case FC_TYPE_CT:
18148 case FC_TYPE_NVME:
18149 break;
18150 case FC_TYPE_IP:
18151 case FC_TYPE_ILS:
18152 default:
18153 goto drop;
18154 }
18155
18156 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18157 "2538 Received frame rctl:x%x, type:x%x, "
18158 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18159 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18160 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18161 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18162 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18163 be32_to_cpu(header[6]));
18164 return 0;
18165drop:
18166 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18167 "2539 Dropped frame rctl:x%x type:x%x\n",
18168 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18169 return 1;
18170}
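/*
 * Editor's examples of the check above: a frame with fh_r_ctl of
 * FC_RCTL_ELS_REQ and fh_type of FC_TYPE_ELS passes (returns 0); a frame
 * with fh_r_ctl of FC_RCTL_BA_NOP is dropped (returns 1); a frame carrying
 * FC_RCTL_VFTH is re-checked against the real header that follows the
 * VFT header.
 */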
18171
18172/**
18173 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18174 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18175 *
18176 * This function processes the FC header to retrieve the VFI from the VF
18177 * header, if one exists. This function will return the VFI if one exists
18178 * or 0 if no VSAN Header exists.
18179 **/
18180static uint32_t
18181lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18182{
18183 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18184
18185 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18186 return 0;
18187 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
18188}
18189
18190/**
18191 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18192 * @phba: Pointer to the HBA structure to search for the vport on
18193 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18194 * @fcfi: The FC Fabric ID that the frame came from
18195 * @did: Destination ID to match against
18196 *
18197 * This function searches the @phba for a vport that matches the content of the
18198 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18199 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18200 * returns the matching vport pointer or NULL if unable to match frame to a
18201 * vport.
18202 **/
18203static struct lpfc_vport *
18204lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18205 uint16_t fcfi, uint32_t did)
18206{
18207 struct lpfc_vport **vports;
18208 struct lpfc_vport *vport = NULL;
18209 int i;
18210
18211 if (did == Fabric_DID)
18212 return phba->pport;
18213 if ((phba->pport->fc_flag & FC_PT2PT) &&
18214 !(phba->link_state == LPFC_HBA_READY))
18215 return phba->pport;
18216
18217 vports = lpfc_create_vport_work_array(phba);
18218 if (vports != NULL) {
18219 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18220 if (phba->fcf.fcfi == fcfi &&
18221 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18222 vports[i]->fc_myDID == did) {
18223 vport = vports[i];
18224 break;
18225 }
18226 }
18227 }
18228 lpfc_destroy_vport_work_array(phba, vports);
18229 return vport;
18230}
18231
18232/**
18233 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18234 * @vport: The vport to work on.
18235 *
18236 * This function updates the receive sequence time stamp for this vport. The
18237 * receive sequence time stamp indicates the time that the last frame of
18238 * the sequence that has been idle for the longest amount of time was received.
18239 * The driver uses this time stamp to determine if any received sequences have
18240 * timed out.
18241 **/
18242static void
18243lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18244{
18245 struct lpfc_dmabuf *h_buf;
18246 struct hbq_dmabuf *dmabuf = NULL;
18247
18248 /* get the oldest sequence on the rcv list */
18249 h_buf = list_get_first(&vport->rcv_buffer_list,
18250 struct lpfc_dmabuf, list);
18251 if (!h_buf)
18252 return;
18253 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18254 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18255}
18256
18257/**
18258 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18259 * @vport: The vport that the received sequences were sent to.
18260 *
18261 * This function cleans up all outstanding received sequences. This is called
18262 * by the driver when a link event or user action invalidates all the received
18263 * sequences.
18264 **/
18265void
18266lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18267{
18268 struct lpfc_dmabuf *h_buf, *hnext;
18269 struct lpfc_dmabuf *d_buf, *dnext;
18270 struct hbq_dmabuf *dmabuf = NULL;
18271
18272 /* start with the oldest sequence on the rcv list */
18273 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18274 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18275 list_del_init(&dmabuf->hbuf.list);
18276 list_for_each_entry_safe(d_buf, dnext,
18277 &dmabuf->dbuf.list, list) {
18278 list_del_init(&d_buf->list);
18279 lpfc_in_buf_free(vport->phba, d_buf);
18280 }
18281 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18282 }
18283}
18284
18285/**
18286 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18287 * @vport: The vport that the received sequences were sent to.
18288 *
18289 * This function determines whether any received sequences have timed out by
18290 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
18291 * indicates that there is at least one timed out sequence this routine will
18292 * go through the received sequences one at a time from most inactive to most
18293 * active to determine which ones need to be cleaned up. Once it has determined
18294 * that a sequence needs to be cleaned up it will simply free up the resources
18295 * without sending an abort.
18296 **/
18297void
18298lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18299{
18300 struct lpfc_dmabuf *h_buf, *hnext;
18301 struct lpfc_dmabuf *d_buf, *dnext;
18302 struct hbq_dmabuf *dmabuf = NULL;
18303 unsigned long timeout;
18304 int abort_count = 0;
18305
18306 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18307 vport->rcv_buffer_time_stamp);
18308 if (list_empty(&vport->rcv_buffer_list) ||
18309 time_before(jiffies, timeout))
18310 return;
18311 /* start with the oldest sequence on the rcv list */
18312 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18313 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18314 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18315 dmabuf->time_stamp);
18316 if (time_before(jiffies, timeout))
18317 break;
18318 abort_count++;
18319 list_del_init(&dmabuf->hbuf.list);
18320 list_for_each_entry_safe(d_buf, dnext,
18321 &dmabuf->dbuf.list, list) {
18322 list_del_init(&d_buf->list);
18323 lpfc_in_buf_free(vport->phba, d_buf);
18324 }
18325 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18326 }
18327 if (abort_count)
18328 lpfc_update_rcv_time_stamp(vport);
18329}
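/*
 * Editor's timing example (hypothetical E_D_TOV of 2000 ms): a sequence is
 * freed once jiffies passes dmabuf->time_stamp + msecs_to_jiffies(2000).
 * Because lpfc_fc_frame_add() keeps the rcv list ordered oldest-first, the
 * scan above can stop at the first sequence still within its window.
 */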
18330
18331/**
18332 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
18333 * @vport: pointer to a virtual port
18334 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18335 *
18336 * This function searches through the existing incomplete sequences that have
18337 * been sent to this @vport. If the frame matches one of the incomplete
18338 * sequences then the dbuf in the @dmabuf is added to the list of frames that
18339 * make up that sequence. If no sequence is found that matches this frame then
18340 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
18341 * This function returns a pointer to the first dmabuf in the sequence list that
18342 * the frame was linked to.
18343 **/
18344static struct hbq_dmabuf *
18345lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18346{
18347 struct fc_frame_header *new_hdr;
18348 struct fc_frame_header *temp_hdr;
18349 struct lpfc_dmabuf *d_buf;
18350 struct lpfc_dmabuf *h_buf;
18351 struct hbq_dmabuf *seq_dmabuf = NULL;
18352 struct hbq_dmabuf *temp_dmabuf = NULL;
18353 uint8_t found = 0;
18354
18355 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18356 dmabuf->time_stamp = jiffies;
18357 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18358
18359 /* Use the hdr_buf to find the sequence that this frame belongs to */
18360 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18361 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18362 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18363 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18364 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18365 continue;
18366 /* found a pending sequence that matches this frame */
18367 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18368 break;
18369 }
18370 if (!seq_dmabuf) {
18371 /*
18372 * This indicates first frame received for this sequence.
18373 * Queue the buffer on the vport's rcv_buffer_list.
18374 */
18375 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18376 lpfc_update_rcv_time_stamp(vport);
18377 return dmabuf;
18378 }
18379 temp_hdr = seq_dmabuf->hbuf.virt;
18380 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18381 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18382 list_del_init(&seq_dmabuf->hbuf.list);
18383 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18384 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18385 lpfc_update_rcv_time_stamp(vport);
18386 return dmabuf;
18387 }
18388 /* move this sequence to the tail to mark it most recently active */
18389 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18390 seq_dmabuf->time_stamp = jiffies;
18391 lpfc_update_rcv_time_stamp(vport);
18392 if (list_empty(&seq_dmabuf->dbuf.list)) {
18393 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18394 return seq_dmabuf;
18395 }
18396 /* find the correct place in the sequence to insert this frame */
18397 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18398 while (!found) {
18399 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18400 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18401 /*
18402 * If the frame's sequence count is greater than that of the
18403 * frame on the list, then insert the frame right after it.
18404 */
18405 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18406 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18407 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18408 found = 1;
18409 break;
18410 }
18411
18412 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18413 break;
18414 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18415 }
18416
18417 if (found)
18418 return seq_dmabuf;
18419 return NULL;
18420}
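
/*
 * Editor's illustrative sketch (not part of the upstream driver): the
 * sequence-key match used by lpfc_fc_frame_add() above.  Two frames
 * belong to the same sequence when SEQ_ID, OX_ID and the S_ID all
 * match; S_ID is a 24-bit field stored as three bytes, so it is
 * compared with memcmp().  The helper name is hypothetical.
 */
static inline bool lpfc_example_same_sequence(const struct fc_frame_header *a,
					      const struct fc_frame_header *b)
{
	return a->fh_seq_id == b->fh_seq_id &&
	       a->fh_ox_id == b->fh_ox_id &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}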
18421
18422/**
18423 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18424 * @vport: pointer to a virtual port
18425 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18426 *
18427 * This function tries to abort the partially assembled sequence described
18428 * by the information in the basic abort @dmabuf. It checks whether such a
18429 * partially assembled sequence is held by the driver. If so, it frees up all
18430 * the frames from the partially assembled sequence.
18431 *
18432 * Return
18433 * true -- if a matching partially assembled sequence was present and all
18434 * of its frames were freed;
18435 * false -- if no matching partially assembled sequence was present, so
18436 * nothing was aborted in the lower layer driver
18437 **/
18438static bool
18439lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18440 struct hbq_dmabuf *dmabuf)
18441{
18442 struct fc_frame_header *new_hdr;
18443 struct fc_frame_header *temp_hdr;
18444 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18445 struct hbq_dmabuf *seq_dmabuf = NULL;
18446
18447 /* Use the hdr_buf to find the sequence that matches this frame */
18448 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18449 INIT_LIST_HEAD(&dmabuf->hbuf.list);
18450 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18451 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18452 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18453 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18454 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18455 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18456 continue;
18457 /* found a pending sequence that matches this frame */
18458 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18459 break;
18460 }
18461
18462 /* Free up all the frames from the partially assembled sequence */
18463 if (seq_dmabuf) {
18464 list_for_each_entry_safe(d_buf, n_buf,
18465 &seq_dmabuf->dbuf.list, list) {
18466 list_del_init(&d_buf->list);
18467 lpfc_in_buf_free(vport->phba, d_buf);
18468 }
18469 return true;
18470 }
18471 return false;
18472}
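
/*
 * Editor's illustrative sketch (not part of the upstream driver): the
 * list teardown idiom used above.  Because every node is freed while
 * the list is being walked, the _safe iterator is required; the plain
 * iterator would dereference a node after it was freed.  The helper
 * name is hypothetical; lpfc_in_buf_free() is the driver's real free
 * routine.
 */
static inline void lpfc_example_free_frame_list(struct lpfc_hba *phba,
						struct list_head *head)
{
	struct lpfc_dmabuf *d_buf, *next;

	list_for_each_entry_safe(d_buf, next, head, list) {
		list_del_init(&d_buf->list);
		lpfc_in_buf_free(phba, d_buf);
	}
}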
18473
18474/**
18475 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18476 * @vport: pointer to a virtual port
18477 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18478 *
18479 * This function tries to abort the sequence already assembled and passed to
18480 * the upper level protocol, described by the information in the basic abort
18481 * @dmabuf. It checks whether such a pending context exists at the upper
18482 * level protocol. If so, it cleans up the pending context.
18483 *
18484 * Return
18485 * true -- if a matching pending context for the sequence was found and
18486 * cleaned at the ulp;
18487 * false -- if no matching pending context for the sequence was present
18488 * at the ulp.
18489 **/
18490static bool
18491lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18492{
18493 struct lpfc_hba *phba = vport->phba;
18494 int handled;
18495
18496 /* Accepting abort at ulp with SLI4 only */
18497 if (phba->sli_rev < LPFC_SLI_REV4)
18498 return false;
18499
18500 /* Give each interested upper level protocol a chance to handle the abort */
18501 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18502 if (handled)
18503 return true;
18504
18505 return false;
18506}
18507
18508/**
18509 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18510 * @phba: Pointer to HBA context object.
18511 * @cmd_iocbq: pointer to the command iocbq structure.
18512 * @rsp_iocbq: pointer to the response iocbq structure.
18513 *
18514 * This function handles the sequence abort response iocb command complete
18515 * event. It properly releases the memory allocated to the sequence abort
18516 * accept iocb.
18517 **/
18518static void
18519lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18520 struct lpfc_iocbq *cmd_iocbq,
18521 struct lpfc_iocbq *rsp_iocbq)
18522{
18523 if (cmd_iocbq) {
18524 lpfc_nlp_put(cmd_iocbq->ndlp);
18525 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18526 }
18527
18528 /* Failure means BLS ABORT RSP did not get delivered to remote node */
18529 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18530 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18531 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
18532 get_job_ulpstatus(phba, rsp_iocbq),
18533 get_job_word4(phba, rsp_iocbq));
18534}
18535
18536/**
18537 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18538 * @phba: Pointer to HBA context object.
18539 * @xri: xri id in transaction.
18540 *
18541 * This function validates that the xri maps to the known range of XRIs
18542 * allocated and used by the driver.
18543 **/
18544uint16_t
18545lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18546 uint16_t xri)
18547{
18548 uint16_t i;
18549
18550 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18551 if (xri == phba->sli4_hba.xri_ids[i])
18552 return i;
18553 }
18554 return NO_XRI;
18555}
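
/*
 * Editor's illustrative usage sketch (not part of the upstream driver):
 * a caller would translate a wire XRI to the driver's logical index and
 * treat NO_XRI as "not owned by this driver".  Note the lookup above is
 * a linear scan over max_xri entries.  The helper name is hypothetical.
 */
static inline bool lpfc_example_xri_is_ours(struct lpfc_hba *phba, u16 xri)
{
	return lpfc_sli4_xri_inrange(phba, xri) != NO_XRI;
}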
18556
18557/**
18558 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18559 * @vport: pointer to a virtual port.
18560 * @fc_hdr: pointer to a FC frame header.
18561 * @aborted: was the partially assembled receive sequence successfully aborted
18562 *
18563 * This function sends a basic response to a previous unsol sequence abort
18564 * event after aborting the sequence handling.
18565 **/
18566void
18567lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18568 struct fc_frame_header *fc_hdr, bool aborted)
18569{
18570 struct lpfc_hba *phba = vport->phba;
18571 struct lpfc_iocbq *ctiocb = NULL;
18572 struct lpfc_nodelist *ndlp;
18573 uint16_t oxid, rxid, xri, lxri;
18574 uint32_t sid, fctl;
18575 union lpfc_wqe128 *icmd;
18576 int rc;
18577
18578 if (!lpfc_is_link_up(phba))
18579 return;
18580
18581 sid = sli4_sid_from_fc_hdr(fc_hdr);
18582 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18583 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18584
18585 ndlp = lpfc_findnode_did(vport, sid);
18586 if (!ndlp) {
18587 ndlp = lpfc_nlp_init(vport, sid);
18588 if (!ndlp) {
18589 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18590 "1268 Failed to allocate ndlp for "
18591 "oxid:x%x SID:x%x\n", oxid, sid);
18592 return;
18593 }
18594 /* Put ndlp onto pport node list */
18595 lpfc_enqueue_node(vport, ndlp);
18596 }
18597
18598 /* Allocate buffer for rsp iocb */
18599 ctiocb = lpfc_sli_get_iocbq(phba);
18600 if (!ctiocb)
18601 return;
18602
18603 icmd = &ctiocb->wqe;
18604
18605 /* Extract the F_CTL field from FC_HDR */
18606 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18607
18608 ctiocb->ndlp = lpfc_nlp_get(ndlp);
18609 if (!ctiocb->ndlp) {
18610 lpfc_sli_release_iocbq(phba, ctiocb);
18611 return;
18612 }
18613
18614 ctiocb->vport = phba->pport;
18615 ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18616 ctiocb->sli4_lxritag = NO_XRI;
18617 ctiocb->sli4_xritag = NO_XRI;
18618 ctiocb->abort_rctl = FC_RCTL_BA_ACC;
18619
18620 if (fctl & FC_FC_EX_CTX)
18621 /* Exchange responder sent the abort so we
18622 * own the oxid.
18623 */
18624 xri = oxid;
18625 else
18626 xri = rxid;
18627 lxri = lpfc_sli4_xri_inrange(phba, xri);
18628 if (lxri != NO_XRI)
18629 lpfc_set_rrq_active(phba, ndlp, lxri,
18630 (xri == oxid) ? rxid : oxid, 0);
18631 /* For BA_ABTS from exchange responder, if the logical xri with
18632 * the oxid maps to the FCP XRI range, the port no longer has
18633 * that exchange context, send a BLS_RJT. Override the IOCB for
18634 * a BA_RJT.
18635 */
18636 if ((fctl & FC_FC_EX_CTX) &&
18637 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18638 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18639 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18640 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18641 FC_BA_RJT_INV_XID);
18642 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18643 FC_BA_RJT_UNABLE);
18644 }
18645
18646 /* If BA_ABTS failed to abort a partially assembled receive sequence,
18647 * the driver no longer has that exchange, send a BLS_RJT. Override
18648 * the IOCB for a BA_RJT.
18649 */
18650 if (aborted == false) {
18651 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18652 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18653 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18654 FC_BA_RJT_INV_XID);
18655 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18656 FC_BA_RJT_UNABLE);
18657 }
18658
18659 if (fctl & FC_FC_EX_CTX) {
18660 /* ABTS sent by responder to CT exchange, construction
18661 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18662 * field and RX_ID from ABTS for RX_ID field.
18663 */
18664 ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
18665 bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
18666 } else {
18667 /* ABTS sent by initiator to CT exchange, construction
18668 * of BA_ACC will need to allocate a new XRI as for the
18669 * XRI_TAG field.
18670 */
18671 ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
18672 }
18673
18674 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
18675 bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
18676 bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
18677
18678 /* Use CT=VPI */
18679 bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
18680 ndlp->nlp_DID);
18681 bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
18682 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
18683 bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
18684
18685 /* Xmit CT abts response on exchange <xid> */
18686 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18687 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18688 ctiocb->abort_rctl, oxid, phba->link_state);
18689
18690 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18691 if (rc == IOCB_ERROR) {
18692 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18693 "2925 Failed to issue CT ABTS RSP x%x on "
18694 "xri x%x, Data x%x\n",
18695 ctiocb->abort_rctl, oxid,
18696 phba->link_state);
18697 lpfc_nlp_put(ndlp);
18698 ctiocb->ndlp = NULL;
18699 lpfc_sli_release_iocbq(phba, ctiocb);
18700 }
18701}
18702
18703/**
18704 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18705 * @vport: Pointer to the vport on which this sequence was received
18706 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18707 *
18708 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18709 * receive sequence is only partially assembled by the driver, it shall abort
18710 * the partially assembled frames for the sequence. Otherwise, if the
18711 * unsolicited receive sequence has been completely assembled and passed to
18712 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
18713 * indicate that the unsolicited sequence has been aborted. After that, it
18714 * issues a basic accept to accept the abort.
18715 **/
18716static void
18717lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18718 struct hbq_dmabuf *dmabuf)
18719{
18720 struct lpfc_hba *phba = vport->phba;
18721 struct fc_frame_header fc_hdr;
18722 uint32_t fctl;
18723 bool aborted;
18724
18725 /* Make a copy of fc_hdr before the dmabuf is released */
18726 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18727 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18728
18729 if (fctl & FC_FC_EX_CTX) {
18730 /* ABTS by responder to exchange, no cleanup needed */
18731 aborted = true;
18732 } else {
18733 /* ABTS by initiator to exchange, need to do cleanup */
18734 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18735 if (aborted == false)
18736 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18737 }
18738 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18739
18740 if (phba->nvmet_support) {
18741 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18742 return;
18743 }
18744
18745 /* Respond with BA_ACC or BA_RJT accordingly */
18746 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18747}
18748
18749/**
18750 * lpfc_seq_complete - Indicates if a sequence is complete
18751 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18752 *
18753 * This function checks the sequence, starting with the frame described by
18754 * @dmabuf, to see if all the frames associated with this sequence are present.
18755 * The frames associated with this sequence are linked to the @dmabuf using
18756 * the dbuf list. This function looks for three major things. 1) That the
18757 * first frame has a sequence count of zero. 2) That some frame has the last
18758 * frame of sequence bit set. 3) That there are no holes in the sequence
18759 * count. The function returns 1 when the sequence is complete, otherwise 0.
18760 **/
18761static int
18762lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18763{
18764 struct fc_frame_header *hdr;
18765 struct lpfc_dmabuf *d_buf;
18766 struct hbq_dmabuf *seq_dmabuf;
18767 uint32_t fctl;
18768 int seq_count = 0;
18769
18770 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18771 /* make sure first frame of sequence has a sequence count of zero */
18772 if (hdr->fh_seq_cnt != seq_count)
18773 return 0;
18774 fctl = (hdr->fh_f_ctl[0] << 16 |
18775 hdr->fh_f_ctl[1] << 8 |
18776 hdr->fh_f_ctl[2]);
18777 /* If last frame of sequence we can return success. */
18778 if (fctl & FC_FC_END_SEQ)
18779 return 1;
18780 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18781 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18782 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18783 /* If there is a hole in the sequence count then fail. */
18784 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18785 return 0;
18786 fctl = (hdr->fh_f_ctl[0] << 16 |
18787 hdr->fh_f_ctl[1] << 8 |
18788 hdr->fh_f_ctl[2]);
18789 /* If last frame of sequence we can return success. */
18790 if (fctl & FC_FC_END_SEQ)
18791 return 1;
18792 }
18793 return 0;
18794}
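
/*
 * Editor's illustrative sketch (not part of the upstream driver): F_CTL
 * is a 24-bit big-endian field carried as three bytes in the FC header,
 * so lpfc_seq_complete() above assembles it manually before testing
 * FC_FC_END_SEQ.  The helper name is hypothetical.
 */
static inline u32 lpfc_example_fctl(const struct fc_frame_header *hdr)
{
	return (hdr->fh_f_ctl[0] << 16) |
	       (hdr->fh_f_ctl[1] << 8) |
	       hdr->fh_f_ctl[2];
}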
18795
18796/**
18797 * lpfc_prep_seq - Prep sequence for ULP processing
18798 * @vport: Pointer to the vport on which this sequence was received
18799 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
18800 *
18801 * This function takes a sequence, described by a list of frames, and creates
18802 * a list of iocbq structures to describe the sequence. This iocbq list will be
18803 * used to issue to the generic unsolicited sequence handler. This routine
18804 * returns a pointer to the first iocbq in the list. If the function is unable
18805 * to allocate an iocbq then it throws out the received frames that could
18806 * not be described and returns a pointer to the first iocbq. If unable to
18807 * allocate any iocbqs (including the first) this function will return NULL.
18808 **/
18809static struct lpfc_iocbq *
18810lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18811{
18812 struct hbq_dmabuf *hbq_buf;
18813 struct lpfc_dmabuf *d_buf, *n_buf;
18814 struct lpfc_iocbq *first_iocbq, *iocbq;
18815 struct fc_frame_header *fc_hdr;
18816 uint32_t sid;
18817 uint32_t len, tot_len;
18818
18819 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18820 /* remove from receive buffer list */
18821 list_del_init(&seq_dmabuf->hbuf.list);
18822 lpfc_update_rcv_time_stamp(vport);
18823 /* get the Remote Port's SID */
18824 sid = sli4_sid_from_fc_hdr(fc_hdr);
18825 tot_len = 0;
18826 /* Get an iocbq struct to fill in. */
18827 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18828 if (first_iocbq) {
18829 /* Initialize the first IOCB. */
18830 first_iocbq->wcqe_cmpl.total_data_placed = 0;
18831 bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
18832 IOSTAT_SUCCESS);
18833 first_iocbq->vport = vport;
18834
18835 /* Check FC Header to see what TYPE of frame we are rcv'ing */
18836 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18837 bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
18838 sli4_did_from_fc_hdr(fc_hdr));
18839 }
18840
18841 bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
18842 NO_XRI);
18843 bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
18844 be16_to_cpu(fc_hdr->fh_ox_id));
18845
18846 /* put the first buffer into the first iocb */
18847 tot_len = bf_get(lpfc_rcqe_length,
18848 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18849
18850 first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
18851 first_iocbq->bpl_dmabuf = NULL;
18852 /* Keep track of the BDE count */
18853 first_iocbq->wcqe_cmpl.word3 = 1;
18854
18855 if (tot_len > LPFC_DATA_BUF_SIZE)
18856 first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
18857 LPFC_DATA_BUF_SIZE;
18858 else
18859 first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
18860
18861 first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
18862 bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
18863 sid);
18864 }
18865 iocbq = first_iocbq;
18866 /*
18867 * Each IOCBq can have two Buffers assigned, so go through the list
18868 * of buffers for this sequence and save two buffers in each IOCBq
18869 */
18870 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18871 if (!iocbq) {
18872 lpfc_in_buf_free(vport->phba, d_buf);
18873 continue;
18874 }
18875 if (!iocbq->bpl_dmabuf) {
18876 iocbq->bpl_dmabuf = d_buf;
18877 iocbq->wcqe_cmpl.word3++;
18878 /* We need to get the size out of the right CQE */
18879 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18880 len = bf_get(lpfc_rcqe_length,
18881 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18882 iocbq->unsol_rcv_len = len;
18883 iocbq->wcqe_cmpl.total_data_placed += len;
18884 tot_len += len;
18885 } else {
18886 iocbq = lpfc_sli_get_iocbq(vport->phba);
18887 if (!iocbq) {
18888 if (first_iocbq) {
18889 bf_set(lpfc_wcqe_c_status,
18890 &first_iocbq->wcqe_cmpl,
18891 IOSTAT_SUCCESS);
18892 first_iocbq->wcqe_cmpl.parameter =
18893 IOERR_NO_RESOURCES;
18894 }
18895 lpfc_in_buf_free(vport->phba, d_buf);
18896 continue;
18897 }
18898 /* We need to get the size out of the right CQE */
18899 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18900 len = bf_get(lpfc_rcqe_length,
18901 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18902 iocbq->cmd_dmabuf = d_buf;
18903 iocbq->bpl_dmabuf = NULL;
18904 iocbq->wcqe_cmpl.word3 = 1;
18905
18906 if (len > LPFC_DATA_BUF_SIZE)
18907 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
18908 LPFC_DATA_BUF_SIZE;
18909 else
18910 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
18911 len;
18912
18913 tot_len += len;
18914 iocbq->wcqe_cmpl.total_data_placed = tot_len;
18915 bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
18916 sid);
18917 list_add_tail(&iocbq->list, &first_iocbq->list);
18918 }
18919 }
18920 /* Free the sequence's header buffer */
18921 if (!first_iocbq)
18922 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18923
18924 return first_iocbq;
18925}
18926
18927static void
18928lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18929 struct hbq_dmabuf *seq_dmabuf)
18930{
18931 struct fc_frame_header *fc_hdr;
18932 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18933 struct lpfc_hba *phba = vport->phba;
18934
18935 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18936 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18937 if (!iocbq) {
18938 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18939 "2707 Ring %d handler: Failed to allocate "
18940 "iocb Rctl x%x Type x%x received\n",
18941 LPFC_ELS_RING,
18942 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18943 return;
18944 }
18945 if (!lpfc_complete_unsol_iocb(phba,
18946 phba->sli4_hba.els_wq->pring,
18947 iocbq, fc_hdr->fh_r_ctl,
18948 fc_hdr->fh_type)) {
18949 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18950 "2540 Ring %d handler: unexpected Rctl "
18951 "x%x Type x%x received\n",
18952 LPFC_ELS_RING,
18953 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18954 lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
18955 }
18956
18957 /* Free iocb created in lpfc_prep_seq */
18958 list_for_each_entry_safe(curr_iocb, next_iocb,
18959 &iocbq->list, list) {
18960 list_del_init(&curr_iocb->list);
18961 lpfc_sli_release_iocbq(phba, curr_iocb);
18962 }
18963 lpfc_sli_release_iocbq(phba, iocbq);
18964}
18965
18966static void
18967lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18968 struct lpfc_iocbq *rspiocb)
18969{
18970 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
18971
18972 if (pcmd && pcmd->virt)
18973 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18974 kfree(pcmd);
18975 lpfc_sli_release_iocbq(phba, cmdiocb);
18976 lpfc_drain_txq(phba);
18977}
18978
18979static void
18980lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18981 struct hbq_dmabuf *dmabuf)
18982{
18983 struct fc_frame_header *fc_hdr;
18984 struct lpfc_hba *phba = vport->phba;
18985 struct lpfc_iocbq *iocbq = NULL;
18986 union lpfc_wqe128 *pwqe;
18987 struct lpfc_dmabuf *pcmd = NULL;
18988 uint32_t frame_len;
18989 int rc;
18990 unsigned long iflags;
18991
18992 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18993 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18994
18995 /* Send the received frame back */
18996 iocbq = lpfc_sli_get_iocbq(phba);
18997 if (!iocbq) {
18998 /* Queue cq event and wakeup worker thread to process it */
18999 spin_lock_irqsave(&phba->hbalock, iflags);
19000 list_add_tail(&dmabuf->cq_event.list,
19001 &phba->sli4_hba.sp_queue_event);
19002 phba->hba_flag |= HBA_SP_QUEUE_EVT;
19003 spin_unlock_irqrestore(&phba->hbalock, iflags);
19004 lpfc_worker_wake_up(phba);
19005 return;
19006 }
19007
19008 /* Allocate buffer for command payload */
19009 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19010 if (pcmd)
19011 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19012 &pcmd->phys);
19013 if (!pcmd || !pcmd->virt)
19014 goto exit;
19015
19016 INIT_LIST_HEAD(&pcmd->list);
19017
19018 /* copy in the payload */
19019 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19020
19021 iocbq->cmd_dmabuf = pcmd;
19022 iocbq->vport = vport;
19023 iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19024 iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19025 iocbq->num_bdes = 0;
19026
19027 pwqe = &iocbq->wqe;
19028 /* fill in BDE's for command */
19029 pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
19030 pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
19031 pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
19032 pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
19033
19034 pwqe->send_frame.frame_len = frame_len;
19035 pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19036 pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19037 pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19038 pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19039 pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19040 pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19041
19042 pwqe->generic.wqe_com.word7 = 0;
19043 pwqe->generic.wqe_com.word10 = 0;
19044
19045 bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19046 bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
19047 bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
19048 bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19049 bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19050 bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19051 bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19052 bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19053 bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19054 bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19055 bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19056 bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19057 pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19058
19059 iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19060
19061 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19062 if (rc == IOCB_ERROR)
19063 goto exit;
19064
19065 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19066 return;
19067
19068exit:
19069 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19070 "2023 Unable to process MDS loopback frame\n");
19071 if (pcmd && pcmd->virt)
19072 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19073 kfree(pcmd);
19074 if (iocbq)
19075 lpfc_sli_release_iocbq(phba, iocbq);
19076 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19077}
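
/*
 * Editor's illustrative sketch (not part of the upstream driver): the
 * SEND_FRAME WQE above carries the 24-byte FC header as six CPU-endian
 * words, so each big-endian header word is converted with be32_to_cpu()
 * before being placed in the WQE.  The helper name is hypothetical.
 */
static inline void lpfc_example_hdr_to_words(const struct fc_frame_header *hdr,
					     u32 words[6])
{
	const __be32 *src = (const __be32 *)hdr;
	int i;

	for (i = 0; i < 6; i++)
		words[i] = be32_to_cpu(src[i]);
}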
19078
19079/**
19080 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19081 * @phba: Pointer to HBA context object.
19082 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19083 *
19084 * This function is called with no lock held. This function processes all
19085 * the received buffers and gives them to the upper layers when a received
19086 * buffer indicates that it is the final frame in the sequence. The interrupt
19087 * service routine processes received buffers in interrupt context. The
19088 * worker thread calls lpfc_sli4_handle_received_buffer, which will call the
19089 * appropriate receive function when the final frame in a sequence is received.
19090 **/
19091void
19092lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19093 struct hbq_dmabuf *dmabuf)
19094{
19095 struct hbq_dmabuf *seq_dmabuf;
19096 struct fc_frame_header *fc_hdr;
19097 struct lpfc_vport *vport;
19098 uint32_t fcfi;
19099 uint32_t did;
19100
19101 /* Process each received buffer */
19102 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19103
19104 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19105 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19106 vport = phba->pport;
19107 /* Handle MDS Loopback frames */
19108 if (!(phba->pport->load_flag & FC_UNLOADING))
19109 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19110 else
19111 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19112 return;
19113 }
19114
19115 /* check to see if this is a valid type of frame */
19116 if (lpfc_fc_frame_check(phba, fc_hdr)) {
19117 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19118 return;
19119 }
19120
19121 if ((bf_get(lpfc_cqe_code,
19122 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19123 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19124 &dmabuf->cq_event.cqe.rcqe_cmpl);
19125 else
19126 fcfi = bf_get(lpfc_rcqe_fcf_id,
19127 &dmabuf->cq_event.cqe.rcqe_cmpl);
19128
19129 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19130 vport = phba->pport;
19131 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19132 "2023 MDS Loopback %d bytes\n",
19133 bf_get(lpfc_rcqe_length,
19134 &dmabuf->cq_event.cqe.rcqe_cmpl));
19135 /* Handle MDS Loopback frames */
19136 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19137 return;
19138 }
19139
19140 /* d_id this frame is directed to */
19141 did = sli4_did_from_fc_hdr(fc_hdr);
19142
19143 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19144 if (!vport) {
19145 /* throw out the frame */
19146 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19147 return;
19148 }
19149
19150 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19151 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19152 (did != Fabric_DID)) {
19153 /*
19154 * Throw out the frame if we are not pt2pt.
19155 * The pt2pt protocol allows for discovery frames
19156 * to be received without a registered VPI.
19157 */
19158 if (!(vport->fc_flag & FC_PT2PT) ||
19159 (phba->link_state == LPFC_HBA_READY)) {
19160 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19161 return;
19162 }
19163 }
19164
19165 /* Handle the basic abort sequence (BA_ABTS) event */
19166 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19167 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19168 return;
19169 }
19170
19171 /* Link this frame */
19172 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19173 if (!seq_dmabuf) {
19174 /* unable to add frame to vport - throw it out */
19175 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19176 return;
19177 }
19178 /* If not last frame in sequence continue processing frames. */
19179 if (!lpfc_seq_complete(seq_dmabuf))
19180 return;
19181
19182 /* Send the complete sequence to the upper layer protocol */
19183 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19184}
19185
19186/**
19187 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19188 * @phba: pointer to lpfc hba data structure.
19189 *
19190 * This routine is invoked to post rpi header templates to the
19191 * HBA consistent with the SLI-4 interface spec. This routine
19192 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19193 * SLI4_PAGE_SIZE / 64 rpi context headers.
19194 *
19195 * This routine does not require any locks. Its use is expected to be
19196 * at driver load or during reset recovery, when driver execution is
19197 * sequential.
19198 *
19199 * Return codes
19200 * 0 - successful
19201 * -EIO - The mailbox failed to complete successfully.
19202 * When this error occurs, the driver is not guaranteed
19203 * to have any rpi regions posted to the device and
19204 * must either attempt to repost the regions or take a
19205 * fatal error.
19206 **/
19207int
19208lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19209{
19210 struct lpfc_rpi_hdr *rpi_page;
19211 uint32_t rc = 0;
19212 uint16_t lrpi = 0;
19213
19214 /* SLI4 ports that support extents do not require RPI headers. */
19215 if (!phba->sli4_hba.rpi_hdrs_in_use)
19216 goto exit;
19217 if (phba->sli4_hba.extents_in_use)
19218 return -EIO;
19219
19220 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19221 /*
19222 * Assign the rpi headers a physical rpi only if the driver
19223 * has not initialized those resources. A port reset only
19224 * needs the headers posted.
19225 */
19226 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19227 LPFC_RPI_RSRC_RDY)
19228 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19229
19230 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19231 if (rc != MBX_SUCCESS) {
19232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19233 "2008 Error %d posting all rpi "
19234 "headers\n", rc);
19235 rc = -EIO;
19236 break;
19237 }
19238 }
19239
19240 exit:
19241 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19242 LPFC_RPI_RSRC_RDY);
19243 return rc;
19244}
19245
19246/**
19247 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19248 * @phba: pointer to lpfc hba data structure.
19249 * @rpi_page: pointer to the rpi memory region.
19250 *
19251 * This routine is invoked to post a single rpi header to the
19252 * HBA consistent with the SLI-4 interface spec. This memory region
19253 * maps up to 64 rpi context regions.
19254 *
19255 * Return codes
19256 * 0 - successful
19257 * -ENOMEM - No available memory
19258 * -EIO - The mailbox failed to complete successfully.
19259 **/
19260int
19261lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19262{
19263 LPFC_MBOXQ_t *mboxq;
19264 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19265 uint32_t rc = 0;
19266 uint32_t shdr_status, shdr_add_status;
19267 union lpfc_sli4_cfg_shdr *shdr;
19268
19269 /* SLI4 ports that support extents do not require RPI headers. */
19270 if (!phba->sli4_hba.rpi_hdrs_in_use)
19271 return rc;
19272 if (phba->sli4_hba.extents_in_use)
19273 return -EIO;
19274
19275 /* The port is notified of the header region via a mailbox command. */
19276 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19277 if (!mboxq) {
19278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19279 "2001 Unable to allocate memory for issuing "
19280 "SLI_CONFIG_SPECIAL mailbox command\n");
19281 return -ENOMEM;
19282 }
19283
19284 /* Post all rpi memory regions to the port. */
19285 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19286 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19287 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19288 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19289 sizeof(struct lpfc_sli4_cfg_mhdr),
19290 LPFC_SLI4_MBX_EMBED);
19291
19292
19293 /* Post the physical rpi to the port for this rpi header. */
19294 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19295 rpi_page->start_rpi);
19296 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19297 hdr_tmpl, rpi_page->page_count);
19298
19299 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19300 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19301 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19302 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19303 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19304 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19305 mempool_free(mboxq, phba->mbox_mem_pool);
19306 if (shdr_status || shdr_add_status || rc) {
19307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19308 "2514 POST_RPI_HDR mailbox failed with "
19309 "status x%x add_status x%x, mbx status x%x\n",
19310 shdr_status, shdr_add_status, rc);
19311 rc = -ENXIO;
19312 } else {
19313 /*
19314 * The next_rpi stores the next logical module-64 rpi value used
19315 * to post physical rpis in subsequent rpi postings.
19316 */
19317 spin_lock_irq(&phba->hbalock);
19318 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19319 spin_unlock_irq(&phba->hbalock);
19320 }
19321 return rc;
19322}
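
/*
 * Editor's illustrative sketch (not part of the upstream driver): the
 * status-check idiom used after a polled SLI4 config mailbox, as in
 * lpfc_sli4_post_rpi_hdr() above.  The command succeeded only when the
 * issue path returned MBX_SUCCESS and both sub-header status words are
 * zero.  The helper name is hypothetical.
 */
static inline bool lpfc_example_cfg_shdr_ok(union lpfc_sli4_cfg_shdr *shdr,
					    int rc)
{
	return rc == MBX_SUCCESS &&
	       !bf_get(lpfc_mbox_hdr_status, &shdr->response) &&
	       !bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
}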
19323
19324/**
19325 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19326 * @phba: pointer to lpfc hba data structure.
19327 *
19328 * This routine is invoked to allocate the next available rpi from the
19329 * driver's rpi bitmask. If the driver is running low on rpi resources,
19330 * it also posts another rpi header region to the port so that future
19331 * allocations can succeed.
19332 *
19333 * Returns
19334 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19335 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
19336 **/
19337int
19338lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19339{
19340 unsigned long rpi;
19341 uint16_t max_rpi, rpi_limit;
19342 uint16_t rpi_remaining, lrpi = 0;
19343 struct lpfc_rpi_hdr *rpi_hdr;
19344 unsigned long iflag;
19345
19346 /*
19347 * Fetch the next logical rpi. Because this index is logical,
19348 * the driver starts at 0 each time.
19349 */
19350 spin_lock_irqsave(&phba->hbalock, iflag);
19351 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19352 rpi_limit = phba->sli4_hba.next_rpi;
19353
19354 rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
19355 if (rpi >= rpi_limit)
19356 rpi = LPFC_RPI_ALLOC_ERROR;
19357 else {
19358 set_bit(rpi, phba->sli4_hba.rpi_bmask);
19359 phba->sli4_hba.max_cfg_param.rpi_used++;
19360 phba->sli4_hba.rpi_count++;
19361 }
19362 lpfc_printf_log(phba, KERN_INFO,
19363 LOG_NODE | LOG_DISCOVERY,
19364 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19365 (int) rpi, max_rpi, rpi_limit);
19366
19367 /*
19368 * Don't try to allocate more rpi header regions if the device limit
19369 * has been exhausted.
19370 */
19371 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19372 (phba->sli4_hba.rpi_count >= max_rpi)) {
19373 spin_unlock_irqrestore(&phba->hbalock, iflag);
19374 return rpi;
19375 }
19376
19377 /*
19378 * RPI header postings are not required for SLI4 ports capable of
19379 * extents.
19380 */
19381 if (!phba->sli4_hba.rpi_hdrs_in_use) {
19382 spin_unlock_irqrestore(&phba->hbalock, iflag);
19383 return rpi;
19384 }
19385
19386 /*
19387 * If the driver is running low on rpi resources, allocate another
19388 * page now. Note that the next_rpi value is used because
19389 * it represents how many are actually in use whereas max_rpi notes
19390 * the maximum number supported by the device.
19391 */
19392 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19393 spin_unlock_irqrestore(&phba->hbalock, iflag);
19394 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19395 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19396 if (!rpi_hdr) {
19397 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19398 "2002 Error Could not grow rpi "
19399 "count\n");
19400 } else {
19401 lrpi = rpi_hdr->start_rpi;
19402 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19403 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19404 }
19405 }
19406
19407 return rpi;
19408}
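
/*
 * Editor's illustrative sketch (not part of the upstream driver): the
 * bitmap allocator idiom behind lpfc_sli4_alloc_rpi() above.  Under the
 * adapter lock, the first clear bit below the posted limit is claimed
 * with set_bit(); a result at or beyond the limit means the pool is
 * exhausted.  The helper name is hypothetical.
 */
static inline unsigned long lpfc_example_bmask_alloc(unsigned long *bmask,
						     unsigned long limit)
{
	unsigned long id = find_first_zero_bit(bmask, limit);

	if (id < limit)
		set_bit(id, bmask);
	return id;	/* id >= limit means nothing was available */
}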
19409
19410/**
19411 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19412 * @phba: pointer to lpfc hba data structure.
19413 * @rpi: rpi to free
19414 *
19415 * This routine is invoked to release an rpi to the pool of
19416 * available rpis maintained by the driver.
19417 **/
19418static void
19419__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19420{
19421 /*
19422 * if the rpi value indicates a prior unreg has already
19423 * been done, skip the unreg.
19424 */
19425 if (rpi == LPFC_RPI_ALLOC_ERROR)
19426 return;
19427
19428 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19429 phba->sli4_hba.rpi_count--;
19430 phba->sli4_hba.max_cfg_param.rpi_used--;
19431 } else {
19432 lpfc_printf_log(phba, KERN_INFO,
19433 LOG_NODE | LOG_DISCOVERY,
19434 "2016 rpi %x not inuse\n",
19435 rpi);
19436 }
19437}
19438
19439/**
19440 * lpfc_sli4_free_rpi - Release an rpi for reuse.
19441 * @phba: pointer to lpfc hba data structure.
19442 * @rpi: rpi to free
19443 *
19444 * This routine is invoked to release an rpi to the pool of
19445 * available rpis maintained by the driver.
19446 **/
19447void
19448lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19449{
19450 spin_lock_irq(&phba->hbalock);
19451 __lpfc_sli4_free_rpi(phba, rpi);
19452 spin_unlock_irq(&phba->hbalock);
19453}
19454
19455/**
19456 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19457 * @phba: pointer to lpfc hba data structure.
19458 *
19459 * This routine is invoked to free the memory regions (the rpi bitmask
19460 * and the rpi id array) that track the driver's rpi resources.
19461 **/
19462void
19463lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19464{
19465 kfree(phba->sli4_hba.rpi_bmask);
19466 kfree(phba->sli4_hba.rpi_ids);
19467 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19468}
19469
19470/**
19471 * lpfc_sli4_resume_rpi - Resume an rpi with the port
19472 * @ndlp: pointer to lpfc nodelist data structure.
19473 * @cmpl: completion call-back.
19474 * @arg: data to load as MBox 'caller buffer information'
19475 *
19476 * This routine is invoked to issue a RESUME_RPI mailbox command so the
19477 * port resumes traffic on the rpi associated with @ndlp.
19478 **/
19479int
19480lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19481 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19482{
19483 LPFC_MBOXQ_t *mboxq;
19484 struct lpfc_hba *phba = ndlp->phba;
19485 int rc;
19486
19487 /* The port is notified of the header region via a mailbox command. */
19488 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19489 if (!mboxq)
19490 return -ENOMEM;
19491
19492 /* If cmpl assigned, then this nlp_get pairs with
19493 * lpfc_mbx_cmpl_resume_rpi.
19494 *
19495 * Else cmpl is NULL, then this nlp_get pairs with
19496 * lpfc_sli_def_mbox_cmpl.
19497 */
19498 if (!lpfc_nlp_get(ndlp)) {
19499 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19500 "2122 %s: Failed to get nlp ref\n",
19501 __func__);
19502 mempool_free(mboxq, phba->mbox_mem_pool);
19503 return -EIO;
19504 }
19505
19506 /* Post all rpi memory regions to the port. */
19507 lpfc_resume_rpi(mboxq, ndlp);
19508 if (cmpl) {
19509 mboxq->mbox_cmpl = cmpl;
19510 mboxq->ctx_buf = arg;
19511 } else
19512 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19513 mboxq->ctx_ndlp = ndlp;
19514 mboxq->vport = ndlp->vport;
19515 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19516 if (rc == MBX_NOT_FINISHED) {
19517 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19518 "2010 Resume RPI Mailbox failed "
19519 "status %d, mbxStatus x%x\n", rc,
19520 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19521 lpfc_nlp_put(ndlp);
19522 mempool_free(mboxq, phba->mbox_mem_pool);
19523 return -EIO;
19524 }
19525 return 0;
19526}
19527
19528/**
19529 * lpfc_sli4_init_vpi - Initialize a vpi with the port
19530 * @vport: Pointer to the vport for which the vpi is being initialized
19531 *
19532 * This routine is invoked to activate a vpi with the port.
19533 *
19534 * Returns:
19535 * 0 success
19536 * -Evalue otherwise
19537 **/
19538int
19539lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19540{
19541 LPFC_MBOXQ_t *mboxq;
19542 int rc = 0;
19543 int retval = MBX_SUCCESS;
19544 uint32_t mbox_tmo;
19545 struct lpfc_hba *phba = vport->phba;
19546 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19547 if (!mboxq)
19548 return -ENOMEM;
19549 lpfc_init_vpi(phba, mboxq, vport->vpi);
19550 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19551 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19552 if (rc != MBX_SUCCESS) {
19553 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19554 "2022 INIT VPI Mailbox failed "
19555 "status %d, mbxStatus x%x\n", rc,
19556 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19557 retval = -EIO;
19558 }
19559 if (rc != MBX_TIMEOUT)
19560 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19561
19562 return retval;
19563}
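
/*
 * Editor's illustrative sketch (not part of the upstream driver): the
 * mailbox-ownership idiom used by lpfc_sli4_init_vpi() above.  After a
 * synchronous mailbox wait, the caller may free the mailbox only when
 * the command did not time out; on MBX_TIMEOUT the completion path
 * still owns it.  The helper name is hypothetical.
 */
static inline void lpfc_example_mbox_done(struct lpfc_hba *phba,
					  LPFC_MBOXQ_t *mboxq, int rc)
{
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
}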
19564
19565/**
19566 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19567 * @phba: pointer to lpfc hba data structure.
19568 * @mboxq: Pointer to mailbox object.
19569 *
19570 * This is the completion handler for the ADD_FCF_RECORD mailbox command.
19571 * It checks the status reported in the mailbox sub-header and then frees
19572 * the nonembedded mailbox resources.
19573 **/
19574static void
19575lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19576{
19577 void *virt_addr;
19578 union lpfc_sli4_cfg_shdr *shdr;
19579 uint32_t shdr_status, shdr_add_status;
19580
19581 virt_addr = mboxq->sge_array->addr[0];
19582 /* The IOCTL status is embedded in the mailbox subheader. */
19583 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19584 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19585 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19586
19587 if ((shdr_status || shdr_add_status) &&
19588 (shdr_status != STATUS_FCF_IN_USE))
19589 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19590 "2558 ADD_FCF_RECORD mailbox failed with "
19591 "status x%x add_status x%x\n",
19592 shdr_status, shdr_add_status);
19593
19594 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19595}
19596
19597/**
19598 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19599 * @phba: pointer to lpfc hba data structure.
19600 * @fcf_record: pointer to the initialized fcf record to add.
19601 *
19602 * This routine is invoked to manually add a single FCF record. The caller
19603 * must pass a completely initialized FCF_Record. This routine takes
19604 * care of the nonembedded mailbox operations.
19605 **/
19606int
19607lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19608{
19609 int rc = 0;
19610 LPFC_MBOXQ_t *mboxq;
19611 uint8_t *bytep;
19612 void *virt_addr;
19613 struct lpfc_mbx_sge sge;
19614 uint32_t alloc_len, req_len;
19615 uint32_t fcfindex;
19616
19617 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19618 if (!mboxq) {
19619 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19620 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19621 return -ENOMEM;
19622 }
19623
19624 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19625 sizeof(uint32_t);
19626
19627 /* Allocate DMA memory and set up the non-embedded mailbox command */
19628 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19629 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19630 req_len, LPFC_SLI4_MBX_NEMBED);
19631 if (alloc_len < req_len) {
19632 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19633 "2523 Allocated DMA memory size (x%x) is "
19634 "less than the requested DMA memory "
19635 "size (x%x)\n", alloc_len, req_len);
19636 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19637 return -ENOMEM;
19638 }
19639
19640 /*
19641 * Get the first SGE entry from the non-embedded DMA memory. This
19642 * routine only uses a single SGE.
19643 */
19644 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19645 virt_addr = mboxq->sge_array->addr[0];
19646 /*
19647 * Configure the FCF record for FCFI 0. This is the driver's
19648 * hardcoded default and gets used in nonFIP mode.
19649 */
19650 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19651 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19652 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19653
19654 /*
19655 * Copy the fcf_index and the FCF Record Data. The data starts after
19656 * the FCoE header plus word10. The data copy needs to be endian
19657 * correct.
19658 */
19659 bytep += sizeof(uint32_t);
19660 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19661 mboxq->vport = phba->pport;
19662 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19663 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19664 if (rc == MBX_NOT_FINISHED) {
19665 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19666 "2515 ADD_FCF_RECORD mailbox failed with "
19667 "status 0x%x\n", rc);
19668 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19669 rc = -EIO;
19670 } else
19671 rc = 0;
19672
19673 return rc;
19674}
19675
19676/**
19677 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19678 * @phba: pointer to lpfc hba data structure.
19679 * @fcf_record: pointer to the fcf record to write the default data.
19680 * @fcf_index: FCF table entry index.
19681 *
19682 * This routine is invoked to build the driver's default FCF record. The
19683 * values used are hardcoded. This routine handles memory initialization.
19684 *
19685 **/
19686void
19687lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19688 struct fcf_record *fcf_record,
19689 uint16_t fcf_index)
19690{
19691 memset(fcf_record, 0, sizeof(struct fcf_record));
19692 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19693 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19694 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19695 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19696 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19697 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19698 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19699 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19700 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19701 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19702 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19703 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19704 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19705 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19706 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19707 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19708 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19709 /* Set the VLAN bit map */
19710 if (phba->valid_vlan) {
19711 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19712 = 1 << (phba->vlan_id % 8);
19713 }
19714}
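
/*
 * Editor's illustrative sketch (not part of the upstream driver): the
 * VLAN bitmap indexing used above.  Each VLAN id occupies one bit; the
 * byte index is id / 8 and the bit within that byte is id % 8.  The
 * record above was just zeroed, so a plain assignment of the single bit
 * suffices there; a general-purpose helper would OR the bit in, as
 * below.  The helper name is hypothetical.
 */
static inline void lpfc_example_set_vlan_bit(uint8_t *bitmap, u16 vlan_id)
{
	bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
}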
19715
19716/**
19717 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19718 * @phba: pointer to lpfc hba data structure.
19719 * @fcf_index: FCF table entry offset.
19720 *
19721 * This routine is invoked to scan the entire FCF table by reading FCF
19722 * records and processing them one at a time, starting from the @fcf_index,
19723 * for initial FCF discovery or fast FCF failover rediscovery.
19724 *
19725 * Return 0 if the mailbox command is submitted successfully, nonzero
19726 * otherwise.
19727 **/
19728int
19729lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19730{
19731 int rc = 0, error;
19732 LPFC_MBOXQ_t *mboxq;
19733
19734 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19735 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19736 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19737 if (!mboxq) {
19738 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19739 "2000 Failed to allocate mbox for "
19740 "READ_FCF cmd\n");
19741 error = -ENOMEM;
19742 goto fail_fcf_scan;
19743 }
19744 /* Construct the read FCF record mailbox command */
19745 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19746 if (rc) {
19747 error = -EINVAL;
19748 goto fail_fcf_scan;
19749 }
19750 /* Issue the mailbox command asynchronously */
19751 mboxq->vport = phba->pport;
19752 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19753
19754 spin_lock_irq(&phba->hbalock);
19755 phba->hba_flag |= FCF_TS_INPROG;
19756 spin_unlock_irq(&phba->hbalock);
19757
19758 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19759 if (rc == MBX_NOT_FINISHED)
19760 error = -EIO;
19761 else {
19762 /* Reset eligible FCF count for new scan */
19763 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19764 phba->fcf.eligible_fcf_cnt = 0;
19765 error = 0;
19766 }
19767fail_fcf_scan:
19768 if (error) {
19769 if (mboxq)
19770 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19771 /* FCF scan failed, clear FCF_TS_INPROG flag */
19772 spin_lock_irq(&phba->hbalock);
19773 phba->hba_flag &= ~FCF_TS_INPROG;
19774 spin_unlock_irq(&phba->hbalock);
19775 }
19776 return error;
19777}
19778
19779/**
19780 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
19781 * @phba: pointer to lpfc hba data structure.
19782 * @fcf_index: FCF table entry offset.
19783 *
19784 * This routine is invoked to read an FCF record indicated by @fcf_index
19785 * and to use it for FLOGI roundrobin FCF failover.
19786 *
19787 * Return 0 if the mailbox command is submitted successfully, nonzero
19788 * otherwise.
19789 **/
19790int
19791lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19792{
19793 int rc = 0, error;
19794 LPFC_MBOXQ_t *mboxq;
19795
19796 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19797 if (!mboxq) {
19798 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19799 "2763 Failed to allocate mbox for "
19800 "READ_FCF cmd\n");
19801 error = -ENOMEM;
19802 goto fail_fcf_read;
19803 }
19804 /* Construct the read FCF record mailbox command */
19805 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19806 if (rc) {
19807 error = -EINVAL;
19808 goto fail_fcf_read;
19809 }
19810 /* Issue the mailbox command asynchronously */
19811 mboxq->vport = phba->pport;
19812 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19813 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19814 if (rc == MBX_NOT_FINISHED)
19815 error = -EIO;
19816 else
19817 error = 0;
19818
19819fail_fcf_read:
19820 if (error && mboxq)
19821 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19822 return error;
19823}
19824
19825/**
19826 * lpfc_sli4_read_fcf_rec - Read hba fcf record to update the eligible fcf bmask.
19827 * @phba: pointer to lpfc hba data structure.
19828 * @fcf_index: FCF table entry offset.
19829 *
19830 * This routine is invoked to read an FCF record indicated by @fcf_index to
19831 * determine whether it's eligible for the FLOGI roundrobin failover list.
19832 *
19833 * Return 0 if the mailbox command is submitted successfully, nonzero
19834 * otherwise.
19835 **/
19836int
19837lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19838{
19839 int rc = 0, error;
19840 LPFC_MBOXQ_t *mboxq;
19841
19842 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19843 if (!mboxq) {
19844 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19845 "2758 Failed to allocate mbox for "
19846 "READ_FCF cmd\n");
19847 error = -ENOMEM;
19848 goto fail_fcf_read;
19849 }
19850 /* Construct the read FCF record mailbox command */
19851 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19852 if (rc) {
19853 error = -EINVAL;
19854 goto fail_fcf_read;
19855 }
19856 /* Issue the mailbox command asynchronously */
19857 mboxq->vport = phba->pport;
19858 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19859 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19860 if (rc == MBX_NOT_FINISHED)
19861 error = -EIO;
19862 else
19863 error = 0;
19864
19865fail_fcf_read:
19866 if (error && mboxq)
19867 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19868 return error;
19869}
19870
19871/**
19872 * lpfc_check_next_fcf_pri_level
19873 * @phba: pointer to the lpfc_hba struct for this port.
19874 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
19875 * routine when the rr_bmask is empty. The FCF indices are put into the
19876 * rr_bmask based on their priority level, starting from the highest
19877 * priority and going to the lowest. The most likely FCF candidate will be
19878 * in the highest priority group. When this routine is called it searches
19879 * the fcf_pri list for the next lowest priority group and repopulates the
19880 * rr_bmask with only those fcf indexes.
19881 * returns:
19882 * 1=success 0=failure
19883 **/
19884static int
19885lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19886{
19887 uint16_t next_fcf_pri;
19888 uint16_t last_index;
19889 struct lpfc_fcf_pri *fcf_pri;
19890 int rc;
19891 int ret = 0;
19892
19893 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19894 LPFC_SLI4_FCF_TBL_INDX_MAX);
19895 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19896 "3060 Last IDX %d\n", last_index);
19897
19898 /* Verify the priority list has 2 or more entries */
19899 spin_lock_irq(&phba->hbalock);
19900 if (list_empty(&phba->fcf.fcf_pri_list) ||
19901 list_is_singular(&phba->fcf.fcf_pri_list)) {
19902 spin_unlock_irq(&phba->hbalock);
19903 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19904 "3061 Last IDX %d\n", last_index);
19905 return 0; /* Empty rr list */
19906 }
19907 spin_unlock_irq(&phba->hbalock);
19908
19909 next_fcf_pri = 0;
19910 /*
19911 * Clear the rr_bmask and set all of the bits that are at this
19912 * priority.
19913 */
19914 memset(phba->fcf.fcf_rr_bmask, 0,
19915 sizeof(*phba->fcf.fcf_rr_bmask));
19916 spin_lock_irq(&phba->hbalock);
19917 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19918 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19919 continue;
19920 /*
19921 * The first priority that has not had a FLOGI failure
19922 * will be the highest.
19923 */
19924 if (!next_fcf_pri)
19925 next_fcf_pri = fcf_pri->fcf_rec.priority;
19926 spin_unlock_irq(&phba->hbalock);
19927 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19928 rc = lpfc_sli4_fcf_rr_index_set(phba,
19929 fcf_pri->fcf_rec.fcf_index);
19930 if (rc)
19931 return 0;
19932 }
19933 spin_lock_irq(&phba->hbalock);
19934 }
19935 /*
19936 * If next_fcf_pri was not set above and the list is not empty, then
19937 * we have failed FLOGIs on all of them. So reset the FLOGI failed
19938 * flag and start at the beginning.
19939 */
19940 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19941 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19942 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19943 /*
19944 * The first priority level that has not failed
19945 * FLOGI will be the highest.
19946 */
19947 if (!next_fcf_pri)
19948 next_fcf_pri = fcf_pri->fcf_rec.priority;
19949 spin_unlock_irq(&phba->hbalock);
19950 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19951 rc = lpfc_sli4_fcf_rr_index_set(phba,
19952 fcf_pri->fcf_rec.fcf_index);
19953 if (rc)
19954 return 0;
19955 }
19956 spin_lock_irq(&phba->hbalock);
19957 }
19958 } else
19959 ret = 1;
19960 spin_unlock_irq(&phba->hbalock);
19961
19962 return ret;
19963}
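
/*
 * Illustrative sketch (not driver code) of the tiering done above: the
 * rr_bmask only ever holds indexes from one priority level at a time, so
 * refilling it is a filtered walk of the priority list. With hypothetical
 * names pri_list/bmask, the core of each refill is:
 *
 *	memset(bmask, 0, bmask_size);
 *	list_for_each_entry(fcf_pri, &pri_list, list)
 *		if (fcf_pri->fcf_rec.priority == next_fcf_pri &&
 *		    !(fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED))
 *			set_bit(fcf_pri->fcf_rec.fcf_index, bmask);
 */
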
19964/**
19965 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19966 * @phba: pointer to lpfc hba data structure.
19967 *
19968 * This routine is to get the next eligible FCF record index in a round
19969 * robin fashion. If the next eligible FCF record index equals to the
19970 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
19971 * shall be returned, otherwise, the next eligible FCF record's index
19972 * shall be returned.
19973 **/
19974uint16_t
19975lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19976{
19977 uint16_t next_fcf_index;
19978
19979initial_priority:
19980 /* Search start from next bit of currently registered FCF index */
19981 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19982
19983next_priority:
19984 /* Determine the next fcf index to check */
19985 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19986 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19987 LPFC_SLI4_FCF_TBL_INDX_MAX,
19988 next_fcf_index);
19989
19990 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
19991 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19992 /*
19993 * If we have wrapped then we need to clear the bits that
19994 * have been tested so that we can detect when we should
19995 * change the priority level.
19996 */
19997 next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19998 LPFC_SLI4_FCF_TBL_INDX_MAX);
19999 }
20000
20001
20002 /* Check roundrobin failover list empty condition */
20003 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20004 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20005 /*
20006 * If the next fcf index is not found, check if there are lower
20007 * priority level FCFs in the fcf_priority list.
20008 * Set up the rr_bmask with all of the available fcf bits
20009 * at that level and continue the selection process.
20010 */
20011 if (lpfc_check_next_fcf_pri_level(phba))
20012 goto initial_priority;
20013 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20014 "2844 No roundrobin failover FCF available\n");
20015
20016 return LPFC_FCOE_FCF_NEXT_NONE;
20017 }
20018
20019 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20020 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20021 LPFC_FCF_FLOGI_FAILED) {
20022 if (list_is_singular(&phba->fcf.fcf_pri_list))
20023 return LPFC_FCOE_FCF_NEXT_NONE;
20024
20025 goto next_priority;
20026 }
20027
20028 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20029 "2845 Get next roundrobin failover FCF (x%x)\n",
20030 next_fcf_index);
20031
20032 return next_fcf_index;
20033}
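
/*
 * Illustrative sketch (not driver code): the wrap-around scan above is the
 * usual circular-bitmap walk. With a hypothetical bitmap 'bmap' of 'max'
 * bits and current position 'cur', one round-robin step is:
 *
 *	next = find_next_bit(bmap, max, (cur + 1) % max);
 *	if (next >= max)
 *		next = find_first_bit(bmap, max);	... wrapped ...
 *	if (next >= max || next == cur)
 *		return NONE;	... bitmap empty or back at the start ...
 *	return next;
 */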
20034
20035/**
20036 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20037 * @phba: pointer to lpfc hba data structure.
20038 * @fcf_index: index into the FCF table to 'set'
20039 *
20040 * This routine sets the FCF record index in to the eligible bmask for
20041 * roundrobin failover search. It checks to make sure that the index
20042 * does not go beyond the range of the driver allocated bmask dimension
20043 * before setting the bit.
20044 *
20045 * Returns 0 if the index bit is successfully set; otherwise, it returns
20046 * -EINVAL.
20047 **/
20048int
20049lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20050{
20051 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20052 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20053 "2610 FCF (x%x) reached driver's book "
20054 "keeping dimension:x%x\n",
20055 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20056 return -EINVAL;
20057 }
20058 /* Set the eligible FCF record index bmask */
20059 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20060
20061 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20062 "2790 Set FCF (x%x) to roundrobin FCF failover "
20063 "bmask\n", fcf_index);
20064
20065 return 0;
20066}
20067
20068/**
20069 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20070 * @phba: pointer to lpfc hba data structure.
20071 * @fcf_index: index into the FCF table to 'clear'
20072 *
20073 * This routine clears the FCF record index from the eligible bmask for
20074 * roundrobin failover search. It checks to make sure that the index
20075 * does not go beyond the range of the driver allocated bmask dimension
20076 * before clearing the bit.
20077 **/
20078void
20079lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20080{
20081 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20082 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20083 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20084 "2762 FCF (x%x) reached driver's book "
20085 "keeping dimension:x%x\n",
20086 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20087 return;
20088 }
20089 /* Clear the eligible FCF record index bmask */
20090 spin_lock_irq(&phba->hbalock);
20091 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20092 list) {
20093 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20094 list_del_init(&fcf_pri->list);
20095 break;
20096 }
20097 }
20098 spin_unlock_irq(&phba->hbalock);
20099 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20100
20101 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20102 "2791 Clear FCF (x%x) from roundrobin failover "
20103 "bmask\n", fcf_index);
20104}
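
/*
 * Illustrative sketch (not driver code): the set/clear pair above is the
 * whole eligibility interface for the roundrobin bitmap. A caller that
 * learns FCF table entry 'idx' is usable (or not) simply does:
 *
 *	if (eligible)
 *		rc = lpfc_sli4_fcf_rr_index_set(phba, idx);	... 0 or -EINVAL ...
 *	else
 *		lpfc_sli4_fcf_rr_index_clear(phba, idx);
 *
 * Both sides bounds-check idx against LPFC_SLI4_FCF_TBL_INDX_MAX before
 * touching phba->fcf.fcf_rr_bmask.
 */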
20105
20106/**
20107 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20108 * @phba: pointer to lpfc hba data structure.
20109 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20110 *
20111 * This routine is the completion routine for the rediscover FCF table mailbox
20112 * command. If the mailbox command returned failure, it will try to stop the
20113 * FCF rediscover wait timer.
20114 **/
20115static void
20116lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20117{
20118 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20119 uint32_t shdr_status, shdr_add_status;
20120
20121 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20122
20123 shdr_status = bf_get(lpfc_mbox_hdr_status,
20124 &redisc_fcf->header.cfg_shdr.response);
20125 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20126 &redisc_fcf->header.cfg_shdr.response);
20127 if (shdr_status || shdr_add_status) {
20128 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20129 "2746 Requesting for FCF rediscovery failed "
20130 "status x%x add_status x%x\n",
20131 shdr_status, shdr_add_status);
20132 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20133 spin_lock_irq(&phba->hbalock);
20134 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20135 spin_unlock_irq(&phba->hbalock);
20136 /*
20137 * The CVL event triggered FCF rediscover request failed;
20138 * as a last resort, retry the currently registered FCF entry.
20139 */
20140 lpfc_retry_pport_discovery(phba);
20141 } else {
20142 spin_lock_irq(&phba->hbalock);
20143 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20144 spin_unlock_irq(&phba->hbalock);
20145 /*
20146 * The DEAD FCF event triggered FCF rediscover request
20147 * failed; as a last resort, fail over as a link down
20148 * to FCF registration.
20149 */
20150 lpfc_sli4_fcf_dead_failthrough(phba);
20151 }
20152 } else {
20153 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20154 "2775 Start FCF rediscover quiescent timer\n");
20155 /*
20156 * Start the FCF rediscovery wait timer for the pending FCF
20157 * before rescanning the FCF record table.
20158 */
20159 lpfc_fcf_redisc_wait_start_timer(phba);
20160 }
20161
20162 mempool_free(mbox, phba->mbox_mem_pool);
20163}
20164
20165/**
20166 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20167 * @phba: pointer to lpfc hba data structure.
20168 *
20169 * This routine is invoked to request rediscovery of the entire FCF table
20170 * by the port.
20171 **/
20172int
20173lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20174{
20175 LPFC_MBOXQ_t *mbox;
20176 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20177 int rc, length;
20178
20179 /* Cancel retry delay timers to all vports before FCF rediscover */
20180 lpfc_cancel_all_vport_retry_delay_timer(phba);
20181
20182 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20183 if (!mbox) {
20184 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20185 "2745 Failed to allocate mbox for "
20186 "requesting FCF rediscover.\n");
20187 return -ENOMEM;
20188 }
20189
20190 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20191 sizeof(struct lpfc_sli4_cfg_mhdr));
20192 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20193 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20194 length, LPFC_SLI4_MBX_EMBED);
20195
20196 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20197 /* Set count to 0 for invalidating the entire FCF database */
20198 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20199
20200 /* Issue the mailbox command asynchronously */
20201 mbox->vport = phba->pport;
20202 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20203 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20204
20205 if (rc == MBX_NOT_FINISHED) {
20206 mempool_free(mbox, phba->mbox_mem_pool);
20207 return -EIO;
20208 }
20209 return 0;
20210}
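
/*
 * Illustrative sketch (not driver code): the 'length' computation above is
 * the usual recipe for embedded SLI4_CONFIG mailboxes in this file: the
 * requested length excludes the shared config header that precedes the
 * payload:
 *
 *	length = sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
 *		 sizeof(struct lpfc_sli4_cfg_mhdr);
 *	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
 *			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
 *			 length, LPFC_SLI4_MBX_EMBED);
 */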
20211
20212/**
20213 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20214 * @phba: pointer to lpfc hba data structure.
20215 *
20216 * This function is the failover routine of last resort for the FCF DEAD
20217 * event, used when the driver has failed to perform a fast FCF failover.
20218 **/
20219void
20220lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20221{
20222 uint32_t link_state;
20223
20224 /*
20225 * As a last resort, the FCF DEAD event failover is treated as
20226 * a link down, but the link state is saved because we don't want
20227 * it to be changed to Link Down unless it is already down.
20228 */
20229 link_state = phba->link_state;
20230 lpfc_linkdown(phba);
20231 phba->link_state = link_state;
20232
20233 /* Unregister FCF if no devices connected to it */
20234 lpfc_unregister_unused_fcf(phba);
20235}
20236
20237/**
20238 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20239 * @phba: pointer to lpfc hba data structure.
20240 * @rgn23_data: pointer to configure region 23 data.
20241 *
20242 * This function gets SLI3 port configuration region 23 data through a memory
20243 * dump mailbox command. When it successfully retrieves data, the size of the
20244 * data will be returned; otherwise, 0 will be returned.
20245 **/
20246static uint32_t
20247lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20248{
20249 LPFC_MBOXQ_t *pmb = NULL;
20250 MAILBOX_t *mb;
20251 uint32_t offset = 0;
20252 int rc;
20253
20254 if (!rgn23_data)
20255 return 0;
20256
20257 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20258 if (!pmb) {
20259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20260 "2600 failed to allocate mailbox memory\n");
20261 return 0;
20262 }
20263 mb = &pmb->u.mb;
20264
20265 do {
20266 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20267 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20268
20269 if (rc != MBX_SUCCESS) {
20270 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20271 "2601 failed to read config "
20272 "region 23, rc 0x%x Status 0x%x\n",
20273 rc, mb->mbxStatus);
20274 mb->un.varDmp.word_cnt = 0;
20275 }
20276 /*
20277 * Dump mem may return a zero word count when finished or when we
20278 * got a mailbox error; either way we are done.
20279 */
20280 if (mb->un.varDmp.word_cnt == 0)
20281 break;
20282
20283 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20284 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20285
20286 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20287 rgn23_data + offset,
20288 mb->un.varDmp.word_cnt);
20289 offset += mb->un.varDmp.word_cnt;
20290 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20291
20292 mempool_free(pmb, phba->mbox_mem_pool);
20293 return offset;
20294}
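
/*
 * Illustrative sketch (not driver code) of the chunked-read loop above:
 * region 23 is pulled one DUMP mailbox at a time, with 'offset' serving as
 * both the next read position and the running total returned to the caller:
 *
 *	do {
 *		issue DUMP of DMP_REGION_23 at 'offset' (MBX_POLL);
 *		n = amount returned (0 when finished or on error);
 *		n = min(n, DMP_RGN23_SIZE - offset);	... clamp to buffer ...
 *		copy n bytes to rgn23_data + offset;
 *		offset += n;
 *	} while (n && offset < DMP_RGN23_SIZE);
 *	return offset;
 */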
20295
20296/**
20297 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20298 * @phba: pointer to lpfc hba data structure.
20299 * @rgn23_data: pointer to configure region 23 data.
20300 *
20301 * This function gets SLI4 port configuration region 23 data through a memory
20302 * dump mailbox command. When it successfully retrieves data, the size of the
20303 * data will be returned; otherwise, 0 will be returned.
20304 **/
20305static uint32_t
20306lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20307{
20308 LPFC_MBOXQ_t *mboxq = NULL;
20309 struct lpfc_dmabuf *mp = NULL;
20310 struct lpfc_mqe *mqe;
20311 uint32_t data_length = 0;
20312 int rc;
20313
20314 if (!rgn23_data)
20315 return 0;
20316
20317 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20318 if (!mboxq) {
20319 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20320 "3105 failed to allocate mailbox memory\n");
20321 return 0;
20322 }
20323
20324 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20325 goto out;
20326 mqe = &mboxq->u.mqe;
20327 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
20328 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20329 if (rc)
20330 goto out;
20331 data_length = mqe->un.mb_words[5];
20332 if (data_length == 0)
20333 goto out;
20334 if (data_length > DMP_RGN23_SIZE) {
20335 data_length = 0;
20336 goto out;
20337 }
20338 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20339out:
20340 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
20341 return data_length;
20342}
20343
20344/**
20345 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20346 * @phba: pointer to lpfc hba data structure.
20347 *
20348 * This function reads region 23 and parses the TLVs for port status to
20349 * decide if the user disabled the port. If a TLV indicates the
20350 * port is disabled, the hba_flag is set accordingly.
20351 **/
20352void
20353lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20354{
20355 uint8_t *rgn23_data = NULL;
20356 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20357 uint32_t offset = 0;
20358
20359 /* Get adapter Region 23 data */
20360 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20361 if (!rgn23_data)
20362 goto out;
20363
20364 if (phba->sli_rev < LPFC_SLI_REV4)
20365 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20366 else {
20367 if_type = bf_get(lpfc_sli_intf_if_type,
20368 &phba->sli4_hba.sli_intf);
20369 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20370 goto out;
20371 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20372 }
20373
20374 if (!data_size)
20375 goto out;
20376
20377 /* Check the region signature first */
20378 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20379 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20380 "2619 Config region 23 has bad signature\n");
20381 goto out;
20382 }
20383 offset += 4;
20384
20385 /* Check the data structure version */
20386 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20387 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20388 "2620 Config region 23 has bad version\n");
20389 goto out;
20390 }
20391 offset += 4;
20392
20393 /* Parse TLV entries in the region */
20394 while (offset < data_size) {
20395 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20396 break;
20397 /*
20398 * If the TLV is not a driver specific TLV or the driver id is
20399 * not the linux driver id, skip the record.
20400 */
20401 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20402 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20403 (rgn23_data[offset + 3] != 0)) {
20404 offset += rgn23_data[offset + 1] * 4 + 4;
20405 continue;
20406 }
20407
20408 /* Driver found a driver specific TLV in the config region */
20409 sub_tlv_len = rgn23_data[offset + 1] * 4;
20410 offset += 4;
20411 tlv_offset = 0;
20412
20413 /*
20414 * Search for configured port state sub-TLV.
20415 */
20416 while ((offset < data_size) &&
20417 (tlv_offset < sub_tlv_len)) {
20418 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20419 offset += 4;
20420 tlv_offset += 4;
20421 break;
20422 }
20423 if (rgn23_data[offset] != PORT_STE_TYPE) {
20424 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20425 offset += rgn23_data[offset + 1] * 4 + 4;
20426 continue;
20427 }
20428
20429 /* This HBA contains PORT_STE configured */
20430 if (!rgn23_data[offset + 2])
20431 phba->hba_flag |= LINK_DISABLED;
20432
20433 goto out;
20434 }
20435 }
20436
20437out:
20438 kfree(rgn23_data);
20439 return;
20440}
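
/*
 * Illustrative sketch (not driver code) of the region 23 layout parsed
 * above. After a 4-byte signature and a 4-byte version, the region is a
 * sequence of TLV records whose length field counts 32-bit words:
 *
 *	[type:1][len:1][...payload: len * 4 bytes...] ... [LPFC_REGION23_LAST_REC]
 *
 * so the walk advances by rgn23_data[offset + 1] * 4 + 4 per record. The
 * driver-specific record is itself a list of sub-TLVs that is searched for
 * PORT_STE_TYPE; a zero in its first payload byte means the link is
 * disabled.
 */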
20441
20442/**
20443 * lpfc_log_fw_write_cmpl - logs firmware write completion status
20444 * @phba: pointer to lpfc hba data structure
20445 * @shdr_status: wr_object rsp's status field
20446 * @shdr_add_status: wr_object rsp's add_status field
20447 * @shdr_add_status_2: wr_object rsp's add_status_2 field
20448 * @shdr_change_status: wr_object rsp's change_status field
20449 * @shdr_csf: wr_object rsp's csf bit
20450 *
20451 * This routine is intended to be called after a firmware write completes.
20452 * It will log the next action items to be performed by the user to
20453 * instantiate the newly downloaded firmware, or the reason for incompatibility.
20454 **/
20455static void
20456lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20457 u32 shdr_add_status, u32 shdr_add_status_2,
20458 u32 shdr_change_status, u32 shdr_csf)
20459{
20460 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20461 "4198 %s: flash_id x%02x, asic_rev x%02x, "
20462 "status x%02x, add_status x%02x, add_status_2 x%02x, "
20463 "change_status x%02x, csf %01x\n", __func__,
20464 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20465 shdr_status, shdr_add_status, shdr_add_status_2,
20466 shdr_change_status, shdr_csf);
20467
20468 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20469 switch (shdr_add_status_2) {
20470 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20471 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20472 "4199 Firmware write failed: "
20473 "image incompatible with flash x%02x\n",
20474 phba->sli4_hba.flash_id);
20475 break;
20476 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20477 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20478 "4200 Firmware write failed: "
20479 "image incompatible with ASIC "
20480 "architecture x%02x\n",
20481 phba->sli4_hba.asic_rev);
20482 break;
20483 default:
20484 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20485 "4210 Firmware write failed: "
20486 "add_status_2 x%02x\n",
20487 shdr_add_status_2);
20488 break;
20489 }
20490 } else if (!shdr_status && !shdr_add_status) {
20491 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20492 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20493 if (shdr_csf)
20494 shdr_change_status =
20495 LPFC_CHANGE_STATUS_PCI_RESET;
20496 }
20497
20498 switch (shdr_change_status) {
20499 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20500 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20501 "3198 Firmware write complete: System "
20502 "reboot required to instantiate\n");
20503 break;
20504 case (LPFC_CHANGE_STATUS_FW_RESET):
20505 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20506 "3199 Firmware write complete: "
20507 "Firmware reset required to "
20508 "instantiate\n");
20509 break;
20510 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20511 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20512 "3200 Firmware write complete: Port "
20513 "Migration or PCI Reset required to "
20514 "instantiate\n");
20515 break;
20516 case (LPFC_CHANGE_STATUS_PCI_RESET):
20517 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20518 "3201 Firmware write complete: PCI "
20519 "Reset required to instantiate\n");
20520 break;
20521 default:
20522 break;
20523 }
20524 }
20525}
20526
20527/**
20528 * lpfc_wr_object - write an object to the firmware
20529 * @phba: HBA structure that indicates port to create a queue on.
20530 * @dmabuf_list: list of dmabufs to write to the port.
20531 * @size: the total byte value of the objects to write to the port.
20532 * @offset: the current offset to be used to start the transfer.
20533 *
20534 * This routine will create a wr_object mailbox command to send to the port.
20535 * The mailbox command will be constructed using the dma buffers described in
20536 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20537 * BDEs as the embedded mailbox can support. The @offset variable will be
20538 * used to indicate the starting offset of the transfer and will also return
20539 * the offset after the write object mailbox has completed. @size is used to
20540 * determine the end of the object and whether the eof bit should be set.
20541 *
20542 * Return 0 if successful; @offset will contain the new offset to use
20543 * for the next write.
20544 * Return a negative value for error cases.
20545 **/
20546int
20547lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20548 uint32_t size, uint32_t *offset)
20549{
20550 struct lpfc_mbx_wr_object *wr_object;
20551 LPFC_MBOXQ_t *mbox;
20552 int rc = 0, i = 0;
20553 uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20554 uint32_t shdr_change_status = 0, shdr_csf = 0;
20555 uint32_t mbox_tmo;
20556 struct lpfc_dmabuf *dmabuf;
20557 uint32_t written = 0;
20558 bool check_change_status = false;
20559
20560 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20561 if (!mbox)
20562 return -ENOMEM;
20563
20564 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20565 LPFC_MBOX_OPCODE_WRITE_OBJECT,
20566 sizeof(struct lpfc_mbx_wr_object) -
20567 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20568
20569 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20570 wr_object->u.request.write_offset = *offset;
20571 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20572 wr_object->u.request.object_name[0] =
20573 cpu_to_le32(wr_object->u.request.object_name[0]);
20574 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20575 list_for_each_entry(dmabuf, dmabuf_list, list) {
20576 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20577 break;
20578 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20579 wr_object->u.request.bde[i].addrHigh =
20580 putPaddrHigh(dmabuf->phys);
20581 if (written + SLI4_PAGE_SIZE >= size) {
20582 wr_object->u.request.bde[i].tus.f.bdeSize =
20583 (size - written);
20584 written += (size - written);
20585 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20586 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20587 check_change_status = true;
20588 } else {
20589 wr_object->u.request.bde[i].tus.f.bdeSize =
20590 SLI4_PAGE_SIZE;
20591 written += SLI4_PAGE_SIZE;
20592 }
20593 i++;
20594 }
20595 wr_object->u.request.bde_count = i;
20596 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20597 if (!phba->sli4_hba.intr_enable)
20598 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20599 else {
20600 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20601 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20602 }
20603 /* The IOCTL status is embedded in the mailbox subheader. */
20604 shdr_status = bf_get(lpfc_mbox_hdr_status,
20605 &wr_object->header.cfg_shdr.response);
20606 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20607 &wr_object->header.cfg_shdr.response);
20608 shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20609 &wr_object->header.cfg_shdr.response);
20610 if (check_change_status) {
20611 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20612 &wr_object->u.response);
20613 shdr_csf = bf_get(lpfc_wr_object_csf,
20614 &wr_object->u.response);
20615 }
20616
20617 if (!phba->sli4_hba.intr_enable)
20618 mempool_free(mbox, phba->mbox_mem_pool);
20619 else if (rc != MBX_TIMEOUT)
20620 mempool_free(mbox, phba->mbox_mem_pool);
20621 if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20622 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20623 "3025 Write Object mailbox failed with "
20624 "status x%x add_status x%x, add_status_2 x%x, "
20625 "mbx status x%x\n",
20626 shdr_status, shdr_add_status, shdr_add_status_2,
20627 rc);
20628 rc = -ENXIO;
20629 *offset = shdr_add_status;
20630 } else {
20631 *offset += wr_object->u.response.actual_write_length;
20632 }
20633
20634 if (rc || check_change_status)
20635 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20636 shdr_add_status_2, shdr_change_status,
20637 shdr_csf);
20638 return rc;
20639}
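
/*
 * Illustrative sketch (not driver code): a caller driving lpfc_wr_object()
 * for a firmware download typically loops on @offset until the whole image
 * has been written; each call consumes up to LPFC_MBX_WR_CONFIG_MAX_BDE
 * page-sized buffers. Assuming hypothetical fw_size/dmabuf_list covering
 * the image:
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (offset < fw_size && !rc)
 *		rc = lpfc_wr_object(phba, &dmabuf_list,
 *				    fw_size - offset, &offset);
 */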
20640
20641/**
20642 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20643 * @vport: pointer to vport data structure.
20644 *
20645 * This function iterates through the mailboxq and cleans up all REG_LOGIN
20646 * and REG_VPI mailbox commands associated with the vport. This function
20647 * is called when the driver wants to restart discovery of the vport due to
20648 * a Clear Virtual Link event.
20649 **/
20650void
20651lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20652{
20653 struct lpfc_hba *phba = vport->phba;
20654 LPFC_MBOXQ_t *mb, *nextmb;
20655 struct lpfc_nodelist *ndlp;
20656 struct lpfc_nodelist *act_mbx_ndlp = NULL;
20657 LIST_HEAD(mbox_cmd_list);
20658 uint8_t restart_loop;
20659
20660 /* Clean up internally queued mailbox commands with the vport */
20661 spin_lock_irq(&phba->hbalock);
20662 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20663 if (mb->vport != vport)
20664 continue;
20665
20666 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20667 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20668 continue;
20669
20670 list_move_tail(&mb->list, &mbox_cmd_list);
20671 }
20672 /* Clean up active mailbox command with the vport */
20673 mb = phba->sli.mbox_active;
20674 if (mb && (mb->vport == vport)) {
20675 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20676 (mb->u.mb.mbxCommand == MBX_REG_VPI))
20677 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20678 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20679 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20680
20681 /* This reference is local to this routine. The
20682 * reference is removed at routine exit.
20683 */
20684 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20685
20686 /* Unregister the RPI when mailbox complete */
20687 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20688 }
20689 }
20690 /* Cleanup any mailbox completions which are not yet processed */
20691 do {
20692 restart_loop = 0;
20693 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
20694 /*
20695 * If this mailbox is already processed or it is
20696 * for another vport, ignore it.
20697 */
20698 if ((mb->vport != vport) ||
20699 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20700 continue;
20701
20702 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20703 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20704 continue;
20705
20706 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20707 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20708 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20709 /* Unregister the RPI when mailbox complete */
20710 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20711 restart_loop = 1;
20712 spin_unlock_irq(&phba->hbalock);
20713 spin_lock(&ndlp->lock);
20714 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20715 spin_unlock(&ndlp->lock);
20716 spin_lock_irq(&phba->hbalock);
20717 break;
20718 }
20719 }
20720 } while (restart_loop);
20721
20722 spin_unlock_irq(&phba->hbalock);
20723
20724 /* Release the cleaned-up mailbox commands */
20725 while (!list_empty(&mbox_cmd_list)) {
20726 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20727 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20728 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20729 mb->ctx_ndlp = NULL;
20730 if (ndlp) {
20731 spin_lock(&ndlp->lock);
20732 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20733 spin_unlock(&ndlp->lock);
20734 lpfc_nlp_put(ndlp);
20735 }
20736 }
20737 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
20738 }
20739
20740 /* Release the ndlp with the cleaned-up active mailbox command */
20741 if (act_mbx_ndlp) {
20742 spin_lock(&act_mbx_ndlp->lock);
20743 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20744 spin_unlock(&act_mbx_ndlp->lock);
20745 lpfc_nlp_put(act_mbx_ndlp);
20746 }
20747}
20748
20749/**
20750 * lpfc_drain_txq - Drain the txq
20751 * @phba: Pointer to HBA context object.
20752 *
20753 * This function attempts to submit IOCBs on the txq
20754 * to the adapter. For SLI4 adapters, the txq contains
20755 * ELS IOCBs that have been deferred because there
20756 * are no SGLs. This congestion can occur with large
20757 * vport counts during node discovery.
20758 **/
20759
20760uint32_t
20761lpfc_drain_txq(struct lpfc_hba *phba)
20762{
20763 LIST_HEAD(completions);
20764 struct lpfc_sli_ring *pring;
20765 struct lpfc_iocbq *piocbq = NULL;
20766 unsigned long iflags = 0;
20767 char *fail_msg = NULL;
20768 uint32_t txq_cnt = 0;
20769 struct lpfc_queue *wq;
20770 int ret = 0;
20771
20772 if (phba->link_flag & LS_MDS_LOOPBACK) {
20773 /* MDS WQEs are posted only to the first WQ */
20774 wq = phba->sli4_hba.hdwq[0].io_wq;
20775 if (unlikely(!wq))
20776 return 0;
20777 pring = wq->pring;
20778 } else {
20779 wq = phba->sli4_hba.els_wq;
20780 if (unlikely(!wq))
20781 return 0;
20782 pring = lpfc_phba_elsring(phba);
20783 }
20784
20785 if (unlikely(!pring) || list_empty(&pring->txq))
20786 return 0;
20787
20788 spin_lock_irqsave(&pring->ring_lock, iflags);
20789 list_for_each_entry(piocbq, &pring->txq, list) {
20790 txq_cnt++;
20791 }
20792
20793 if (txq_cnt > pring->txq_max)
20794 pring->txq_max = txq_cnt;
20795
20796 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20797
20798 while (!list_empty(&pring->txq)) {
20799 spin_lock_irqsave(&pring->ring_lock, iflags);
20800
20801 piocbq = lpfc_sli_ringtx_get(phba, pring);
20802 if (!piocbq) {
20803 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20804 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20805 "2823 txq empty and txq_cnt is %d\n ",
20806 txq_cnt);
20807 break;
20808 }
20809 txq_cnt--;
20810
20811 ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
20812
20813 if (ret && ret != IOCB_BUSY) {
20814 fail_msg = " - Cannot send IO ";
20815 piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
20816 }
20817 if (fail_msg) {
20818 piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
20819 /* Failed means we can't issue and need to cancel */
20820 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20821 "2822 IOCB failed %s iotag 0x%x "
20822 "xri 0x%x %d flg x%x\n",
20823 fail_msg, piocbq->iotag,
20824 piocbq->sli4_xritag, ret,
20825 piocbq->cmd_flag);
20826 list_add_tail(&piocbq->list, &completions);
20827 fail_msg = NULL;
20828 }
20829 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20830 if (txq_cnt == 0 || ret == IOCB_BUSY)
20831 break;
20832 }
20833 /* Cancel all the IOCBs that cannot be issued */
20834 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20835 IOERR_SLI_ABORTED);
20836
20837 return txq_cnt;
20838}
20839
20840/**
20841 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
20842 * @phba: Pointer to HBA context object.
20843 * @pwqeq: Pointer to command WQE.
20844 * @sglq: Pointer to the scatter gather queue object.
20845 *
20846 * This routine converts the bpl or bde that is in the WQE
20847 * to a sgl list for the sli4 hardware. The physical address
20848 * of the bpl/bde is converted back to a virtual address.
20849 * If the WQE contains a BPL then the list of BDEs is
20850 * converted to sli4_sge entries. If the WQE contains a single
20851 * BDE then it is converted to a single sli4_sge.
20852 * The WQE is still in cpu endianness so the contents of
20853 * the bpl can be used without byte swapping.
20854 *
20855 * Returns valid XRI = Success, NO_XRI = Failure.
20856 */
20857static uint16_t
20858lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20859 struct lpfc_sglq *sglq)
20860{
20861 uint16_t xritag = NO_XRI;
20862 struct ulp_bde64 *bpl = NULL;
20863 struct ulp_bde64 bde;
20864 struct sli4_sge *sgl = NULL;
20865 struct lpfc_dmabuf *dmabuf;
20866 union lpfc_wqe128 *wqe;
20867 int numBdes = 0;
20868 int i = 0;
20869 uint32_t offset = 0; /* accumulated offset in the sg request list */
20870 int inbound = 0; /* number of sg reply entries inbound from firmware */
20871 uint32_t cmd;
20872
20873 if (!pwqeq || !sglq)
20874 return xritag;
20875
20876 sgl = (struct sli4_sge *)sglq->sgl;
20877 wqe = &pwqeq->wqe;
20878 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20879
20880 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20881 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20882 return sglq->sli4_xritag;
20883 numBdes = pwqeq->num_bdes;
20884 if (numBdes) {
20885 /* The addrHigh and addrLow fields within the WQE
20886 * have not been byteswapped yet so there is no
20887 * need to swap them back.
20888 */
20889 if (pwqeq->bpl_dmabuf)
20890 dmabuf = pwqeq->bpl_dmabuf;
20891 else
20892 return xritag;
20893
20894 bpl = (struct ulp_bde64 *)dmabuf->virt;
20895 if (!bpl)
20896 return xritag;
20897
20898 for (i = 0; i < numBdes; i++) {
20899 /* Should already be byte swapped. */
20900 sgl->addr_hi = bpl->addrHigh;
20901 sgl->addr_lo = bpl->addrLow;
20902
20903 sgl->word2 = le32_to_cpu(sgl->word2);
20904 if ((i+1) == numBdes)
20905 bf_set(lpfc_sli4_sge_last, sgl, 1);
20906 else
20907 bf_set(lpfc_sli4_sge_last, sgl, 0);
20908 /* swap the size field back to the cpu so we
20909 * can assign it to the sgl.
20910 */
20911 bde.tus.w = le32_to_cpu(bpl->tus.w);
20912 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20913 /* The offsets in the sgl need to be accumulated
20914 * separately for the request and reply lists.
20915 * The request is always first, the reply follows.
20916 */
20917 switch (cmd) {
20918 case CMD_GEN_REQUEST64_WQE:
20919 /* add up the reply sg entries */
20920 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20921 inbound++;
20922 /* first inbound? reset the offset */
20923 if (inbound == 1)
20924 offset = 0;
20925 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20926 bf_set(lpfc_sli4_sge_type, sgl,
20927 LPFC_SGE_TYPE_DATA);
20928 offset += bde.tus.f.bdeSize;
20929 break;
20930 case CMD_FCP_TRSP64_WQE:
20931 bf_set(lpfc_sli4_sge_offset, sgl, 0);
20932 bf_set(lpfc_sli4_sge_type, sgl,
20933 LPFC_SGE_TYPE_DATA);
20934 break;
20935 case CMD_FCP_TSEND64_WQE:
20936 case CMD_FCP_TRECEIVE64_WQE:
20937 bf_set(lpfc_sli4_sge_type, sgl,
20938 bpl->tus.f.bdeFlags);
20939 if (i < 3)
20940 offset = 0;
20941 else
20942 offset += bde.tus.f.bdeSize;
20943 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20944 break;
20945 }
20946 sgl->word2 = cpu_to_le32(sgl->word2);
20947 bpl++;
20948 sgl++;
20949 }
20950 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20951 /* The addrHigh and addrLow fields of the BDE have not
20952 * been byteswapped yet so they need to be swapped
20953 * before putting them in the sgl.
20954 */
20955 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20956 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20957 sgl->word2 = le32_to_cpu(sgl->word2);
20958 bf_set(lpfc_sli4_sge_last, sgl, 1);
20959 sgl->word2 = cpu_to_le32(sgl->word2);
20960 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20961 }
20962 return sglq->sli4_xritag;
20963}
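
/*
 * Illustrative sketch (not driver code): the word2 handling above is the
 * standard read-modify-write for a little-endian descriptor word: convert
 * to CPU order, update the bitfields, convert back before the hardware
 * sees it:
 *
 *	sgl->word2 = le32_to_cpu(sgl->word2);
 *	bf_set(lpfc_sli4_sge_last, sgl, is_last);
 *	bf_set(lpfc_sli4_sge_offset, sgl, offset);
 *	sgl->word2 = cpu_to_le32(sgl->word2);
 *
 * (is_last is a hypothetical flag for the final SGE in the list.)
 */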
20964
20965/**
20966 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
20967 * @phba: Pointer to HBA context object.
20968 * @qp: Pointer to HDW queue.
20969 * @pwqe: Pointer to command WQE.
20970 **/
20971int
20972lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20973 struct lpfc_iocbq *pwqe)
20974{
20975 union lpfc_wqe128 *wqe = &pwqe->wqe;
20976 struct lpfc_async_xchg_ctx *ctxp;
20977 struct lpfc_queue *wq;
20978 struct lpfc_sglq *sglq;
20979 struct lpfc_sli_ring *pring;
20980 unsigned long iflags;
20981 uint32_t ret = 0;
20982
20983 /* NVME_LS and NVME_LS ABTS requests. */
20984 if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
20985 pring = phba->sli4_hba.nvmels_wq->pring;
20986 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20987 qp, wq_access);
20988 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20989 if (!sglq) {
20990 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20991 return WQE_BUSY;
20992 }
20993 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20994 pwqe->sli4_xritag = sglq->sli4_xritag;
20995 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20996 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20997 return WQE_ERROR;
20998 }
20999 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21000 pwqe->sli4_xritag);
21001 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21002 if (ret) {
21003 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21004 return ret;
21005 }
21006
21007 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21008 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21009
21010 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21011 return 0;
21012 }
21013
21014 /* NVME_FCREQ and NVME_ABTS requests */
21015 if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21016 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21017 wq = qp->io_wq;
21018 pring = wq->pring;
21019
21020 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21021
21022 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21023 qp, wq_access);
21024 ret = lpfc_sli4_wq_put(wq, wqe);
21025 if (ret) {
21026 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21027 return ret;
21028 }
21029 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21030 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21031
21032 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21033 return 0;
21034 }
21035
21036 /* NVMET requests */
21037 if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21038 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21039 wq = qp->io_wq;
21040 pring = wq->pring;
21041
21042 ctxp = pwqe->context_un.axchg;
21043 sglq = ctxp->ctxbuf->sglq;
21044 if (pwqe->sli4_xritag == NO_XRI) {
21045 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21046 pwqe->sli4_xritag = sglq->sli4_xritag;
21047 }
21048 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21049 pwqe->sli4_xritag);
21050 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21051
21052 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21053 qp, wq_access);
21054 ret = lpfc_sli4_wq_put(wq, wqe);
21055 if (ret) {
21056 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21057 return ret;
21058 }
21059 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21060 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21061
21062 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21063 return 0;
21064 }
21065 return WQE_ERROR;
21066}
21067
21068/**
21069 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21070 * @phba: Pointer to HBA context object.
21071 * @cmdiocb: Pointer to driver command iocb object.
21072 * @cmpl: completion function.
21073 *
21074 * Fill the appropriate fields for the abort WQE and call
21075 * internal routine lpfc_sli4_issue_wqe to send the WQE
21076 * This function is called with hbalock held and no ring_lock held.
21077 *
21078 * RETURNS 0 - SUCCESS
21079 **/
21080
21081int
21082lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21083 void *cmpl)
21084{
21085 struct lpfc_vport *vport = cmdiocb->vport;
21086 struct lpfc_iocbq *abtsiocb = NULL;
21087 union lpfc_wqe128 *abtswqe;
21088 struct lpfc_io_buf *lpfc_cmd;
21089 int retval = IOCB_ERROR;
21090 u16 xritag = cmdiocb->sli4_xritag;
21091
21092 /*
21093 * The SCSI command cannot be in the txq; it is in flight because
21094 * pCmd is still pointing at the SCSI command we have to abort. There
21095 * is no need to search the txcmplq. Just send an abort to the FW.
21096 */
21097
21098 abtsiocb = __lpfc_sli_get_iocbq(phba);
21099 if (!abtsiocb)
21100 return WQE_NORESOURCE;
21101
21102 /* Indicate the IO is being aborted by the driver. */
21103 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21104
21105 abtswqe = &abtsiocb->wqe;
21106 memset(abtswqe, 0, sizeof(*abtswqe));
21107
21108 if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
21109 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21110 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21111 abtswqe->abort_cmd.rsrvd5 = 0;
21112 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21113 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21114 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21115 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21116 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21117 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21118 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21119
21120 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21121 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21122 abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21123 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21124 abtsiocb->cmd_flag |= LPFC_IO_FCP;
21125 if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21126 abtsiocb->cmd_flag |= LPFC_IO_NVME;
21127 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21128 abtsiocb->cmd_flag |= LPFC_IO_FOF;
21129 abtsiocb->vport = vport;
21130 abtsiocb->cmd_cmpl = cmpl;
21131
21132 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21133 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21134
21135 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21136 "0359 Abort xri x%x, original iotag x%x, "
21137 "abort cmd iotag x%x retval x%x\n",
21138 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21139
21140 if (retval) {
21141 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21142 __lpfc_sli_release_iocbq(phba, abtsiocb);
21143 }
21144
21145 return retval;
21146}
21147
21148#ifdef LPFC_MXP_STAT
21149/**
21150 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21151 * @phba: pointer to lpfc hba data structure.
21152 * @hwqid: index of the HWQ to snapshot.
21153 *
21154 * The purpose of this routine is to take a snapshot of the pbl, pvt and busy
21155 * counts 15 seconds after a test case starts running.
21156 *
21157 * The user should call lpfc_debugfs_multixripools_write before running a test
21158 * case to clear stat_snapshot_taken. Then the user starts a test case. While
21159 * the test case is running, stat_snapshot_taken is incremented by 1 each time
21160 * this routine is called from the heartbeat timer. When stat_snapshot_taken is
21161 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21162 **/
21163void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21164{
21165 struct lpfc_sli4_hdw_queue *qp;
21166 struct lpfc_multixri_pool *multixri_pool;
21167 struct lpfc_pvt_pool *pvt_pool;
21168 struct lpfc_pbl_pool *pbl_pool;
21169 u32 txcmplq_cnt;
21170
21171 qp = &phba->sli4_hba.hdwq[hwqid];
21172 multixri_pool = qp->p_multixri_pool;
21173 if (!multixri_pool)
21174 return;
21175
21176 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21177 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21178 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21179 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21180
21181 multixri_pool->stat_pbl_count = pbl_pool->count;
21182 multixri_pool->stat_pvt_count = pvt_pool->count;
21183 multixri_pool->stat_busy_count = txcmplq_cnt;
21184 }
21185
21186 multixri_pool->stat_snapshot_taken++;
21187}
21188#endif
21189
21190/**
21191 * lpfc_adjust_pvt_pool_count - Adjust private pool count
21192 * @phba: pointer to lpfc hba data structure.
21193 * @hwqid: index of the HWQ to adjust.
21194 *
21195 * This routine moves some XRIs from the private to the public pool when the
21196 * private pool is not busy.
21197 **/
21198void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21199{
21200 struct lpfc_multixri_pool *multixri_pool;
21201 u32 io_req_count;
21202 u32 prev_io_req_count;
21203
21204 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21205 if (!multixri_pool)
21206 return;
21207 io_req_count = multixri_pool->io_req_count;
21208 prev_io_req_count = multixri_pool->prev_io_req_count;
21209
21210 if (prev_io_req_count != io_req_count) {
21211 /* Private pool is busy */
21212 multixri_pool->prev_io_req_count = io_req_count;
21213 } else {
21214 /* Private pool is not busy.
21215 * Move XRIs from private to public pool.
21216 */
21217 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21218 }
21219}
21220
21221/**
21222 * lpfc_adjust_high_watermark - Adjust high watermark
21223 * @phba: pointer to lpfc hba data structure.
21224 * @hwqid: index of the HWQ to adjust.
21225 *
21226 * This routine sets the high watermark to the number of outstanding XRIs,
21227 * but makes sure the new value is between xri_limit/2 and xri_limit.
21228 **/
21229void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21230{
21231 u32 new_watermark;
21232 u32 watermark_max;
21233 u32 watermark_min;
21234 u32 xri_limit;
21235 u32 txcmplq_cnt;
21236 u32 abts_io_bufs;
21237 struct lpfc_multixri_pool *multixri_pool;
21238 struct lpfc_sli4_hdw_queue *qp;
21239
21240 qp = &phba->sli4_hba.hdwq[hwqid];
21241 multixri_pool = qp->p_multixri_pool;
21242 if (!multixri_pool)
21243 return;
21244 xri_limit = multixri_pool->xri_limit;
21245
21246 watermark_max = xri_limit;
21247 watermark_min = xri_limit / 2;
21248
21249 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21250 abts_io_bufs = qp->abts_scsi_io_bufs;
21251 abts_io_bufs += qp->abts_nvme_io_bufs;
21252
21253 new_watermark = txcmplq_cnt + abts_io_bufs;
21254 new_watermark = min(watermark_max, new_watermark);
21255 new_watermark = max(watermark_min, new_watermark);
21256 multixri_pool->pvt_pool.high_watermark = new_watermark;
21257
21258#ifdef LPFC_MXP_STAT
21259 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21260 new_watermark);
21261#endif
21262}
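
/*
 * Illustrative sketch (not driver code): the min()/max() pair above is
 * equivalent to clamping the outstanding-XRI count into the allowed band:
 *
 *	new_watermark = clamp(txcmplq_cnt + abts_io_bufs,
 *			      xri_limit / 2, xri_limit);
 */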
21263
21264/**
21265 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21266 * @phba: pointer to lpfc hba data structure.
21267 * @hwqid: index of the HWQ to drain.
21268 *
21269 * This routine is called from the heartbeat timer when pvt_pool is idle.
21270 * All free XRIs are moved from the private to the public pool on hwqid in
21271 * two steps. The first step moves (all - low_watermark) XRIs;
21272 * the second step moves the rest of the XRIs.
21273 **/
21274void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21275{
21276 struct lpfc_pbl_pool *pbl_pool;
21277 struct lpfc_pvt_pool *pvt_pool;
21278 struct lpfc_sli4_hdw_queue *qp;
21279 struct lpfc_io_buf *lpfc_ncmd;
21280 struct lpfc_io_buf *lpfc_ncmd_next;
21281 unsigned long iflag;
21282 struct list_head tmp_list;
21283 u32 tmp_count;
21284
21285 qp = &phba->sli4_hba.hdwq[hwqid];
21286 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21287 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21288 tmp_count = 0;
21289
21290 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21291 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21292
21293 if (pvt_pool->count > pvt_pool->low_watermark) {
21294 /* Step 1: move (all - low_watermark) from pvt_pool
21295 * to pbl_pool
21296 */
21297
21298 /* Move low watermark of bufs from pvt_pool to tmp_list */
21299 INIT_LIST_HEAD(&tmp_list);
21300 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21301 &pvt_pool->list, list) {
21302 list_move_tail(&lpfc_ncmd->list, &tmp_list);
21303 tmp_count++;
21304 if (tmp_count >= pvt_pool->low_watermark)
21305 break;
21306 }
21307
21308 /* Move all bufs from pvt_pool to pbl_pool */
21309 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21310
21311 /* Move all bufs from tmp_list to pvt_pool */
21312 list_splice(&tmp_list, &pvt_pool->list);
21313
21314 pbl_pool->count += (pvt_pool->count - tmp_count);
21315 pvt_pool->count = tmp_count;
21316 } else {
21317 /* Step 2: move the rest from pvt_pool to pbl_pool */
21318 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21319 pbl_pool->count += pvt_pool->count;
21320 pvt_pool->count = 0;
21321 }
21322
21323 spin_unlock(&pvt_pool->lock);
21324 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21325}
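
/*
 * Illustrative sketch (not driver code) of the list shuffle above: to keep
 * low_watermark bufs private while publishing the rest, the routine peels
 * the first low_watermark entries onto a temporary list, splices what is
 * left into pbl_pool, then splices the temporaries back:
 *
 *	move first low_watermark entries from pvt_pool->list to tmp_list;
 *	list_splice_init(&pvt_pool->list, &pbl_pool->list);	... publish ...
 *	list_splice(&tmp_list, &pvt_pool->list);	... keep the reserve ...
 */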
21326
21327/**
21328 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21329 * @phba: pointer to lpfc hba data structure
21330 * @qp: pointer to HDW queue
21331 * @pbl_pool: specified public free XRI pool
21332 * @pvt_pool: specified private free XRI pool
21333 * @count: number of XRIs to move
21334 *
21335 * This routine tries to move some free common bufs from the specified pbl_pool
21336 * to the specified pvt_pool. It might move fewer than @count XRIs if there are
21337 * not enough in the public pool.
21338 *
21339 * Return:
21340 * true - if XRIs are successfully moved from the specified pbl_pool to the
21341 * specified pvt_pool
21342 * false - if the specified pbl_pool is empty or locked by someone else
21343 **/
21344static bool
21345_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21346 struct lpfc_pbl_pool *pbl_pool,
21347 struct lpfc_pvt_pool *pvt_pool, u32 count)
21348{
21349 struct lpfc_io_buf *lpfc_ncmd;
21350 struct lpfc_io_buf *lpfc_ncmd_next;
21351 unsigned long iflag;
21352 int ret;
21353
21354 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21355 if (ret) {
21356 if (pbl_pool->count) {
21357 /* Move a batch of XRIs from public to private pool */
21358 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21359 list_for_each_entry_safe(lpfc_ncmd,
21360 lpfc_ncmd_next,
21361 &pbl_pool->list,
21362 list) {
21363 list_move_tail(&lpfc_ncmd->list,
21364 &pvt_pool->list);
21365 pvt_pool->count++;
21366 pbl_pool->count--;
21367 count--;
21368 if (count == 0)
21369 break;
21370 }
21371
21372 spin_unlock(&pvt_pool->lock);
21373 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21374 return true;
21375 }
21376 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21377 }
21378
21379 return false;
21380}
21381
21382/**
21383 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21384 * @phba: pointer to lpfc hba data structure.
21385 * @hwqid: index of the HWQ to replenish.
21386 * @count: number of XRIs to move
21387 *
21388 * This routine tries to find some free common bufs in one of the public pools
21389 * using a round-robin method. The search always starts from the local hwqid,
21390 * then the HWQ found last time (rrb_next_hwqid). Once a public pool is found,
21391 * a batch of free common bufs is moved to the private pool on hwqid.
21392 * It might move fewer than @count XRIs if there are not enough in the pool.
21393 **/
21394void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21395{
21396 struct lpfc_multixri_pool *multixri_pool;
21397 struct lpfc_multixri_pool *next_multixri_pool;
21398 struct lpfc_pvt_pool *pvt_pool;
21399 struct lpfc_pbl_pool *pbl_pool;
21400 struct lpfc_sli4_hdw_queue *qp;
21401 u32 next_hwqid;
21402 u32 hwq_count;
21403 int ret;
21404
21405 qp = &phba->sli4_hba.hdwq[hwqid];
21406 multixri_pool = qp->p_multixri_pool;
21407 pvt_pool = &multixri_pool->pvt_pool;
21408 pbl_pool = &multixri_pool->pbl_pool;
21409
21410 /* Check if local pbl_pool is available */
21411 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21412 if (ret) {
21413#ifdef LPFC_MXP_STAT
21414 multixri_pool->local_pbl_hit_count++;
21415#endif
21416 return;
21417 }
21418
21419 hwq_count = phba->cfg_hdw_queue;
21420
21421 /* Get the next hwqid which was found last time */
21422 next_hwqid = multixri_pool->rrb_next_hwqid;
21423
21424 do {
21425 /* Go to next hwq */
21426 next_hwqid = (next_hwqid + 1) % hwq_count;
21427
21428 next_multixri_pool =
21429 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21430 pbl_pool = &next_multixri_pool->pbl_pool;
21431
21432 /* Check if the public free xri pool is available */
21433 ret = _lpfc_move_xri_pbl_to_pvt(
21434 phba, qp, pbl_pool, pvt_pool, count);
21435
21436 /* Exit while-loop if success or all hwqid are checked */
21437 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21438
21439 /* Starting point for the next time */
21440 multixri_pool->rrb_next_hwqid = next_hwqid;
21441
21442 if (!ret) {
21443 /* stats: all public pools are empty */
21444 multixri_pool->pbl_empty_count++;
21445 }
21446
21447#ifdef LPFC_MXP_STAT
21448 if (ret) {
21449 if (next_hwqid == hwqid)
21450 multixri_pool->local_pbl_hit_count++;
21451 else
21452 multixri_pool->other_pbl_hit_count++;
21453 }
21454#endif
21455}
21456
21457/**
21458 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21459 * @phba: pointer to lpfc hba data structure.
21460 * @hwqid: index of the HWQ to replenish.
21461 *
21462 * This routine gets a batch of XRIs from pbl_pool if pvt_pool has fewer
21463 * XRIs than its low watermark.
21464 **/
21465void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21466{
21467 struct lpfc_multixri_pool *multixri_pool;
21468 struct lpfc_pvt_pool *pvt_pool;
21469
21470 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21471 pvt_pool = &multixri_pool->pvt_pool;
21472
21473 if (pvt_pool->count < pvt_pool->low_watermark)
21474 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21475}
21476
21477/**
21478 * lpfc_release_io_buf - Return one IO buf back to free pool
21479 * @phba: pointer to lpfc hba data structure.
21480 * @lpfc_ncmd: IO buf to be returned.
21481 * @qp: pointer to the HWQ the IO buf belongs to.
21482 *
21483 * This routine returns one IO buf back to the free pool. If this is an urgent
21484 * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
21485 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21486 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
21487 * lpfc_io_buf_list_put.
21488 **/
21489void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21490 struct lpfc_sli4_hdw_queue *qp)
21491{
21492 unsigned long iflag;
21493 struct lpfc_pbl_pool *pbl_pool;
21494 struct lpfc_pvt_pool *pvt_pool;
21495 struct lpfc_epd_pool *epd_pool;
21496 u32 txcmplq_cnt;
21497 u32 xri_owned;
21498 u32 xri_limit;
21499 u32 abts_io_bufs;
21500
21501 /* MUST zero fields if buffer is reused by another protocol */
21502 lpfc_ncmd->nvmeCmd = NULL;
21503 lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21504
21505 if (phba->cfg_xpsgl && !phba->nvmet_support &&
21506 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21507 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21508
21509 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21510 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21511
21512 if (phba->cfg_xri_rebalancing) {
21513 if (lpfc_ncmd->expedite) {
21514 /* Return to expedite pool */
21515 epd_pool = &phba->epd_pool;
21516 spin_lock_irqsave(&epd_pool->lock, iflag);
21517 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21518 epd_pool->count++;
21519 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21520 return;
21521 }
21522
21523 /* Avoid invalid access if an IO sneaks in and is being rejected
21524 * just _after_ xri pools are destroyed in lpfc_offline.
21525 * Nothing much can be done at this point.
21526 */
21527 if (!qp->p_multixri_pool)
21528 return;
21529
21530 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21531 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21532
21533 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21534 abts_io_bufs = qp->abts_scsi_io_bufs;
21535 abts_io_bufs += qp->abts_nvme_io_bufs;
21536
21537 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21538 xri_limit = qp->p_multixri_pool->xri_limit;
21539
21540#ifdef LPFC_MXP_STAT
21541 if (xri_owned <= xri_limit)
21542 qp->p_multixri_pool->below_limit_count++;
21543 else
21544 qp->p_multixri_pool->above_limit_count++;
21545#endif
21546
21547 /* XRI goes to either public or private free xri pool
21548 * based on watermark and xri_limit
21549 */
21550 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21551 (xri_owned < xri_limit &&
21552 pvt_pool->count < pvt_pool->high_watermark)) {
21553 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21554 qp, free_pvt_pool);
21555 list_add_tail(&lpfc_ncmd->list,
21556 &pvt_pool->list);
21557 pvt_pool->count++;
21558 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21559 } else {
21560 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21561 qp, free_pub_pool);
21562 list_add_tail(&lpfc_ncmd->list,
21563 &pbl_pool->list);
21564 pbl_pool->count++;
21565 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21566 }
21567 } else {
21568 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21569 qp, free_xri);
21570 list_add_tail(&lpfc_ncmd->list,
21571 &qp->lpfc_io_buf_list_put);
21572 qp->put_io_bufs++;
21573 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21574 iflag);
21575 }
21576}
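
/*
 * Illustrative sketch (not driver code) of the routing decision above,
 * where xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs:
 *
 *	if (pvt_pool->count < pvt_pool->low_watermark ||
 *	    (xri_owned < xri_limit &&
 *	     pvt_pool->count < pvt_pool->high_watermark))
 *		add buf to pvt_pool;	... keep it local ...
 *	else
 *		add buf to pbl_pool;	... share it ...
 */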
21577
21578/**
21579 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21580 * @phba: pointer to lpfc hba data structure.
21581 * @qp: pointer to HDW queue
21582 * @pvt_pool: pointer to private pool data structure.
21583 * @ndlp: pointer to lpfc nodelist data structure, used for RRQ testing.
21584 *
21585 * This routine tries to get one free IO buf from the private pool.
21586 *
21587 * Return:
21588 * pointer to one free IO buf - if private pool is not empty
21589 * NULL - if private pool is empty
21590 **/
21591static struct lpfc_io_buf *
21592lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21593 struct lpfc_sli4_hdw_queue *qp,
21594 struct lpfc_pvt_pool *pvt_pool,
21595 struct lpfc_nodelist *ndlp)
21596{
21597 struct lpfc_io_buf *lpfc_ncmd;
21598 struct lpfc_io_buf *lpfc_ncmd_next;
21599 unsigned long iflag;
21600
21601 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21602 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21603 &pvt_pool->list, list) {
21604 if (lpfc_test_rrq_active(
21605 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21606 continue;
21607 list_del(&lpfc_ncmd->list);
21608 pvt_pool->count--;
21609 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21610 return lpfc_ncmd;
21611 }
21612 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21613
21614 return NULL;
21615}
21616
21617/**
21618 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21619 * @phba: pointer to lpfc hba data structure.
21620 *
21621 * This routine tries to get one free IO buf from the expedite pool.
21622 *
21623 * Return:
21624 * pointer to one free IO buf - if expedite pool is not empty
21625 * NULL - if expedite pool is empty
21626 **/
21627static struct lpfc_io_buf *
21628lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21629{
21630 struct lpfc_io_buf *lpfc_ncmd;
21631 struct lpfc_io_buf *lpfc_ncmd_next;
21632 unsigned long iflag;
21633 struct lpfc_epd_pool *epd_pool;
21634
21635 epd_pool = &phba->epd_pool;
21636 lpfc_ncmd = NULL;
21637
21638 spin_lock_irqsave(&epd_pool->lock, iflag);
21639 if (epd_pool->count > 0) {
21640 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21641 &epd_pool->list, list) {
21642 list_del(&lpfc_ncmd->list);
21643 epd_pool->count--;
21644 break;
21645 }
21646 }
21647 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21648
21649 return lpfc_ncmd;
21650}
21651
21652/**
21653 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
21654 * @phba: pointer to lpfc hba data structure.
21655 * @ndlp: pointer to lpfc nodelist data structure.
21656 * @hwqid: index of the HWQ the buffer should come from
21657 * @expedite: 1 means this request is urgent.
21658 *
21659 * This routine will do the following actions and then return a pointer to
21660 * one free IO buf.
21661 *
21662 * 1. If the private free xri pool is empty, move some XRIs from the public
21663 *    to the private pool.
21664 * 2. Get one XRI from private free xri pool.
21665 * 3. If we fail to get one from pvt_pool and this is an expedite request,
21666 * get one free xri from expedite pool.
21667 *
21668 * Note: ndlp is only used on SCSI side for RRQ testing.
21669 * The caller should pass NULL for ndlp on NVME side.
21670 *
21671 * Return:
21672 * pointer to one free IO buf - if one could be obtained
21673 * NULL - if no IO buf is available
21674 **/
21675static struct lpfc_io_buf *
21676lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
21677 struct lpfc_nodelist *ndlp,
21678 int hwqid, int expedite)
21679{
21680 struct lpfc_sli4_hdw_queue *qp;
21681 struct lpfc_multixri_pool *multixri_pool;
21682 struct lpfc_pvt_pool *pvt_pool;
21683 struct lpfc_io_buf *lpfc_ncmd;
21684
21685 qp = &phba->sli4_hba.hdwq[hwqid];
21686 lpfc_ncmd = NULL;
21687 if (!qp) {
21688 lpfc_printf_log(phba, KERN_INFO,
21689 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21690 "5556 NULL qp for hwqid x%x\n", hwqid);
21691 return lpfc_ncmd;
21692 }
21693 multixri_pool = qp->p_multixri_pool;
21694 if (!multixri_pool) {
21695 lpfc_printf_log(phba, KERN_INFO,
21696 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21697 "5557 NULL multixri for hwqid x%x\n", hwqid);
21698 return lpfc_ncmd;
21699 }
21700 pvt_pool = &multixri_pool->pvt_pool;
21701 if (!pvt_pool) {
21702 lpfc_printf_log(phba, KERN_INFO,
21703 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21704 "5558 NULL pvt_pool for hwqid x%x\n", hwqid);
21705 return lpfc_ncmd;
21706 }
21707 multixri_pool->io_req_count++;
21708
21709 /* If pvt_pool is empty, move some XRIs from public to private pool */
21710 if (pvt_pool->count == 0)
21711 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21712
21713 /* Get one XRI from private free xri pool */
21714 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
21715
21716 if (lpfc_ncmd) {
21717 lpfc_ncmd->hdwq = qp;
21718 lpfc_ncmd->hdwq_no = hwqid;
21719 } else if (expedite) {
21720 /* If we fail to get one from pvt_pool and this is an expedite
21721 * request, get one free xri from expedite pool.
21722 */
21723 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21724 }
21725
21726 return lpfc_ncmd;
21727}
21728
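/* Pop the first usable buffer off the hdwq get-list: skip XRIs that are
 * still RRQ-active for @ndlp and buffers whose SGL was never posted to the
 * port (LPFC_SBUF_NOT_POSTED). Called with io_buf_list_get_lock held.
 */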
21729static inline struct lpfc_io_buf *
21730lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21731{
21732 struct lpfc_sli4_hdw_queue *qp;
21733 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21734
21735 qp = &phba->sli4_hba.hdwq[idx];
21736 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21737 &qp->lpfc_io_buf_list_get, list) {
21738 if (lpfc_test_rrq_active(phba, ndlp,
21739 lpfc_cmd->cur_iocbq.sli4_lxritag))
21740 continue;
21741
21742 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
21743 continue;
21744
21745 list_del_init(&lpfc_cmd->list);
21746 qp->get_io_bufs--;
21747 lpfc_cmd->hdwq = qp;
21748 lpfc_cmd->hdwq_no = idx;
21749 return lpfc_cmd;
21750 }
21751 return NULL;
21752}
21753
21754/**
21755 * lpfc_get_io_buf - Get one IO buffer from free pool
21756 * @phba: The HBA for which this call is being executed.
21757 * @ndlp: pointer to lpfc nodelist data structure.
21758 * @hwqid: index of the HWQ to get the buffer from
21759 * @expedite: 1 means this request is urgent.
21760 *
21761 * This routine gets one IO buffer from the free pool. If
21762 * cfg_xri_rebalancing==1, it takes an IO buffer from the multiXRI pools;
21763 * otherwise it takes one from the head of the HWQ's io_buf_list.
21764 *
21765 * Note: ndlp is only used on SCSI side for RRQ testing.
21766 * The caller should pass NULL for ndlp on NVME side.
21767 *
21768 * Return codes:
21769 * NULL - Error
21770 * Pointer to lpfc_io_buf - Success
21771 **/
21772struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
21773 struct lpfc_nodelist *ndlp,
21774 u32 hwqid, int expedite)
21775{
21776 struct lpfc_sli4_hdw_queue *qp;
21777 unsigned long iflag;
21778 struct lpfc_io_buf *lpfc_cmd;
21779
21780 qp = &phba->sli4_hba.hdwq[hwqid];
21781 lpfc_cmd = NULL;
21782 if (!qp) {
21783 lpfc_printf_log(phba, KERN_WARNING,
21784 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21785 "5555 NULL qp for hwqid x%x\n", hwqid);
21786 return lpfc_cmd;
21787 }
21788
21789 if (phba->cfg_xri_rebalancing)
21790 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
21791 phba, ndlp, hwqid, expedite);
21792 else {
21793 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
21794 qp, alloc_xri_get);
21795 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
21796 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21797 if (!lpfc_cmd) {
21798 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
21799 qp, alloc_xri_put);
21800 list_splice(&qp->lpfc_io_buf_list_put,
21801 &qp->lpfc_io_buf_list_get);
21802 qp->get_io_bufs += qp->put_io_bufs;
21803 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
21804 qp->put_io_bufs = 0;
21805 spin_unlock(&qp->io_buf_list_put_lock);
21806 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
21807 expedite)
21808 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21809 }
21810 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
21811 }
21812
21813 return lpfc_cmd;
21814}
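/*
 * Editor's usage sketch (not part of the driver): a typical submission path
 * pairs lpfc_get_io_buf() with lpfc_release_io_buf(). The error handling
 * and surrounding code here are hypothetical.
 *
 *	struct lpfc_io_buf *lpfc_cmd;
 *
 *	// ndlp is only needed on the SCSI side for RRQ testing;
 *	// NVME callers pass NULL.
 *	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!lpfc_cmd)
 *		return -EBUSY;	// pools exhausted; caller retries later
 *
 *	// ... build the WQE in lpfc_cmd->cur_iocbq and issue it ...
 *
 *	// on completion or submit failure, hand the buffer back to the
 *	// pool it came from:
 *	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 */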
21815
21816/**
21817 * lpfc_read_object - Retrieve object data from HBA
21818 * @phba: The HBA for which this call is being executed.
21819 * @rdobject: Pathname of object data we want to read.
21820 * @datap: Pointer to where data will be copied to.
21821 * @datasz: size of data area
21822 *
21823 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
21824 * The data will be truncated if datasz is not large enough.
21825 * Version 1 is not supported with the embedded mbox cmd, so we use version 0.
21826 * Returns the actual bytes read from the object.
21827 */
21828int
21829lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
21830 uint32_t datasz)
21831{
21832 struct lpfc_mbx_read_object *read_object;
21833 LPFC_MBOXQ_t *mbox;
21834 int rc, length, eof, j, byte_cnt = 0;
21835 uint32_t shdr_status, shdr_add_status;
21836 union lpfc_sli4_cfg_shdr *shdr;
21837 struct lpfc_dmabuf *pcmd;
21838 u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
21839
21840 /* sanity check on queue memory */
21841 if (!datap)
21842 return -ENODEV;
21843
21844 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
21845 if (!mbox)
21846 return -ENOMEM;
21847 length = (sizeof(struct lpfc_mbx_read_object) -
21848 sizeof(struct lpfc_sli4_cfg_mhdr));
21849 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
21850 LPFC_MBOX_OPCODE_READ_OBJECT,
21851 length, LPFC_SLI4_MBX_EMBED);
21852 read_object = &mbox->u.mqe.un.read_object;
21853 shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
21854
21855 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
21856 bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
21857 read_object->u.request.rd_object_offset = 0;
21858 read_object->u.request.rd_object_cnt = 1;
21859
21860 memset((void *)read_object->u.request.rd_object_name, 0,
21861 LPFC_OBJ_NAME_SZ);
21862 scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
21863 for (j = 0; j < strlen(rdobject); j++)
21864 read_object->u.request.rd_object_name[j] =
21865 cpu_to_le32(rd_object_name[j]);
21866
21867 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
21868 if (pcmd)
21869 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
21870 if (!pcmd || !pcmd->virt) {
21871 kfree(pcmd);
21872 mempool_free(mbox, phba->mbox_mem_pool);
21873 return -ENOMEM;
21874 }
21875 memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
21876 read_object->u.request.rd_object_hbuf[0].pa_lo =
21877 putPaddrLow(pcmd->phys);
21878 read_object->u.request.rd_object_hbuf[0].pa_hi =
21879 putPaddrHigh(pcmd->phys);
21880 read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
21881
21882 mbox->vport = phba->pport;
21883 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21884 mbox->ctx_ndlp = NULL;
21885
21886 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
21887 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
21888 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
21889
21890 if (shdr_status == STATUS_FAILED &&
21891 shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
21892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
21893 "4674 No port cfg file in FW.\n");
21894 byte_cnt = -ENOENT;
21895 } else if (shdr_status || shdr_add_status || rc) {
21896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
21897 "2625 READ_OBJECT mailbox failed with "
21898 "status x%x add_status x%x, mbx status x%x\n",
21899 shdr_status, shdr_add_status, rc);
21900 byte_cnt = -ENXIO;
21901 } else {
21902 /* Success */
21903 length = read_object->u.response.rd_object_actual_rlen;
21904 eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
21905 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
21906 "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
21907 length, datasz, eof);
21908
21909 /* Detect that the port config file exists but is empty */
21910 if (!length && eof) {
21911 byte_cnt = 0;
21912 goto exit;
21913 }
21914
21915 byte_cnt = length;
21916 lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
21917 }
21918
21919 exit:
21920 /* This is an embedded SLI4 mailbox with an external buffer allocated.
21921 * Free the pcmd and then cleanup with the correct routine.
21922 */
21923 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
21924 kfree(pcmd);
21925 lpfc_sli4_mbox_cmd_free(phba, mbox);
21926 return byte_cnt;
21927}
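/*
 * Editor's sketch of a caller (the object name, buffer size and error
 * handling here are hypothetical):
 *
 *	u32 *buf;
 *	int len;
 *
 *	buf = kzalloc(LPFC_BPL_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	len = lpfc_read_object(phba, "/cfg/example.cfg", buf, LPFC_BPL_SIZE);
 *	if (len < 0)	// -ENOENT: object absent; -ENXIO: mailbox error
 *		goto out_free;
 *	// len bytes of object data are now in buf
 */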
21928
21929/**
21930 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
21931 * @phba: The HBA for which this call is being executed.
21932 * @lpfc_buf: IO buf structure to append the SGL chunk
21933 *
21934 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
21935 * and will allocate an SGL chunk if the pool is empty.
21936 *
21937 * Return codes:
21938 * NULL - Error
21939 * Pointer to sli4_hybrid_sgl - Success
21940 **/
21941struct sli4_hybrid_sgl *
21942lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21943{
21944 struct sli4_hybrid_sgl *list_entry = NULL;
21945 struct sli4_hybrid_sgl *tmp = NULL;
21946 struct sli4_hybrid_sgl *allocated_sgl = NULL;
21947 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21948 struct list_head *buf_list = &hdwq->sgl_list;
21949 unsigned long iflags;
21950
21951 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21952
21953 if (likely(!list_empty(buf_list))) {
21954 /* break off 1 chunk from the sgl_list */
21955 list_for_each_entry_safe(list_entry, tmp,
21956 buf_list, list_node) {
21957 list_move_tail(&list_entry->list_node,
21958 &lpfc_buf->dma_sgl_xtra_list);
21959 break;
21960 }
21961 } else {
21962 /* allocate more */
21963 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21964 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21965 cpu_to_node(hdwq->io_wq->chann));
21966 if (!tmp) {
21967 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21968 "8353 error kmalloc memory for HDWQ "
21969 "%d %s\n",
21970 lpfc_buf->hdwq_no, __func__);
21971 return NULL;
21972 }
21973
21974 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
21975 GFP_ATOMIC, &tmp->dma_phys_sgl);
21976 if (!tmp->dma_sgl) {
21977 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21978 "8354 error pool_alloc memory for HDWQ "
21979 "%d %s\n",
21980 lpfc_buf->hdwq_no, __func__);
21981 kfree(tmp);
21982 return NULL;
21983 }
21984
21985 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21986 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
21987 }
21988
21989 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
21990 struct sli4_hybrid_sgl,
21991 list_node);
21992
21993 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21994
21995 return allocated_sgl;
21996}
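/*
 * Editor's note with a usage sketch: the GFP_ATOMIC allocations above are
 * deliberate since this path can run at I/O submission time. A caller that
 * has outgrown the embedded SGL typically does (error handling is
 * hypothetical):
 *
 *	struct sli4_hybrid_sgl *sgl;
 *
 *	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
 *	if (!sgl)
 *		return 1;	// no memory; fail or requeue the IO
 *	// chain sgl->dma_sgl / sgl->dma_phys_sgl into the SGE list ...
 *
 * The chunk is handed back on release: lpfc_release_io_buf() calls
 * lpfc_put_sgl_per_hdwq() when cfg_xpsgl is set and the buf's
 * dma_sgl_xtra_list is non-empty.
 */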
21997
21998/**
21999 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22000 * @phba: The HBA for which this call is being executed.
22001 * @lpfc_buf: IO buf structure with the SGL chunk
22002 *
22003 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22004 *
22005 * Return codes:
22006 * 0 - Success
22007 * -EINVAL - Error
22008 **/
22009int
22010lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22011{
22012 int rc = 0;
22013 struct sli4_hybrid_sgl *list_entry = NULL;
22014 struct sli4_hybrid_sgl *tmp = NULL;
22015 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22016 struct list_head *buf_list = &hdwq->sgl_list;
22017 unsigned long iflags;
22018
22019 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22020
22021 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22022 list_for_each_entry_safe(list_entry, tmp,
22023 &lpfc_buf->dma_sgl_xtra_list,
22024 list_node) {
22025 list_move_tail(&list_entry->list_node,
22026 buf_list);
22027 }
22028 } else {
22029 rc = -EINVAL;
22030 }
22031
22032 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22033 return rc;
22034}
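/* Note (editor): lpfc_release_io_buf() only calls this when
 * dma_sgl_xtra_list is non-empty, so an -EINVAL return indicates a driver
 * logic error rather than an expected runtime condition.
 */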
22035
22036/**
22037 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22038 * @phba: phba object
22039 * @hdwq: hdwq to cleanup sgl buff resources on
22040 *
22041 * This routine frees all SGL chunks of hdwq SGL chunk pool.
22042 *
22043 * Return codes:
22044 * None
22045 **/
22046void
22047lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22048 struct lpfc_sli4_hdw_queue *hdwq)
22049{
22050 struct list_head *buf_list = &hdwq->sgl_list;
22051 struct sli4_hybrid_sgl *list_entry = NULL;
22052 struct sli4_hybrid_sgl *tmp = NULL;
22053 unsigned long iflags;
22054
22055 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22056
22057 /* Free sgl pool */
22058 list_for_each_entry_safe(list_entry, tmp,
22059 buf_list, list_node) {
22060 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22061 list_entry->dma_sgl,
22062 list_entry->dma_phys_sgl);
22063 list_del(&list_entry->list_node);
22064 kfree(list_entry);
22065 }
22066
22067 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22068}
22069
22070/**
22071 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22072 * @phba: The HBA for which this call is being executed.
22073 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22074 *
22075 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22076 * and will allocate a CMD/RSP buffer if the pool is empty.
22077 *
22078 * Return codes:
22079 * NULL - Error
22080 * Pointer to fcp_cmd_rsp_buf - Success
22081 **/
22082struct fcp_cmd_rsp_buf *
22083lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22084 struct lpfc_io_buf *lpfc_buf)
22085{
22086 struct fcp_cmd_rsp_buf *list_entry = NULL;
22087 struct fcp_cmd_rsp_buf *tmp = NULL;
22088 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22089 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22090 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22091 unsigned long iflags;
22092
22093 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22094
22095 if (likely(!list_empty(buf_list))) {
22096 /* break off 1 chunk from the list */
22097 list_for_each_entry_safe(list_entry, tmp,
22098 buf_list,
22099 list_node) {
22100 list_move_tail(&list_entry->list_node,
22101 &lpfc_buf->dma_cmd_rsp_list);
22102 break;
22103 }
22104 } else {
22105 /* allocate more */
22106 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22107 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22108 cpu_to_node(hdwq->io_wq->chann));
22109 if (!tmp) {
22110 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22111 "8355 error kmalloc memory for HDWQ "
22112 "%d %s\n",
22113 lpfc_buf->hdwq_no, __func__);
22114 return NULL;
22115 }
22116
22117 tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
22118 GFP_ATOMIC,
22119 &tmp->fcp_cmd_rsp_dma_handle);
22120
22121 if (!tmp->fcp_cmnd) {
22122 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22123 "8356 error pool_alloc memory for HDWQ "
22124 "%d %s\n",
22125 lpfc_buf->hdwq_no, __func__);
22126 kfree(tmp);
22127 return NULL;
22128 }
22129
22130 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22131 sizeof(struct fcp_cmnd));
22132
22133 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22134 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22135 }
22136
22137 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22138 struct fcp_cmd_rsp_buf,
22139 list_node);
22140
22141 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22142
22143 return allocated_buf;
22144}
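/*
 * Editor's sketch of the layout produced above: fcp_cmnd and fcp_rsp live
 * back to back in a single dma_pool allocation, so one DMA handle covers
 * both frames:
 *
 *	tmp->fcp_cmnd                         tmp->fcp_rsp
 *	|<-- sizeof(struct fcp_cmnd) bytes -->|<-- struct fcp_rsp -->|
 *	^
 *	tmp->fcp_cmd_rsp_dma_handle (bus address of fcp_cmnd)
 */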
22145
22146/**
22147 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22148 * @phba: The HBA for which this call is being executed.
22149 * @lpfc_buf: IO buf structure with the CMD/RSP buf
22150 *
22151 * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
22152 *
22153 * Return codes:
22154 * 0 - Success
22155 * -EINVAL - Error
22156 **/
22157int
22158lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22159 struct lpfc_io_buf *lpfc_buf)
22160{
22161 int rc = 0;
22162 struct fcp_cmd_rsp_buf *list_entry = NULL;
22163 struct fcp_cmd_rsp_buf *tmp = NULL;
22164 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22165 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22166 unsigned long iflags;
22167
22168 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22169
22170 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22171 list_for_each_entry_safe(list_entry, tmp,
22172 &lpfc_buf->dma_cmd_rsp_list,
22173 list_node) {
22174 list_move_tail(&list_entry->list_node,
22175 buf_list);
22176 }
22177 } else {
22178 rc = -EINVAL;
22179 }
22180
22181 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22182 return rc;
22183}
22184
22185/**
22186 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22187 * @phba: phba object
22188 * @hdwq: hdwq to cleanup cmd rsp buff resources on
22189 *
22190 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22191 *
22192 * Return codes:
22193 * None
22194 **/
22195void
22196lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22197 struct lpfc_sli4_hdw_queue *hdwq)
22198{
22199 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22200 struct fcp_cmd_rsp_buf *list_entry = NULL;
22201 struct fcp_cmd_rsp_buf *tmp = NULL;
22202 unsigned long iflags;
22203
22204 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22205
22206 /* Free cmd_rsp buf pool */
22207 list_for_each_entry_safe(list_entry, tmp,
22208 buf_list,
22209 list_node) {
22210 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22211 list_entry->fcp_cmnd,
22212 list_entry->fcp_cmd_rsp_dma_handle);
22213 list_del(&list_entry->list_node);
22214 kfree(list_entry);
22215 }
22216
22217 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22218}
22219
22220/**
22221 * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
22222 * @phba: phba object
22223 * @job: job entry of the command to be posted.
22224 *
22225 * Fill in the common fields of the WQE for the given command.
22226 *
22227 * Return codes:
22228 * None
22229 **/
22230void
22231lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
22232{
22233 u8 cmnd;
22234 u32 *pcmd;
22235 u32 if_type = 0;
22236 u32 fip, abort_tag;
22237 struct lpfc_nodelist *ndlp = NULL;
22238 union lpfc_wqe128 *wqe = &job->wqe;
22239 u8 command_type = ELS_COMMAND_NON_FIP;
22240
22241 fip = phba->hba_flag & HBA_FIP_SUPPORT;
22242 /* The fcp commands will set command type */
22243 if (job->cmd_flag & LPFC_IO_FCP)
22244 command_type = FCP_COMMAND;
22245 else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
22246 command_type = ELS_COMMAND_FIP;
22247 else
22248 command_type = ELS_COMMAND_NON_FIP;
22249
22250 abort_tag = job->iotag;
22251 cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
22252
22253 switch (cmnd) {
22254 case CMD_ELS_REQUEST64_WQE:
22255 ndlp = job->ndlp;
22256
22257 if_type = bf_get(lpfc_sli_intf_if_type,
22258 &phba->sli4_hba.sli_intf);
22259 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22260 pcmd = (u32 *)job->cmd_dmabuf->virt;
22261 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
22262 *pcmd == ELS_CMD_SCR ||
22263 *pcmd == ELS_CMD_RDF ||
22264 *pcmd == ELS_CMD_EDC ||
22265 *pcmd == ELS_CMD_RSCN_XMT ||
22266 *pcmd == ELS_CMD_FDISC ||
22267 *pcmd == ELS_CMD_LOGO ||
22268 *pcmd == ELS_CMD_QFPA ||
22269 *pcmd == ELS_CMD_UVEM ||
22270 *pcmd == ELS_CMD_PLOGI)) {
22271 bf_set(els_req64_sp, &wqe->els_req, 1);
22272 bf_set(els_req64_sid, &wqe->els_req,
22273 job->vport->fc_myDID);
22274
22275 if ((*pcmd == ELS_CMD_FLOGI) &&
22276 !(phba->fc_topology ==
22277 LPFC_TOPOLOGY_LOOP))
22278 bf_set(els_req64_sid, &wqe->els_req, 0);
22279
22280 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
22281 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22282 phba->vpi_ids[job->vport->vpi]);
22283 } else if (pcmd) {
22284 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
22285 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22286 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22287 }
22288 }
22289
22290 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
22291 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22292
22293 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
22294 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
22295 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
22296 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22297 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
22298 break;
22299 case CMD_XMIT_ELS_RSP64_WQE:
22300 ndlp = job->ndlp;
22301
22302 /* word4 */
22303 wqe->xmit_els_rsp.word4 = 0;
22304
22305 if_type = bf_get(lpfc_sli_intf_if_type,
22306 &phba->sli4_hba.sli_intf);
22307 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22308 if (job->vport->fc_flag & FC_PT2PT) {
22309 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22310 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22311 job->vport->fc_myDID);
22312 if (job->vport->fc_myDID == Fabric_DID) {
22313 bf_set(wqe_els_did,
22314 &wqe->xmit_els_rsp.wqe_dest, 0);
22315 }
22316 }
22317 }
22318
22319 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
22320 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
22321 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
22322 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
22323 LPFC_WQE_LENLOC_WORD3);
22324 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
22325
22326 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
22327 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22328 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22329 job->vport->fc_myDID);
22330 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
22331 }
22332
22333 if (phba->sli_rev == LPFC_SLI_REV4) {
22334 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
22335 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22336
22337 if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
22338 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
22339 phba->vpi_ids[job->vport->vpi]);
22340 }
22341 command_type = OTHER_COMMAND;
22342 break;
22343 case CMD_GEN_REQUEST64_WQE:
22344 /* Word 10 */
22345 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
22346 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
22347 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
22348 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22349 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
22350 command_type = OTHER_COMMAND;
22351 break;
22352 case CMD_XMIT_SEQUENCE64_WQE:
22353 if (phba->link_flag & LS_LOOPBACK_MODE)
22354 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
22355
22356 wqe->xmit_sequence.rsvd3 = 0;
22357 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
22358 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
22359 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
22360 LPFC_WQE_IOD_WRITE);
22361 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
22362 LPFC_WQE_LENLOC_WORD12);
22363 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
22364 command_type = OTHER_COMMAND;
22365 break;
22366 case CMD_XMIT_BLS_RSP64_WQE:
22367 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
22368 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
22369 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
22370 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
22371 phba->vpi_ids[phba->pport->vpi]);
22372 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
22373 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
22374 LPFC_WQE_LENLOC_NONE);
22375 /* Overwrite the pre-set command type with OTHER_COMMAND */
22376 command_type = OTHER_COMMAND;
22377 break;
22378 case CMD_FCP_ICMND64_WQE: /* task mgmt commands */
22379 case CMD_ABORT_XRI_WQE: /* abort iotag */
22380 case CMD_SEND_FRAME: /* mds loopback */
22381 /* cases already formatted for sli4 wqe - no chgs necessary */
22382 return;
22383 default:
22384 dump_stack();
22385 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
22386 "6207 Invalid command 0x%x\n",
22387 cmnd);
22388 break;
22389 }
22390
22391 wqe->generic.wqe_com.abort_tag = abort_tag;
22392 bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
22393 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
22394 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
22395}
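/*
 * Editor's usage sketch: lpfc_sli_prep_wqe() dispatches on the command code
 * already encoded in the WQE, so a caller sets wqe_cmnd (plus any
 * command-specific words) first and then lets this routine fill in the
 * common fields:
 *
 *	union lpfc_wqe128 *wqe = &job->wqe;
 *
 *	bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
 *	// ... fill the ELS-specific words (BDE, DID, timeouts, ...) ...
 *	lpfc_sli_prep_wqe(phba, job);	// common words, reqtag, cqid
 */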